ZTE's code, first commit

Change-Id: I9a04da59e459a9bc0d67f101f700d9d7dc8d681b
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/Makefile b/ap/os/linux/linux-3.4.x/drivers/s390/Makefile
new file mode 100644
index 0000000..95bccfd
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the S/390 specific device drivers
+#
+
+obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
+
+drivers-y += drivers/s390/built-in.o
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/Kconfig b/ap/os/linux/linux-3.4.x/drivers/s390/block/Kconfig
new file mode 100644
index 0000000..8e477bb
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/Kconfig
@@ -0,0 +1,72 @@
+comment "S/390 block device drivers"
+	depends on S390 && BLOCK
+
+config BLK_DEV_XPRAM
+	def_tristate m
+	prompt "XPRAM disk support"
+	depends on S390 && BLOCK
+	help
+	  Select this option if you want to use your expanded storage on S/390
+	  or zSeries as a disk.  This is useful as a _fast_ swap device if you
+	  want to access more than 2G of memory when running in 31 bit mode.
+	  This option is also available as a module which will be called
+	  xpram.  If unsure, say "N".
+
+config DCSSBLK
+	def_tristate m
+	prompt "DCSSBLK support"
+	depends on S390 && BLOCK
+	help
+	  Support for dcss block device
+
+config DASD
+	def_tristate y
+	prompt "Support for DASD devices"
+	depends on CCW && BLOCK
+	select IOSCHED_DEADLINE
+	help
+	  Enable this option if you want to access DASDs directly utilizing
+	  S/390s channel subsystem commands. This is necessary for running
+	  natively on a single image or an LPAR.
+
+config DASD_PROFILE
+	def_bool y
+	prompt "Profiling support for dasd devices"
+	depends on DASD
+	help
+	  Enable this option if you want to see profiling information
+	  in /proc/dasd/statistics.
+
+config DASD_ECKD
+	def_tristate y
+	prompt "Support for ECKD Disks"
+	depends on DASD
+	help
+	  ECKD devices are the most commonly used devices. You should enable
+	  this option unless you are very sure to have no ECKD device.
+
+config DASD_FBA
+	def_tristate y
+	prompt "Support for FBA Disks"
+	depends on DASD
+	help
+	  Select this option to be able to access FBA devices. It is safe to
+	  say "Y".
+
+config DASD_DIAG
+	def_tristate y
+	prompt "Support for DIAG access to Disks"
+	depends on DASD
+	help
+	  Select this option if you want to use Diagnose250 command to access
+	  Disks under VM.  If you are not running under VM or unsure what it is,
+	  say "N".
+
+config DASD_EER
+	def_bool y
+	prompt "Extended error reporting (EER)"
+	depends on DASD
+	help
+	  This driver provides a character device interface to the
+	  DASD extended error reporting. This is only needed if you want to
+	  use applications written for the EER facility.
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/Makefile b/ap/os/linux/linux-3.4.x/drivers/s390/block/Makefile
new file mode 100644
index 0000000..0a89e08
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/Makefile
@@ -0,0 +1,19 @@
+#
+# S/390 block devices
+#
+
+dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_alias.o
+dasd_fba_mod-objs  := dasd_fba.o
+dasd_diag_mod-objs := dasd_diag.o
+dasd_mod-objs      := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
+			dasd_genhd.o dasd_erp.o
+ifdef CONFIG_DASD_EER
+dasd_mod-objs      += dasd_eer.o
+endif
+
+obj-$(CONFIG_DASD) += dasd_mod.o
+obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
+obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
+obj-$(CONFIG_DASD_FBA)  += dasd_fba_mod.o
+obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
+obj-$(CONFIG_DCSSBLK) += dcssblk.o
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd.c b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd.c
new file mode 100644
index 0000000..f350912
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd.c
@@ -0,0 +1,3562 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Carsten Otte <Cotte@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ */
+
+#define KMSG_COMPONENT "dasd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <linux/major.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h>
+#include <linux/async.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+
+#include <asm/ccwdev.h>
+#include <asm/ebcdic.h>
+#include <asm/idals.h>
+#include <asm/itcw.h>
+#include <asm/diag.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd:"
+
+#include "dasd_int.h"
+/*
+ * SECTION: Constant definitions to be used within this file
+ */
+#define DASD_CHANQ_MAX_SIZE 4
+
+#define DASD_SLEEPON_START_TAG	(void *) 1
+#define DASD_SLEEPON_END_TAG	(void *) 2
+
+/*
+ * SECTION: exported variables of dasd.c
+ */
+debug_info_t *dasd_debug_area;
+static struct dentry *dasd_debugfs_root_entry;
+struct dasd_discipline *dasd_diag_discipline_pointer;
+void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
+
+MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
+MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
+		   " Copyright 2000 IBM Corporation");
+MODULE_SUPPORTED_DEVICE("dasd");
+MODULE_LICENSE("GPL");
+
+/*
+ * SECTION: prototypes for static functions of dasd.c
+ */
+static int  dasd_alloc_queue(struct dasd_block *);
+static void dasd_setup_queue(struct dasd_block *);
+static void dasd_free_queue(struct dasd_block *);
+static void dasd_flush_request_queue(struct dasd_block *);
+static int dasd_flush_block_queue(struct dasd_block *);
+static void dasd_device_tasklet(struct dasd_device *);
+static void dasd_block_tasklet(struct dasd_block *);
+static void do_kick_device(struct work_struct *);
+static void do_restore_device(struct work_struct *);
+static void do_reload_device(struct work_struct *);
+static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
+static void dasd_device_timeout(unsigned long);
+static void dasd_block_timeout(unsigned long);
+static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
+static void dasd_profile_init(struct dasd_profile *, struct dentry *);
+static void dasd_profile_exit(struct dasd_profile *);
+
+/*
+ * SECTION: Operations on the device structure.
+ */
+static wait_queue_head_t dasd_init_waitq;
+static wait_queue_head_t dasd_flush_wq;
+static wait_queue_head_t generic_waitq;
+
+/*
+ * Allocate memory for a new device structure.
+ */
+struct dasd_device *dasd_alloc_device(void)
+{
+	struct dasd_device *device;
+
+	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
+	if (!device)
+		return ERR_PTR(-ENOMEM);
+
+	/* Get two pages for normal block device operations. */
+	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
+	if (!device->ccw_mem) {
+		kfree(device);
+		return ERR_PTR(-ENOMEM);
+	}
+	/* Get one page for error recovery. */
+	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
+	if (!device->erp_mem) {
+		free_pages((unsigned long) device->ccw_mem, 1);
+		kfree(device);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
+	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
+	spin_lock_init(&device->mem_lock);
+	atomic_set(&device->tasklet_scheduled, 0);
+	tasklet_init(&device->tasklet,
+		     (void (*)(unsigned long)) dasd_device_tasklet,
+		     (unsigned long) device);
+	INIT_LIST_HEAD(&device->ccw_queue);
+	init_timer(&device->timer);
+	device->timer.function = dasd_device_timeout;
+	device->timer.data = (unsigned long) device;
+	INIT_WORK(&device->kick_work, do_kick_device);
+	INIT_WORK(&device->restore_device, do_restore_device);
+	INIT_WORK(&device->reload_device, do_reload_device);
+	device->state = DASD_STATE_NEW;
+	device->target = DASD_STATE_NEW;
+	mutex_init(&device->state_mutex);
+	spin_lock_init(&device->profile.lock);
+	return device;
+}
+
+/*
+ * Free memory of a device structure.
+ */
+void dasd_free_device(struct dasd_device *device)
+{
+	kfree(device->private);
+	free_page((unsigned long) device->erp_mem);
+	free_pages((unsigned long) device->ccw_mem, 1);
+	kfree(device);
+}
+
+/*
+ * Allocate memory for a new block device structure.
+ */
+struct dasd_block *dasd_alloc_block(void)
+{
+	struct dasd_block *block;
+
+	block = kzalloc(sizeof(*block), GFP_ATOMIC);
+	if (!block)
+		return ERR_PTR(-ENOMEM);
+	/* open_count = 0 means device online but not in use */
+	atomic_set(&block->open_count, -1);
+
+	spin_lock_init(&block->request_queue_lock);
+	atomic_set(&block->tasklet_scheduled, 0);
+	tasklet_init(&block->tasklet,
+		     (void (*)(unsigned long)) dasd_block_tasklet,
+		     (unsigned long) block);
+	INIT_LIST_HEAD(&block->ccw_queue);
+	spin_lock_init(&block->queue_lock);
+	init_timer(&block->timer);
+	block->timer.function = dasd_block_timeout;
+	block->timer.data = (unsigned long) block;
+	spin_lock_init(&block->profile.lock);
+
+	return block;
+}
+
+/*
+ * Free memory of a block device structure.
+ */
+void dasd_free_block(struct dasd_block *block)
+{
+	kfree(block);
+}
+
+/*
+ * Make a new device known to the system.
+ */
+static int dasd_state_new_to_known(struct dasd_device *device)
+{
+	int rc;
+
+	/*
+	 * As long as the device is not in state DASD_STATE_NEW we want to
+	 * keep the reference count > 0.
+	 */
+	dasd_get_device(device);
+
+	if (device->block) {
+		rc = dasd_alloc_queue(device->block);
+		if (rc) {
+			dasd_put_device(device);
+			return rc;
+		}
+	}
+	device->state = DASD_STATE_KNOWN;
+	return 0;
+}
+
+/*
+ * Let the system forget about a device.
+ */
+static int dasd_state_known_to_new(struct dasd_device *device)
+{
+	/* Disable extended error reporting for this device. */
+	dasd_eer_disable(device);
+	/* Forget the discipline information. */
+	if (device->discipline) {
+		if (device->discipline->uncheck_device)
+			device->discipline->uncheck_device(device);
+		module_put(device->discipline->owner);
+	}
+	device->discipline = NULL;
+	if (device->base_discipline)
+		module_put(device->base_discipline->owner);
+	device->base_discipline = NULL;
+	device->state = DASD_STATE_NEW;
+
+	if (device->block)
+		dasd_free_queue(device->block);
+
+	/* Give up reference we took in dasd_state_new_to_known. */
+	dasd_put_device(device);
+	return 0;
+}
+
+static struct dentry *dasd_debugfs_setup(const char *name,
+					 struct dentry *base_dentry)
+{
+	struct dentry *pde;
+
+	if (!base_dentry)
+		return NULL;
+	pde = debugfs_create_dir(name, base_dentry);
+	if (!pde || IS_ERR(pde))
+		return NULL;
+	return pde;
+}
+
+/*
+ * Request the irq line for the device.
+ */
+static int dasd_state_known_to_basic(struct dasd_device *device)
+{
+	struct dasd_block *block = device->block;
+	int rc;
+
+	/* Allocate and register gendisk structure. */
+	if (block) {
+		rc = dasd_gendisk_alloc(block);
+		if (rc)
+			return rc;
+		block->debugfs_dentry =
+			dasd_debugfs_setup(block->gdp->disk_name,
+					   dasd_debugfs_root_entry);
+		dasd_profile_init(&block->profile, block->debugfs_dentry);
+		if (dasd_global_profile_level == DASD_PROFILE_ON)
+			dasd_profile_on(&device->block->profile);
+	}
+	device->debugfs_dentry =
+		dasd_debugfs_setup(dev_name(&device->cdev->dev),
+				   dasd_debugfs_root_entry);
+	dasd_profile_init(&device->profile, device->debugfs_dentry);
+
+	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
+	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
+					    8 * sizeof(long));
+	debug_register_view(device->debug_area, &debug_sprintf_view);
+	debug_set_level(device->debug_area, DBF_WARNING);
+	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
+
+	device->state = DASD_STATE_BASIC;
+	return 0;
+}
+
+/*
+ * Release the irq line for the device. Terminate any running i/o.
+ */
+static int dasd_state_basic_to_known(struct dasd_device *device)
+{
+	int rc;
+	if (device->block) {
+		dasd_profile_exit(&device->block->profile);
+		if (device->block->debugfs_dentry)
+			debugfs_remove(device->block->debugfs_dentry);
+		dasd_gendisk_free(device->block);
+		dasd_block_clear_timer(device->block);
+	}
+	rc = dasd_flush_device_queue(device);
+	if (rc)
+		return rc;
+	dasd_device_clear_timer(device);
+	dasd_profile_exit(&device->profile);
+	if (device->debugfs_dentry)
+		debugfs_remove(device->debugfs_dentry);
+
+	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
+	if (device->debug_area != NULL) {
+		debug_unregister(device->debug_area);
+		device->debug_area = NULL;
+	}
+	device->state = DASD_STATE_KNOWN;
+	return 0;
+}
+
+/*
+ * Do the initial analysis. The do_analysis function may return
+ * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
+ * until the discipline decides to continue the startup sequence
+ * by calling the function dasd_change_state. The eckd discipline
+ * uses this to start a ccw that detects the format. The completion
+ * interrupt for this detection ccw uses the kernel event daemon to
+ * trigger the call to dasd_change_state. All this is done in the
+ * discipline code, see dasd_eckd.c.
+ * After the analysis ccw is done (do_analysis returned 0) the block
+ * device is set up.
+ * In case the analysis returns an error, the device setup is stopped
+ * (a fake disk was already added to allow formatting).
+ */
+static int dasd_state_basic_to_ready(struct dasd_device *device)
+{
+	int rc;
+	struct dasd_block *block;
+
+	rc = 0;
+	block = device->block;
+	/* make disk known with correct capacity */
+	if (block) {
+		if (block->base->discipline->do_analysis != NULL)
+			rc = block->base->discipline->do_analysis(block);
+		if (rc) {
+			if (rc != -EAGAIN)
+				device->state = DASD_STATE_UNFMT;
+			return rc;
+		}
+		dasd_setup_queue(block);
+		set_capacity(block->gdp,
+			     block->blocks << block->s2b_shift);
+		device->state = DASD_STATE_READY;
+		rc = dasd_scan_partitions(block);
+		if (rc)
+			device->state = DASD_STATE_BASIC;
+	} else {
+		device->state = DASD_STATE_READY;
+	}
+	return rc;
+}
+
+/*
+ * Remove device from block device layer. Destroy dirty buffers.
+ * Forget format information. Check if the target level is basic
+ * and if it is create fake disk for formatting.
+ */
+static int dasd_state_ready_to_basic(struct dasd_device *device)
+{
+	int rc;
+
+	device->state = DASD_STATE_BASIC;
+	if (device->block) {
+		struct dasd_block *block = device->block;
+		rc = dasd_flush_block_queue(block);
+		if (rc) {
+			device->state = DASD_STATE_READY;
+			return rc;
+		}
+		dasd_flush_request_queue(block);
+		dasd_destroy_partitions(block);
+		block->blocks = 0;
+		block->bp_block = 0;
+		block->s2b_shift = 0;
+	}
+	return 0;
+}
+
+/*
+ * Back to basic.
+ */
+static int dasd_state_unfmt_to_basic(struct dasd_device *device)
+{
+	device->state = DASD_STATE_BASIC;
+	return 0;
+}
+
+/*
+ * Make the device online and schedule the bottom half to start
+ * the requeueing of requests from the linux request queue to the
+ * ccw queue.
+ */
+static int
+dasd_state_ready_to_online(struct dasd_device * device)
+{
+	int rc;
+	struct gendisk *disk;
+	struct disk_part_iter piter;
+	struct hd_struct *part;
+
+	if (device->discipline->ready_to_online) {
+		rc = device->discipline->ready_to_online(device);
+		if (rc)
+			return rc;
+	}
+	device->state = DASD_STATE_ONLINE;
+	if (device->block) {
+		dasd_schedule_block_bh(device->block);
+		if ((device->features & DASD_FEATURE_USERAW)) {
+			disk = device->block->gdp;
+			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+			return 0;
+		}
+		disk = device->block->bdev->bd_disk;
+		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+		while ((part = disk_part_iter_next(&piter)))
+			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
+		disk_part_iter_exit(&piter);
+	}
+	return 0;
+}
+
+/*
+ * Stop the requeueing of requests again.
+ */
+static int dasd_state_online_to_ready(struct dasd_device *device)
+{
+	int rc;
+	struct gendisk *disk;
+	struct disk_part_iter piter;
+	struct hd_struct *part;
+
+	if (device->discipline->online_to_ready) {
+		rc = device->discipline->online_to_ready(device);
+		if (rc)
+			return rc;
+	}
+	device->state = DASD_STATE_READY;
+	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
+		disk = device->block->bdev->bd_disk;
+		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+		while ((part = disk_part_iter_next(&piter)))
+			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
+		disk_part_iter_exit(&piter);
+	}
+	return 0;
+}
+
+/*
+ * Device startup state changes.
+ */
+static int dasd_increase_state(struct dasd_device *device)
+{
+	int rc;
+
+	rc = 0;
+	if (device->state == DASD_STATE_NEW &&
+	    device->target >= DASD_STATE_KNOWN)
+		rc = dasd_state_new_to_known(device);
+
+	if (!rc &&
+	    device->state == DASD_STATE_KNOWN &&
+	    device->target >= DASD_STATE_BASIC)
+		rc = dasd_state_known_to_basic(device);
+
+	if (!rc &&
+	    device->state == DASD_STATE_BASIC &&
+	    device->target >= DASD_STATE_READY)
+		rc = dasd_state_basic_to_ready(device);
+
+	if (!rc &&
+	    device->state == DASD_STATE_UNFMT &&
+	    device->target > DASD_STATE_UNFMT)
+		rc = -EPERM;
+
+	if (!rc &&
+	    device->state == DASD_STATE_READY &&
+	    device->target >= DASD_STATE_ONLINE)
+		rc = dasd_state_ready_to_online(device);
+
+	return rc;
+}
+
+/*
+ * Device shutdown state changes.
+ */
+static int dasd_decrease_state(struct dasd_device *device)
+{
+	int rc;
+
+	rc = 0;
+	if (device->state == DASD_STATE_ONLINE &&
+	    device->target <= DASD_STATE_READY)
+		rc = dasd_state_online_to_ready(device);
+
+	if (!rc &&
+	    device->state == DASD_STATE_READY &&
+	    device->target <= DASD_STATE_BASIC)
+		rc = dasd_state_ready_to_basic(device);
+
+	if (!rc &&
+	    device->state == DASD_STATE_UNFMT &&
+	    device->target <= DASD_STATE_BASIC)
+		rc = dasd_state_unfmt_to_basic(device);
+
+	if (!rc &&
+	    device->state == DASD_STATE_BASIC &&
+	    device->target <= DASD_STATE_KNOWN)
+		rc = dasd_state_basic_to_known(device);
+
+	if (!rc &&
+	    device->state == DASD_STATE_KNOWN &&
+	    device->target <= DASD_STATE_NEW)
+		rc = dasd_state_known_to_new(device);
+
+	return rc;
+}
+
+/*
+ * This is the main startup/shutdown routine.
+ */
+static void dasd_change_state(struct dasd_device *device)
+{
+	int rc;
+
+	if (device->state == device->target)
+		/* Already where we want to go today... */
+		return;
+	if (device->state < device->target)
+		rc = dasd_increase_state(device);
+	else
+		rc = dasd_decrease_state(device);
+	if (rc == -EAGAIN)
+		return;
+	if (rc)
+		device->target = device->state;
+
+	if (device->state == device->target)
+		wake_up(&dasd_init_waitq);
+
+	/* let user-space know that the device status changed */
+	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
+}
+
+/*
+ * Kick starter for devices that did not complete the startup/shutdown
+ * procedure or were sleeping because of a pending state.
+ * dasd_kick_device will schedule a call to do_kick_device to the kernel
+ * event daemon.
+ */
+static void do_kick_device(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
+	mutex_lock(&device->state_mutex);
+	dasd_change_state(device);
+	mutex_unlock(&device->state_mutex);
+	dasd_schedule_device_bh(device);
+	dasd_put_device(device);
+}
+
+void dasd_kick_device(struct dasd_device *device)
+{
+	dasd_get_device(device);
+	/* queue call to dasd_kick_device to the kernel event daemon. */
+	schedule_work(&device->kick_work);
+}
+
+/*
+ * dasd_reload_device will schedule a call to do_reload_device to the kernel
+ * event daemon.
+ */
+static void do_reload_device(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  reload_device);
+	device->discipline->reload(device);
+	dasd_put_device(device);
+}
+
+void dasd_reload_device(struct dasd_device *device)
+{
+	dasd_get_device(device);
+	/* queue call to dasd_reload_device to the kernel event daemon. */
+	schedule_work(&device->reload_device);
+}
+EXPORT_SYMBOL(dasd_reload_device);
+
+/*
+ * dasd_restore_device will schedule a call to do_restore_device to the kernel
+ * event daemon.
+ */
+static void do_restore_device(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  restore_device);
+	device->cdev->drv->restore(device->cdev);
+	dasd_put_device(device);
+}
+
+void dasd_restore_device(struct dasd_device *device)
+{
+	dasd_get_device(device);
+	/* queue call to dasd_restore_device to the kernel event daemon. */
+	schedule_work(&device->restore_device);
+}
+
+/*
+ * Set the target state for a device and start the state change.
+ */
+void dasd_set_target_state(struct dasd_device *device, int target)
+{
+	dasd_get_device(device);
+	mutex_lock(&device->state_mutex);
+	/* If we are in probeonly mode stop at DASD_STATE_READY. */
+	if (dasd_probeonly && target > DASD_STATE_READY)
+		target = DASD_STATE_READY;
+	if (device->target != target) {
+		if (device->state == target)
+			wake_up(&dasd_init_waitq);
+		device->target = target;
+	}
+	if (device->state != device->target)
+		dasd_change_state(device);
+	mutex_unlock(&device->state_mutex);
+	dasd_put_device(device);
+}
+
+/*
+ * Enable a device and wait until it has reached its target state.
+ */
+static inline int _wait_for_device(struct dasd_device *device)
+{
+	return (device->state == device->target);
+}
+
+void dasd_enable_device(struct dasd_device *device)
+{
+	dasd_set_target_state(device, DASD_STATE_ONLINE);
+	if (device->state <= DASD_STATE_KNOWN)
+		/* No discipline for device found. */
+		dasd_set_target_state(device, DASD_STATE_NEW);
+	/* Now wait for the devices to come up. */
+	wait_event(dasd_init_waitq, _wait_for_device(device));
+
+	dasd_reload_device(device);
+	if (device->discipline->kick_validate)
+		device->discipline->kick_validate(device);
+}
+
+/*
+ * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
+ */
+
+unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;
+
+#ifdef CONFIG_DASD_PROFILE
+struct dasd_profile_info dasd_global_profile_data;
+static struct dentry *dasd_global_profile_dentry;
+static struct dentry *dasd_debugfs_global_entry;
+
+/*
+ * Add profiling information for cqr before execution.
+ */
+static void dasd_profile_start(struct dasd_block *block,
+			       struct dasd_ccw_req *cqr,
+			       struct request *req)
+{
+	struct list_head *l;
+	unsigned int counter;
+	struct dasd_device *device;
+
+	/* count the length of the chanq for statistics */
+	counter = 0;
+	if (dasd_global_profile_level || block->profile.data)
+		list_for_each(l, &block->ccw_queue)
+			if (++counter >= 31)
+				break;
+
+	if (dasd_global_profile_level) {
+		dasd_global_profile_data.dasd_io_nr_req[counter]++;
+		if (rq_data_dir(req) == READ)
+			dasd_global_profile_data.dasd_read_nr_req[counter]++;
+	}
+
+	spin_lock(&block->profile.lock);
+	if (block->profile.data) {
+		block->profile.data->dasd_io_nr_req[counter]++;
+		if (rq_data_dir(req) == READ)
+			block->profile.data->dasd_read_nr_req[counter]++;
+	}
+	spin_unlock(&block->profile.lock);
+
+	/*
+	 * We count the request for the start device, even though it may run on
+	 * some other device due to error recovery. This way we make sure that
+	 * we count each request only once.
+	 */
+	device = cqr->startdev;
+	if (device->profile.data) {
+		counter = 1; /* request is not yet queued on the start device */
+		list_for_each(l, &device->ccw_queue)
+			if (++counter >= 31)
+				break;
+	}
+	spin_lock(&device->profile.lock);
+	if (device->profile.data) {
+		device->profile.data->dasd_io_nr_req[counter]++;
+		if (rq_data_dir(req) == READ)
+			device->profile.data->dasd_read_nr_req[counter]++;
+	}
+	spin_unlock(&device->profile.lock);
+}
+
+/*
+ * Add profiling information for cqr after execution.
+ */
+
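+/*
+ * dasd_profile_counter maps a value to a histogram bucket: the index is
+ * the smallest i with value < 2^(i+2), capped at 31 (a rough log2 scale).
+ */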
+#define dasd_profile_counter(value, index)			   \
+{								   \
+	for (index = 0; index < 31 && value >> (2+index); index++) \
+		;						   \
+}
+
+static void dasd_profile_end_add_data(struct dasd_profile_info *data,
+				      int is_alias,
+				      int is_tpm,
+				      int is_read,
+				      long sectors,
+				      int sectors_ind,
+				      int tottime_ind,
+				      int tottimeps_ind,
+				      int strtime_ind,
+				      int irqtime_ind,
+				      int irqtimeps_ind,
+				      int endtime_ind)
+{
+	/* in case of an overflow, reset the whole profile */
+	if (data->dasd_io_reqs == UINT_MAX) {
+			memset(data, 0, sizeof(*data));
+			getnstimeofday(&data->starttod);
+	}
+	data->dasd_io_reqs++;
+	data->dasd_io_sects += sectors;
+	if (is_alias)
+		data->dasd_io_alias++;
+	if (is_tpm)
+		data->dasd_io_tpm++;
+
+	data->dasd_io_secs[sectors_ind]++;
+	data->dasd_io_times[tottime_ind]++;
+	data->dasd_io_timps[tottimeps_ind]++;
+	data->dasd_io_time1[strtime_ind]++;
+	data->dasd_io_time2[irqtime_ind]++;
+	data->dasd_io_time2ps[irqtimeps_ind]++;
+	data->dasd_io_time3[endtime_ind]++;
+
+	if (is_read) {
+		data->dasd_read_reqs++;
+		data->dasd_read_sects += sectors;
+		if (is_alias)
+			data->dasd_read_alias++;
+		if (is_tpm)
+			data->dasd_read_tpm++;
+		data->dasd_read_secs[sectors_ind]++;
+		data->dasd_read_times[tottime_ind]++;
+		data->dasd_read_time1[strtime_ind]++;
+		data->dasd_read_time2[irqtime_ind]++;
+		data->dasd_read_time3[endtime_ind]++;
+	}
+}
+
+static void dasd_profile_end(struct dasd_block *block,
+			     struct dasd_ccw_req *cqr,
+			     struct request *req)
+{
+	long strtime, irqtime, endtime, tottime;	/* in microseconds */
+	long tottimeps, sectors;
+	struct dasd_device *device;
+	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
+	int irqtime_ind, irqtimeps_ind, endtime_ind;
+
+	device = cqr->startdev;
+	if (!(dasd_global_profile_level ||
+	      block->profile.data ||
+	      device->profile.data))
+		return;
+
+	sectors = blk_rq_sectors(req);
+	if (!cqr->buildclk || !cqr->startclk ||
+	    !cqr->stopclk || !cqr->endclk ||
+	    !sectors)
+		return;
+
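+	/* clock values are s390 TOD format; shifting right by 12 converts them to microseconds */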
+	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
+	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
+	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
+	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
+	tottimeps = tottime / sectors;
+
+	dasd_profile_counter(sectors, sectors_ind);
+	dasd_profile_counter(tottime, tottime_ind);
+	dasd_profile_counter(tottimeps, tottimeps_ind);
+	dasd_profile_counter(strtime, strtime_ind);
+	dasd_profile_counter(irqtime, irqtime_ind);
+	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
+	dasd_profile_counter(endtime, endtime_ind);
+
+	if (dasd_global_profile_level) {
+		dasd_profile_end_add_data(&dasd_global_profile_data,
+					  cqr->startdev != block->base,
+					  cqr->cpmode == 1,
+					  rq_data_dir(req) == READ,
+					  sectors, sectors_ind, tottime_ind,
+					  tottimeps_ind, strtime_ind,
+					  irqtime_ind, irqtimeps_ind,
+					  endtime_ind);
+	}
+
+	spin_lock(&block->profile.lock);
+	if (block->profile.data)
+		dasd_profile_end_add_data(block->profile.data,
+					  cqr->startdev != block->base,
+					  cqr->cpmode == 1,
+					  rq_data_dir(req) == READ,
+					  sectors, sectors_ind, tottime_ind,
+					  tottimeps_ind, strtime_ind,
+					  irqtime_ind, irqtimeps_ind,
+					  endtime_ind);
+	spin_unlock(&block->profile.lock);
+
+	spin_lock(&device->profile.lock);
+	if (device->profile.data)
+		dasd_profile_end_add_data(device->profile.data,
+					  cqr->startdev != block->base,
+					  cqr->cpmode == 1,
+					  rq_data_dir(req) == READ,
+					  sectors, sectors_ind, tottime_ind,
+					  tottimeps_ind, strtime_ind,
+					  irqtime_ind, irqtimeps_ind,
+					  endtime_ind);
+	spin_unlock(&device->profile.lock);
+}
+
+void dasd_profile_reset(struct dasd_profile *profile)
+{
+	struct dasd_profile_info *data;
+
+	spin_lock_bh(&profile->lock);
+	data = profile->data;
+	if (!data) {
+		spin_unlock_bh(&profile->lock);
+		return;
+	}
+	memset(data, 0, sizeof(*data));
+	getnstimeofday(&data->starttod);
+	spin_unlock_bh(&profile->lock);
+}
+
+void dasd_global_profile_reset(void)
+{
+	memset(&dasd_global_profile_data, 0, sizeof(dasd_global_profile_data));
+	getnstimeofday(&dasd_global_profile_data.starttod);
+}
+
+int dasd_profile_on(struct dasd_profile *profile)
+{
+	struct dasd_profile_info *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	spin_lock_bh(&profile->lock);
+	if (profile->data) {
+		spin_unlock_bh(&profile->lock);
+		kfree(data);
+		return 0;
+	}
+	getnstimeofday(&data->starttod);
+	profile->data = data;
+	spin_unlock_bh(&profile->lock);
+	return 0;
+}
+
+void dasd_profile_off(struct dasd_profile *profile)
+{
+	spin_lock_bh(&profile->lock);
+	kfree(profile->data);
+	profile->data = NULL;
+	spin_unlock_bh(&profile->lock);
+}
+
+char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
+{
+	char *buffer;
+
+	buffer = vmalloc(user_len + 1);
+	if (buffer == NULL)
+		return ERR_PTR(-ENOMEM);
+	if (copy_from_user(buffer, user_buf, user_len) != 0) {
+		vfree(buffer);
+		return ERR_PTR(-EFAULT);
+	}
+	/* got the string, now strip linefeed. */
+	if (user_len && buffer[user_len - 1] == '\n')
+		buffer[user_len - 1] = 0;
+	else
+		buffer[user_len] = 0;
+	return buffer;
+}
+
+static ssize_t dasd_stats_write(struct file *file,
+				const char __user *user_buf,
+				size_t user_len, loff_t *pos)
+{
+	char *buffer, *str;
+	int rc;
+	struct seq_file *m = (struct seq_file *)file->private_data;
+	struct dasd_profile *prof = m->private;
+
+	if (user_len > 65536)
+		user_len = 65536;
+	buffer = dasd_get_user_string(user_buf, user_len);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
+
+	str = skip_spaces(buffer);
+	rc = user_len;
+	if (strncmp(str, "reset", 5) == 0) {
+		dasd_profile_reset(prof);
+	} else if (strncmp(str, "on", 2) == 0) {
+		rc = dasd_profile_on(prof);
+		if (!rc)
+			rc = user_len;
+	} else if (strncmp(str, "off", 3) == 0) {
+		dasd_profile_off(prof);
+	} else
+		rc = -EINVAL;
+	vfree(buffer);
+	return rc;
+}
+
+static void dasd_stats_array(struct seq_file *m, unsigned int *array)
+{
+	int i;
+
+	for (i = 0; i < 32; i++)
+		seq_printf(m, "%u ", array[i]);
+	seq_putc(m, '\n');
+}
+
+static void dasd_stats_seq_print(struct seq_file *m,
+				 struct dasd_profile_info *data)
+{
+	seq_printf(m, "start_time %ld.%09ld\n",
+		   data->starttod.tv_sec, data->starttod.tv_nsec);
+	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
+	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
+	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
+	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
+	seq_printf(m, "histogram_sectors ");
+	dasd_stats_array(m, data->dasd_io_secs);
+	seq_printf(m, "histogram_io_times ");
+	dasd_stats_array(m, data->dasd_io_times);
+	seq_printf(m, "histogram_io_times_weighted ");
+	dasd_stats_array(m, data->dasd_io_timps);
+	seq_printf(m, "histogram_time_build_to_ssch ");
+	dasd_stats_array(m, data->dasd_io_time1);
+	seq_printf(m, "histogram_time_ssch_to_irq ");
+	dasd_stats_array(m, data->dasd_io_time2);
+	seq_printf(m, "histogram_time_ssch_to_irq_weighted ");
+	dasd_stats_array(m, data->dasd_io_time2ps);
+	seq_printf(m, "histogram_time_irq_to_end ");
+	dasd_stats_array(m, data->dasd_io_time3);
+	seq_printf(m, "histogram_ccw_queue_length ");
+	dasd_stats_array(m, data->dasd_io_nr_req);
+	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
+	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
+	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
+	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
+	seq_printf(m, "histogram_read_sectors ");
+	dasd_stats_array(m, data->dasd_read_secs);
+	seq_printf(m, "histogram_read_times ");
+	dasd_stats_array(m, data->dasd_read_times);
+	seq_printf(m, "histogram_read_time_build_to_ssch ");
+	dasd_stats_array(m, data->dasd_read_time1);
+	seq_printf(m, "histogram_read_time_ssch_to_irq ");
+	dasd_stats_array(m, data->dasd_read_time2);
+	seq_printf(m, "histogram_read_time_irq_to_end ");
+	dasd_stats_array(m, data->dasd_read_time3);
+	seq_printf(m, "histogram_read_ccw_queue_length ");
+	dasd_stats_array(m, data->dasd_read_nr_req);
+}
+
+static int dasd_stats_show(struct seq_file *m, void *v)
+{
+	struct dasd_profile *profile;
+	struct dasd_profile_info *data;
+
+	profile = m->private;
+	spin_lock_bh(&profile->lock);
+	data = profile->data;
+	if (!data) {
+		spin_unlock_bh(&profile->lock);
+		seq_printf(m, "disabled\n");
+		return 0;
+	}
+	dasd_stats_seq_print(m, data);
+	spin_unlock_bh(&profile->lock);
+	return 0;
+}
+
+static int dasd_stats_open(struct inode *inode, struct file *file)
+{
+	struct dasd_profile *profile = inode->i_private;
+	return single_open(file, dasd_stats_show, profile);
+}
+
+static const struct file_operations dasd_stats_raw_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dasd_stats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= dasd_stats_write,
+};
+
+static ssize_t dasd_stats_global_write(struct file *file,
+				       const char __user *user_buf,
+				       size_t user_len, loff_t *pos)
+{
+	char *buffer, *str;
+	ssize_t rc;
+
+	if (user_len > 65536)
+		user_len = 65536;
+	buffer = dasd_get_user_string(user_buf, user_len);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
+	str = skip_spaces(buffer);
+	rc = user_len;
+	if (strncmp(str, "reset", 5) == 0) {
+		dasd_global_profile_reset();
+	} else if (strncmp(str, "on", 2) == 0) {
+		dasd_global_profile_reset();
+		dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
+	} else if (strncmp(str, "off", 3) == 0) {
+		dasd_global_profile_level = DASD_PROFILE_OFF;
+	} else
+		rc = -EINVAL;
+	vfree(buffer);
+	return rc;
+}
+
+static int dasd_stats_global_show(struct seq_file *m, void *v)
+{
+	if (!dasd_global_profile_level) {
+		seq_printf(m, "disabled\n");
+		return 0;
+	}
+	dasd_stats_seq_print(m, &dasd_global_profile_data);
+	return 0;
+}
+
+static int dasd_stats_global_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dasd_stats_global_show, NULL);
+}
+
+static const struct file_operations dasd_stats_global_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dasd_stats_global_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= dasd_stats_global_write,
+};
+
+static void dasd_profile_init(struct dasd_profile *profile,
+			      struct dentry *base_dentry)
+{
+	umode_t mode;
+	struct dentry *pde;
+
+	if (!base_dentry)
+		return;
+	profile->dentry = NULL;
+	profile->data = NULL;
+	mode = (S_IRUSR | S_IWUSR | S_IFREG);
+	pde = debugfs_create_file("statistics", mode, base_dentry,
+				  profile, &dasd_stats_raw_fops);
+	if (pde && !IS_ERR(pde))
+		profile->dentry = pde;
+	return;
+}
+
+static void dasd_profile_exit(struct dasd_profile *profile)
+{
+	dasd_profile_off(profile);
+	if (profile->dentry) {
+		debugfs_remove(profile->dentry);
+		profile->dentry = NULL;
+	}
+}
+
+static void dasd_statistics_removeroot(void)
+{
+	dasd_global_profile_level = DASD_PROFILE_OFF;
+	if (dasd_global_profile_dentry) {
+		debugfs_remove(dasd_global_profile_dentry);
+		dasd_global_profile_dentry = NULL;
+	}
+	if (dasd_debugfs_global_entry)
+		debugfs_remove(dasd_debugfs_global_entry);
+	if (dasd_debugfs_root_entry)
+		debugfs_remove(dasd_debugfs_root_entry);
+}
+
+static void dasd_statistics_createroot(void)
+{
+	umode_t mode;
+	struct dentry *pde;
+
+	dasd_debugfs_root_entry = NULL;
+	dasd_debugfs_global_entry = NULL;
+	dasd_global_profile_dentry = NULL;
+	pde = debugfs_create_dir("dasd", NULL);
+	if (!pde || IS_ERR(pde))
+		goto error;
+	dasd_debugfs_root_entry = pde;
+	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
+	if (!pde || IS_ERR(pde))
+		goto error;
+	dasd_debugfs_global_entry = pde;
+
+	mode = (S_IRUSR | S_IWUSR | S_IFREG);
+	pde = debugfs_create_file("statistics", mode, dasd_debugfs_global_entry,
+				  NULL, &dasd_stats_global_fops);
+	if (!pde || IS_ERR(pde))
+		goto error;
+	dasd_global_profile_dentry = pde;
+	return;
+
+error:
+	DBF_EVENT(DBF_ERR, "%s",
+		  "Creation of the dasd debugfs interface failed");
+	dasd_statistics_removeroot();
+	return;
+}
+
+#else
+#define dasd_profile_start(block, cqr, req) do {} while (0)
+#define dasd_profile_end(block, cqr, req) do {} while (0)
+
+static void dasd_statistics_createroot(void)
+{
+	return;
+}
+
+static void dasd_statistics_removeroot(void)
+{
+	return;
+}
+
+int dasd_stats_generic_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "Statistics are not activated in this kernel\n");
+	return 0;
+}
+
+static void dasd_profile_init(struct dasd_profile *profile,
+			      struct dentry *base_dentry)
+{
+	return;
+}
+
+static void dasd_profile_exit(struct dasd_profile *profile)
+{
+	return;
+}
+
+int dasd_profile_on(struct dasd_profile *profile)
+{
+	return 0;
+}
+
+#endif				/* CONFIG_DASD_PROFILE */
+
+/*
+ * Allocate memory for a channel program with 'cplength' channel
+ * command words and 'datasize' additional space. There are two
+ * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
+ * memory and 2) dasd_smalloc_request uses the static ccw memory
+ * that gets allocated for each device.
+ */
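+/*
+ * Illustrative sketch (not part of the driver): a discipline typically
+ * builds and releases a request roughly as follows, assuming a single
+ * CCW and 64 bytes of payload (the magic constant and sizes here are
+ * placeholders, not taken from a real discipline):
+ *
+ *	cqr = dasd_smalloc_request(magic, 1, 64, device);
+ *	if (IS_ERR(cqr))
+ *		return PTR_ERR(cqr);
+ *	... fill in cqr->cpaddr[0] and cqr->data, then start the request ...
+ *	dasd_sfree_request(cqr, device);
+ */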
+struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
+					  int datasize,
+					  struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+
+	/* Sanity checks */
+	BUG_ON(datasize > PAGE_SIZE ||
+	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
+
+	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
+	if (cqr == NULL)
+		return ERR_PTR(-ENOMEM);
+	cqr->cpaddr = NULL;
+	if (cplength > 0) {
+		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
+				      GFP_ATOMIC | GFP_DMA);
+		if (cqr->cpaddr == NULL) {
+			kfree(cqr);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+	cqr->data = NULL;
+	if (datasize > 0) {
+		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
+		if (cqr->data == NULL) {
+			kfree(cqr->cpaddr);
+			kfree(cqr);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+	cqr->magic =  magic;
+	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	dasd_get_device(device);
+	return cqr;
+}
+
+struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
+					  int datasize,
+					  struct dasd_device *device)
+{
+	unsigned long flags;
+	struct dasd_ccw_req *cqr;
+	char *data;
+	int size;
+
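+	/* start with the request structure itself, rounded up to 8-byte alignment */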
+	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
+	if (cplength > 0)
+		size += cplength * sizeof(struct ccw1);
+	if (datasize > 0)
+		size += datasize;
+	spin_lock_irqsave(&device->mem_lock, flags);
+	cqr = (struct dasd_ccw_req *)
+		dasd_alloc_chunk(&device->ccw_chunks, size);
+	spin_unlock_irqrestore(&device->mem_lock, flags);
+	if (cqr == NULL)
+		return ERR_PTR(-ENOMEM);
+	memset(cqr, 0, sizeof(struct dasd_ccw_req));
+	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
+	cqr->cpaddr = NULL;
+	if (cplength > 0) {
+		cqr->cpaddr = (struct ccw1 *) data;
+		data += cplength*sizeof(struct ccw1);
+		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
+	}
+	cqr->data = NULL;
+	if (datasize > 0) {
+		cqr->data = data;
+		memset(cqr->data, 0, datasize);
+	}
+	cqr->magic = magic;
+	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	dasd_get_device(device);
+	return cqr;
+}
+
+/*
+ * Free memory of a channel program. This function needs to free all the
+ * idal lists that might have been created by dasd_set_cda and the
+ * struct dasd_ccw_req itself.
+ */
+void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
+{
+#ifdef CONFIG_64BIT
+	struct ccw1 *ccw;
+
+	/* Clear any idals used for the request. */
+	ccw = cqr->cpaddr;
+	do {
+		clear_normalized_cda(ccw);
+	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
+#endif
+	kfree(cqr->cpaddr);
+	kfree(cqr->data);
+	kfree(cqr);
+	dasd_put_device(device);
+}
+
+void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&device->mem_lock, flags);
+	dasd_free_chunk(&device->ccw_chunks, cqr);
+	spin_unlock_irqrestore(&device->mem_lock, flags);
+	dasd_put_device(device);
+}
+
+/*
+ * Check discipline magic in cqr.
+ */
+static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+
+	if (cqr == NULL)
+		return -EINVAL;
+	device = cqr->startdev;
+	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
+		DBF_DEV_EVENT(DBF_WARNING, device,
+			    " dasd_ccw_req 0x%08x magic doesn't match"
+			    " discipline 0x%08x",
+			    cqr->magic,
+			    *(unsigned int *) device->discipline->name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Terminate the current i/o and set the request to clear_pending.
+ * Timer keeps device running.
+ * ccw_device_clear can fail if the i/o subsystem
+ * is in a bad mood.
+ */
+int dasd_term_IO(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+	int retries, rc;
+	char errorstring[ERRORLENGTH];
+
+	/* Check the cqr */
+	rc = dasd_check_cqr(cqr);
+	if (rc)
+		return rc;
+	retries = 0;
+	device = (struct dasd_device *) cqr->startdev;
+	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
+		rc = ccw_device_clear(device->cdev, (long) cqr);
+		switch (rc) {
+		case 0:	/* termination successful */
+			cqr->status = DASD_CQR_CLEAR_PENDING;
+			cqr->stopclk = get_clock();
+			cqr->starttime = 0;
+			DBF_DEV_EVENT(DBF_DEBUG, device,
+				      "terminate cqr %p successful",
+				      cqr);
+			break;
+		case -ENODEV:
+			DBF_DEV_EVENT(DBF_ERR, device, "%s",
+				      "device gone, retry");
+			break;
+		case -EIO:
+			DBF_DEV_EVENT(DBF_ERR, device, "%s",
+				      "I/O error, retry");
+			break;
+		case -EINVAL:
+		case -EBUSY:
+			DBF_DEV_EVENT(DBF_ERR, device, "%s",
+				      "device busy, retry later");
+			break;
+		default:
+			/* internal error 10 - unknown rc*/
+			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
+			dev_err(&device->cdev->dev, "An error occurred in the "
+				"DASD device driver, reason=%s\n", errorstring);
+			BUG();
+			break;
+		}
+		retries++;
+	}
+	dasd_schedule_device_bh(device);
+	return rc;
+}
+
+/*
+ * Start the i/o. This start_IO can fail if the channel is really busy.
+ * In that case set up a timer to start the request later.
+ */
+int dasd_start_IO(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+	int rc;
+	char errorstring[ERRORLENGTH];
+
+	/* Check the cqr */
+	rc = dasd_check_cqr(cqr);
+	if (rc) {
+		cqr->intrc = rc;
+		return rc;
+	}
+	device = (struct dasd_device *) cqr->startdev;
+	if (((cqr->block &&
+	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
+	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
+	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
+			      "because of stolen lock", cqr);
+		cqr->status = DASD_CQR_ERROR;
+		cqr->intrc = -EPERM;
+		return -EPERM;
+	}
+	if (cqr->retries < 0) {
+		/* internal error 14 - start_IO run out of retries */
+		sprintf(errorstring, "14 %p", cqr);
+		dev_err(&device->cdev->dev, "An error occurred in the DASD "
+			"device driver, reason=%s\n", errorstring);
+		cqr->status = DASD_CQR_ERROR;
+		return -EIO;
+	}
+	cqr->startclk = get_clock();
+	cqr->starttime = jiffies;
+	cqr->retries--;
+	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+		cqr->lpm &= device->path_data.opm;
+		if (!cqr->lpm)
+			cqr->lpm = device->path_data.opm;
+	}
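+	/* cpmode == 1 selects a transport-mode (TCW) start, otherwise command-mode CCWs are used */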
+	if (cqr->cpmode == 1) {
+		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
+					 (long) cqr, cqr->lpm);
+	} else {
+		rc = ccw_device_start(device->cdev, cqr->cpaddr,
+				      (long) cqr, cqr->lpm, 0);
+	}
+	switch (rc) {
+	case 0:
+		cqr->status = DASD_CQR_IN_IO;
+		break;
+	case -EBUSY:
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "start_IO: device busy, retry later");
+		break;
+	case -ETIMEDOUT:
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "start_IO: request timeout, retry later");
+		break;
+	case -EACCES:
+		/* -EACCES indicates that the request used only a subset of the
+		 * available paths and all these paths are gone. If the lpm of
+		 * this request was only a subset of the opm (e.g. the ppm) then
+		 * we just do a retry with all available paths.
+		 * If we already use the full opm, something is amiss, and we
+		 * need a full path verification.
+		 */
+		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+			DBF_DEV_EVENT(DBF_WARNING, device,
+				      "start_IO: selected paths gone (%x)",
+				      cqr->lpm);
+		} else if (cqr->lpm != device->path_data.opm) {
+			cqr->lpm = device->path_data.opm;
+			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+				      "start_IO: selected paths gone,"
+				      " retry on all paths");
+		} else {
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				      "start_IO: all paths in opm gone,"
+				      " do path verification");
+			dasd_generic_last_path_gone(device);
+			device->path_data.opm = 0;
+			device->path_data.ppm = 0;
+			device->path_data.npm = 0;
+			device->path_data.tbvpm =
+				ccw_device_get_path_mask(device->cdev);
+		}
+		break;
+	case -ENODEV:
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "start_IO: -ENODEV device gone, retry");
+		break;
+	case -EIO:
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "start_IO: -EIO device gone, retry");
+		break;
+	case -EINVAL:
+		/* most likely caused in power management context */
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "start_IO: -EINVAL device currently "
+			      "not accessible");
+		break;
+	default:
+		/* internal error 11 - unknown rc */
+		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
+		dev_err(&device->cdev->dev,
+			"An error occurred in the DASD device driver, "
+			"reason=%s\n", errorstring);
+		BUG();
+		break;
+	}
+	cqr->intrc = rc;
+	return rc;
+}
+
+/*
+ * Timeout function for dasd devices. This is used for different purposes
+ *  1) missing interrupt handler for normal operation
+ *  2) delayed start of request where start_IO failed with -EBUSY
+ *  3) timeout for missing state change interrupts
+ * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
+ * DASD_CQR_QUEUED for 2) and 3).
+ */
+static void dasd_device_timeout(unsigned long ptr)
+{
+	unsigned long flags;
+	struct dasd_device *device;
+
+	device = (struct dasd_device *) ptr;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	/* re-activate request queue */
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	dasd_schedule_device_bh(device);
+}
+
+/*
+ * Setup timeout for a device in jiffies.
+ */
+void dasd_device_set_timer(struct dasd_device *device, int expires)
+{
+	if (expires == 0)
+		del_timer(&device->timer);
+	else
+		mod_timer(&device->timer, jiffies + expires);
+}
+
+/*
+ * Clear timeout for a device.
+ */
+void dasd_device_clear_timer(struct dasd_device *device)
+{
+	del_timer(&device->timer);
+}
+
+static void dasd_handle_killed_request(struct ccw_device *cdev,
+				       unsigned long intparm)
+{
+	struct dasd_ccw_req *cqr;
+	struct dasd_device *device;
+
+	if (!intparm)
+		return;
+	cqr = (struct dasd_ccw_req *) intparm;
+	if (cqr->status != DASD_CQR_IN_IO) {
+		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
+				"invalid status in handle_killed_request: "
+				"%02x", cqr->status);
+		return;
+	}
+
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device)) {
+		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
+				"unable to get device from cdev");
+		return;
+	}
+
+	if (!cqr->startdev ||
+	    device != cqr->startdev ||
+	    strncmp(cqr->startdev->discipline->ebcname,
+		    (char *) &cqr->magic, 4)) {
+		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
+				"invalid device in request");
+		dasd_put_device(device);
+		return;
+	}
+
+	/* Schedule request to be retried. */
+	cqr->status = DASD_CQR_QUEUED;
+
+	dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
+	dasd_put_device(device);
+}
+
+void dasd_generic_handle_state_change(struct dasd_device *device)
+{
+	/* First of all start sense subsystem status request. */
+	dasd_eer_snss(device);
+
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
+	dasd_schedule_device_bh(device);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+}
+
+/*
+ * Interrupt handler for "normal" ssch-io based dasd devices.
+ */
+void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+		      struct irb *irb)
+{
+	struct dasd_ccw_req *cqr, *next;
+	struct dasd_device *device;
+	unsigned long long now;
+	int expires;
+
+	if (IS_ERR(irb)) {
+		switch (PTR_ERR(irb)) {
+		case -EIO:
+			break;
+		case -ETIMEDOUT:
+			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+					"request timed out\n", __func__);
+			break;
+		default:
+			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+					"unknown error %ld\n", __func__,
+					PTR_ERR(irb));
+		}
+		dasd_handle_killed_request(cdev, intparm);
+		return;
+	}
+
+	now = get_clock();
+	cqr = (struct dasd_ccw_req *) intparm;
+	/* check for conditions that should be handled immediately */
+	if (!cqr ||
+	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+	      scsw_cstat(&irb->scsw) == 0)) {
+		if (cqr)
+			memcpy(&cqr->irb, irb, sizeof(*irb));
+		device = dasd_device_from_cdev_locked(cdev);
+		if (IS_ERR(device))
+			return;
+		/* ignore unsolicited interrupts for DIAG discipline */
+		if (device->discipline == dasd_diag_discipline_pointer) {
+			dasd_put_device(device);
+			return;
+		}
+		device->discipline->dump_sense_dbf(device, irb, "int");
+		if (device->features & DASD_FEATURE_ERPLOG)
+			device->discipline->dump_sense(device, cqr, irb);
+		device->discipline->check_for_device_change(device, cqr, irb);
+		dasd_put_device(device);
+	}
+	if (!cqr)
+		return;
+
+	device = (struct dasd_device *) cqr->startdev;
+	if (!device ||
+	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
+		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
+				"invalid device in request");
+		return;
+	}
+
+	/* Check for clear pending */
+	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
+	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
+		cqr->status = DASD_CQR_CLEARED;
+		dasd_device_clear_timer(device);
+		wake_up(&dasd_flush_wq);
+		dasd_schedule_device_bh(device);
+		return;
+	}
+
+	/* check status - the request might have been killed by dyn detach */
+	if (cqr->status != DASD_CQR_IN_IO) {
+		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
+			      "status %02x", dev_name(&cdev->dev), cqr->status);
+		return;
+	}
+
+	next = NULL;
+	expires = 0;
+	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+	    scsw_cstat(&irb->scsw) == 0) {
+		/* request was completed successfully */
+		cqr->status = DASD_CQR_SUCCESS;
+		cqr->stopclk = now;
+		/* Start first request on queue if possible -> fast_io. */
+		if (cqr->devlist.next != &device->ccw_queue) {
+			next = list_entry(cqr->devlist.next,
+					  struct dasd_ccw_req, devlist);
+		}
+	} else {  /* error */
+		/*
+		 * If we don't want complex ERP for this request, then just
+		 * reset this and retry it in the fastpath
+		 */
+		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
+		    cqr->retries > 0) {
+			if (cqr->lpm == device->path_data.opm)
+				DBF_DEV_EVENT(DBF_DEBUG, device,
+					      "default ERP in fastpath "
+					      "(%i retries left)",
+					      cqr->retries);
+			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+				cqr->lpm = device->path_data.opm;
+			cqr->status = DASD_CQR_QUEUED;
+			next = cqr;
+		} else
+			cqr->status = DASD_CQR_ERROR;
+	}
+	if (next && (next->status == DASD_CQR_QUEUED) &&
+	    (!device->stopped)) {
+		if (device->discipline->start_IO(next) == 0)
+			expires = next->expires;
+	}
+	if (expires != 0)
+		dasd_device_set_timer(device, expires);
+	else
+		dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
+}
+
+enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
+{
+	struct dasd_device *device;
+
+	device = dasd_device_from_cdev_locked(cdev);
+
+	if (IS_ERR(device))
+		goto out;
+	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
+	   device->state != device->target ||
+	   !device->discipline->check_for_device_change){
+		dasd_put_device(device);
+		goto out;
+	}
+	if (device->discipline->dump_sense_dbf)
+		device->discipline->dump_sense_dbf(device, irb, "uc");
+	device->discipline->check_for_device_change(device, NULL, irb);
+	dasd_put_device(device);
+out:
+	return UC_TODO_RETRY;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
+
+/*
+ * If we have an error on a dasd_block layer request then we cancel
+ * and return all further requests from the same dasd_block as well.
+ */
+static void __dasd_device_recovery(struct dasd_device *device,
+				   struct dasd_ccw_req *ref_cqr)
+{
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+
+	/*
+	 * only requeue requests that came from the dasd_block layer
+	 */
+	if (!ref_cqr->block)
+		return;
+
+	list_for_each_safe(l, n, &device->ccw_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, devlist);
+		if (cqr->status == DASD_CQR_QUEUED &&
+		    ref_cqr->block == cqr->block) {
+			cqr->status = DASD_CQR_CLEARED;
+		}
+	}
+};
+
+/*
+ * Remove those ccw requests from the queue that need to be returned
+ * to the upper layer.
+ */
+static void __dasd_device_process_ccw_queue(struct dasd_device *device,
+					    struct list_head *final_queue)
+{
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+
+	/* Process request with final status. */
+	list_for_each_safe(l, n, &device->ccw_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, devlist);
+
+		/* Stop list processing at the first non-final request. */
+		if (cqr->status == DASD_CQR_QUEUED ||
+		    cqr->status == DASD_CQR_IN_IO ||
+		    cqr->status == DASD_CQR_CLEAR_PENDING)
+			break;
+		if (cqr->status == DASD_CQR_ERROR) {
+			__dasd_device_recovery(device, cqr);
+		}
+		/* Rechain finished requests to final queue */
+		list_move_tail(&cqr->devlist, final_queue);
+	}
+}
+
+/*
+ * the cqrs from the final queue are returned to the upper layer
+ * by setting a dasd_block state and calling the callback function
+ */
+static void __dasd_device_process_final_queue(struct dasd_device *device,
+					      struct list_head *final_queue)
+{
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+	struct dasd_block *block;
+	void (*callback)(struct dasd_ccw_req *, void *data);
+	void *callback_data;
+	char errorstring[ERRORLENGTH];
+
+	list_for_each_safe(l, n, final_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, devlist);
+		list_del_init(&cqr->devlist);
+		block = cqr->block;
+		callback = cqr->callback;
+		callback_data = cqr->callback_data;
+		if (block)
+			spin_lock_bh(&block->queue_lock);
+		switch (cqr->status) {
+		case DASD_CQR_SUCCESS:
+			cqr->status = DASD_CQR_DONE;
+			break;
+		case DASD_CQR_ERROR:
+			cqr->status = DASD_CQR_NEED_ERP;
+			break;
+		case DASD_CQR_CLEARED:
+			cqr->status = DASD_CQR_TERMINATED;
+			break;
+		default:
+			/* internal error 12 - wrong cqr status*/
+			snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
+			dev_err(&device->cdev->dev,
+				"An error occurred in the DASD device driver, "
+				"reason=%s\n", errorstring);
+			BUG();
+		}
+		if (cqr->callback != NULL)
+			(callback)(cqr, callback_data);
+		if (block)
+			spin_unlock_bh(&block->queue_lock);
+	}
+}
+
+/*
+ * Take a look at the first request on the ccw queue and check
+ * if it reached its expire time. If so, terminate the IO.
+ */
+static void __dasd_device_check_expire(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+
+	if (list_empty(&device->ccw_queue))
+		return;
+	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
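+	/* cqr->expires is relative to cqr->starttime, both measured in jiffies */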
+	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
+	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
+		if (device->discipline->term_IO(cqr) != 0) {
+			/* Hmpf, try again in 5 sec */
+			dev_err(&device->cdev->dev,
+				"cqr %p timed out (%lus) but cannot be "
+				"ended, retrying in 5 s\n",
+				cqr, (cqr->expires/HZ));
+			cqr->expires += 5*HZ;
+			dasd_device_set_timer(device, 5*HZ);
+		} else {
+			dev_err(&device->cdev->dev,
+				"cqr %p timed out (%lus), %i retries "
+				"remaining\n", cqr, (cqr->expires/HZ),
+				cqr->retries);
+		}
+	}
+}
+
+/*
+ * Take a look at the first request on the ccw queue and check
+ * if it needs to be started.
+ */
+static void __dasd_device_start_head(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+
+	if (list_empty(&device->ccw_queue))
+		return;
+	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
+	if (cqr->status != DASD_CQR_QUEUED)
+		return;
+	/* When the device is stopped, return the request to the previous
+	 * layer. Exception: only the disconnect or unresumed bits are set
+	 * and the cqr is a path verification request.
+	 */
+	if (device->stopped &&
+	    !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
+	      && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
+		cqr->intrc = -EAGAIN;
+		cqr->status = DASD_CQR_CLEARED;
+		dasd_schedule_device_bh(device);
+		return;
+	}
+
+	rc = device->discipline->start_IO(cqr);
+	if (rc == 0)
+		dasd_device_set_timer(device, cqr->expires);
+	else if (rc == -EACCES) {
+		dasd_schedule_device_bh(device);
+	} else
+		/* Hmpf, try again in 1/2 sec */
+		dasd_device_set_timer(device, 50);
+}
+
+static void __dasd_device_check_path_events(struct dasd_device *device)
+{
+	int rc;
+
+	if (device->path_data.tbvpm) {
+		if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
+					DASD_UNRESUMED_PM))
+			return;
+		rc = device->discipline->verify_path(
+			device, device->path_data.tbvpm);
+		if (rc)
+			dasd_device_set_timer(device, 50);
+		else
+			device->path_data.tbvpm = 0;
+	}
+}
+
+/*
+ * Go through all request on the dasd_device request queue,
+ * terminate them on the cdev if necessary, and return them to the
+ * submitting layer via callback.
+ * Note:
+ * Make sure that all 'submitting layers' still exist when
+ * this function is called! In other words, when 'device' is a base
+ * device, all block layer requests must already have been removed
+ * via dasd_flush_block_queue.
+ */
+int dasd_flush_device_queue(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr, *n;
+	int rc;
+	struct list_head flush_queue;
+
+	INIT_LIST_HEAD(&flush_queue);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = 0;
+	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
+		/* Check status and move request to flush_queue */
+		switch (cqr->status) {
+		case DASD_CQR_IN_IO:
+			rc = device->discipline->term_IO(cqr);
+			if (rc) {
+				/* unable to terminate request */
+				dev_err(&device->cdev->dev,
+					"Flushing the DASD request queue "
+					"failed for request %p\n", cqr);
+				/* stop flush processing */
+				goto finished;
+			}
+			break;
+		case DASD_CQR_QUEUED:
+			cqr->stopclk = get_clock();
+			cqr->status = DASD_CQR_CLEARED;
+			break;
+		default: /* no need to modify the others */
+			break;
+		}
+		list_move_tail(&cqr->devlist, &flush_queue);
+	}
+finished:
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	/*
+	 * After this point all requests must be in state CLEAR_PENDING,
+	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
+	 * one of the others.
+	 */
+	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
+		wait_event(dasd_flush_wq,
+			   (cqr->status != DASD_CQR_CLEAR_PENDING));
+	/*
+	 * Now set each request back to TERMINATED, DONE or NEED_ERP
+	 * and call the callback function of flushed requests
+	 */
+	__dasd_device_process_final_queue(device, &flush_queue);
+	return rc;
+}
+
+/*
+ * Acquire the device lock and process queues for the device.
+ */
+static void dasd_device_tasklet(struct dasd_device *device)
+{
+	struct list_head final_queue;
+
+	atomic_set (&device->tasklet_scheduled, 0);
+	INIT_LIST_HEAD(&final_queue);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Check expire time of first request on the ccw queue. */
+	__dasd_device_check_expire(device);
+	/* find final requests on ccw queue */
+	__dasd_device_process_ccw_queue(device, &final_queue);
+	__dasd_device_check_path_events(device);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	/* Now call the callback function of requests with final status */
+	__dasd_device_process_final_queue(device, &final_queue);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Now check if the head of the ccw queue needs to be started. */
+	__dasd_device_start_head(device);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	dasd_put_device(device);
+}
+
+/*
+ * Schedules a call to dasd_device_tasklet over the device tasklet.
+ */
+void dasd_schedule_device_bh(struct dasd_device *device)
+{
+	/* Protect against rescheduling. */
+	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
+		return;
+	dasd_get_device(device);
+	tasklet_hi_schedule(&device->tasklet);
+}
+
+void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
+{
+	device->stopped |= bits;
+}
+EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
+
+void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
+{
+	device->stopped &= ~bits;
+	if (!device->stopped)
+		wake_up(&generic_waitq);
+}
+EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
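+
+/*
+ * Illustrative sketch (not part of the original driver): the stop bits are
+ * manipulated under the ccw device lock.  A hypothetical caller that wants
+ * to quiesce a device, do some work and resume it could bracket the work
+ * like this, mirroring how the driver itself uses DASD_STOPPED_QUIESCE.
+ */
+#if 0
+static void example_quiesce_and_resume(struct dasd_device *device)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+	/* ... perform the maintenance action ... */
+
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_QUIESCE);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	/* kick the tasklet so queued requests are started again */
+	dasd_schedule_device_bh(device);
+}
+#endif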
+
+/*
+ * Queue a request to the head of the device ccw_queue.
+ * Start the I/O if possible.
+ */
+void dasd_add_request_head(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+
+	device = cqr->startdev;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	cqr->status = DASD_CQR_QUEUED;
+	list_add(&cqr->devlist, &device->ccw_queue);
+	/* let the bh start the request to keep them in order */
+	dasd_schedule_device_bh(device);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+
+/*
+ * Queue a request to the tail of the device ccw_queue.
+ * Start the I/O if possible.
+ */
+void dasd_add_request_tail(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+
+	device = cqr->startdev;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	cqr->status = DASD_CQR_QUEUED;
+	list_add_tail(&cqr->devlist, &device->ccw_queue);
+	/* let the bh start the request to keep them in order */
+	dasd_schedule_device_bh(device);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+
+/*
+ * Wakeup helper for the 'sleep_on' functions.
+ */
+void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
+{
+	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+	cqr->callback_data = DASD_SLEEPON_END_TAG;
+	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+	wake_up(&generic_waitq);
+}
+EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
+
+static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+	int rc;
+
+	device = cqr->startdev;
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
+/*
+ * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
+ */
+static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+	dasd_erp_fn_t erp_fn;
+
+	if (cqr->status == DASD_CQR_FILLED)
+		return 0;
+	device = cqr->startdev;
+	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
+		if (cqr->status == DASD_CQR_TERMINATED) {
+			device->discipline->handle_terminated_request(cqr);
+			return 1;
+		}
+		if (cqr->status == DASD_CQR_NEED_ERP) {
+			erp_fn = device->discipline->erp_action(cqr);
+			erp_fn(cqr);
+			return 1;
+		}
+		if (cqr->status == DASD_CQR_FAILED)
+			dasd_log_sense(cqr, &cqr->irb);
+		if (cqr->refers) {
+			__dasd_process_erp(device, cqr);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
+{
+	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
+		if (cqr->refers) /* erp is not done yet */
+			return 1;
+		return ((cqr->status != DASD_CQR_DONE) &&
+			(cqr->status != DASD_CQR_FAILED));
+	} else
+		return (cqr->status == DASD_CQR_FILLED);
+}
+
+static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
+{
+	struct dasd_device *device;
+	int rc;
+	struct list_head ccw_queue;
+	struct dasd_ccw_req *cqr;
+
+	INIT_LIST_HEAD(&ccw_queue);
+	maincqr->status = DASD_CQR_FILLED;
+	device = maincqr->startdev;
+	list_add(&maincqr->blocklist, &ccw_queue);
+	for (cqr = maincqr;  __dasd_sleep_on_loop_condition(cqr);
+	     cqr = list_first_entry(&ccw_queue,
+				    struct dasd_ccw_req, blocklist)) {
+
+		if (__dasd_sleep_on_erp(cqr))
+			continue;
+		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
+			continue;
+		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EPERM;
+			continue;
+		}
+		/* Non-temporary stop condition will trigger fail fast */
+		if (device->stopped & ~DASD_STOPPED_PENDING &&
+		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+		    (!dasd_eer_enabled(device))) {
+			cqr->status = DASD_CQR_FAILED;
+			continue;
+		}
+		/* Don't try to start requests if device is stopped */
+		if (interruptible) {
+			rc = wait_event_interruptible(
+				generic_waitq, !(device->stopped));
+			if (rc == -ERESTARTSYS) {
+				cqr->status = DASD_CQR_FAILED;
+				maincqr->intrc = rc;
+				continue;
+			}
+		} else
+			wait_event(generic_waitq, !(device->stopped));
+
+		if (!cqr->callback)
+			cqr->callback = dasd_wakeup_cb;
+
+		cqr->callback_data = DASD_SLEEPON_START_TAG;
+		dasd_add_request_tail(cqr);
+		if (interruptible) {
+			rc = wait_event_interruptible(
+				generic_waitq, _wait_for_wakeup(cqr));
+			if (rc == -ERESTARTSYS) {
+				dasd_cancel_req(cqr);
+				/* wait (non-interruptible) for final status */
+				wait_event(generic_waitq,
+					   _wait_for_wakeup(cqr));
+				cqr->status = DASD_CQR_FAILED;
+				maincqr->intrc = rc;
+				continue;
+			}
+		} else
+			wait_event(generic_waitq, _wait_for_wakeup(cqr));
+	}
+
+	maincqr->endclk = get_clock();
+	if ((maincqr->status != DASD_CQR_DONE) &&
+	    (maincqr->intrc != -ERESTARTSYS))
+		dasd_log_sense(maincqr, &maincqr->irb);
+	if (maincqr->status == DASD_CQR_DONE)
+		rc = 0;
+	else if (maincqr->intrc)
+		rc = maincqr->intrc;
+	else
+		rc = -EIO;
+	return rc;
+}
+
+/*
+ * Queue a request to the tail of the device ccw_queue and wait for
+ * its completion.
+ */
+int dasd_sleep_on(struct dasd_ccw_req *cqr)
+{
+	return _dasd_sleep_on(cqr, 0);
+}
+
+/*
+ * Queue a request to the tail of the device ccw_queue and wait
+ * interruptible for its completion.
+ */
+int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
+{
+	return _dasd_sleep_on(cqr, 1);
+}
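+
+/*
+ * Illustrative sketch (not part of the original driver): the typical
+ * synchronous pattern used by the disciplines.  build_example_cqr() is a
+ * hypothetical stand-in for a discipline's request builder; the real
+ * dasd_generic_read_dev_chars() below follows exactly this shape.
+ */
+#if 0
+static int example_sync_request(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+
+	cqr = build_example_cqr(device);	/* hypothetical helper */
+	if (IS_ERR(cqr))
+		return PTR_ERR(cqr);
+	cqr->status = DASD_CQR_FILLED;		/* ready to be queued */
+	rc = dasd_sleep_on(cqr);		/* waits for final status */
+	dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+#endif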
+
+/*
+ * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
+ * for eckd devices) the currently running request has to be terminated
+ * and be put back to status queued, before the special request is added
+ * to the head of the queue. Then the special request is waited on normally.
+ */
+static inline int _dasd_term_running_cqr(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+
+	if (list_empty(&device->ccw_queue))
+		return 0;
+	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
+	rc = device->discipline->term_IO(cqr);
+	if (!rc)
+		/*
+		 * CQR terminated because a more important request is pending.
+		 * Undo decreasing of retry counter because this is
+		 * not an error case.
+		 */
+		cqr->retries++;
+	return rc;
+}
+
+int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+	int rc;
+
+	device = cqr->startdev;
+	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+		cqr->status = DASD_CQR_FAILED;
+		cqr->intrc = -EPERM;
+		return -EIO;
+	}
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = _dasd_term_running_cqr(device);
+	if (rc) {
+		spin_unlock_irq(get_ccwdev_lock(device->cdev));
+		return rc;
+	}
+	cqr->callback = dasd_wakeup_cb;
+	cqr->callback_data = DASD_SLEEPON_START_TAG;
+	cqr->status = DASD_CQR_QUEUED;
+	/*
+	 * add the new request as second on the queue;
+	 * the terminated cqr needs to be finished first
+	 */
+	list_add(&cqr->devlist, device->ccw_queue.next);
+
+	/* let the bh start the request to keep them in order */
+	dasd_schedule_device_bh(device);
+
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+
+	wait_event(generic_waitq, _wait_for_wakeup(cqr));
+
+	if (cqr->status == DASD_CQR_DONE)
+		rc = 0;
+	else if (cqr->intrc)
+		rc = cqr->intrc;
+	else
+		rc = -EIO;
+	return rc;
+}
+
+/*
+ * Cancels a request that was started with dasd_sleep_on.
+ * This is useful for timing out requests. The request will be
+ * terminated if it is currently in i/o.
+ * Returns 1 if the request has been terminated.
+ *	   0 if there was no need to terminate the request (not started yet)
+ *	   negative error code if termination failed
+ * Cancellation of a request is an asynchronous operation! The calling
+ * function has to wait until the request is properly returned via callback.
+ */
+int dasd_cancel_req(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device = cqr->startdev;
+	unsigned long flags;
+	int rc;
+
+	rc = 0;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	switch (cqr->status) {
+	case DASD_CQR_QUEUED:
+		/* request was not started - just set to cleared */
+		cqr->status = DASD_CQR_CLEARED;
+		break;
+	case DASD_CQR_IN_IO:
+		/* request in IO - terminate IO and release again */
+		rc = device->discipline->term_IO(cqr);
+		if (rc) {
+			dev_err(&device->cdev->dev,
+				"Cancelling request %p failed with rc=%d\n",
+				cqr, rc);
+		} else {
+			cqr->stopclk = get_clock();
+		}
+		break;
+	default: /* already finished or clear pending - do nothing */
+		break;
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	dasd_schedule_device_bh(device);
+	return rc;
+}
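+
+/*
+ * Illustrative sketch (not part of the original driver): because
+ * cancellation is asynchronous, a caller that installed dasd_wakeup_cb as
+ * callback still has to wait for the final status after dasd_cancel_req(),
+ * just as the interruptible sleep_on path above does.
+ */
+#if 0
+static void example_cancel_and_wait(struct dasd_ccw_req *cqr)
+{
+	dasd_cancel_req(cqr);
+	/* wait until the request has been returned via its callback */
+	wait_event(generic_waitq, _wait_for_wakeup(cqr));
+}
+#endif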
+
+
+/*
+ * SECTION: Operations of the dasd_block layer.
+ */
+
+/*
+ * Timeout function for dasd_block. This is used when the block layer
+ * is waiting for something that may not come reliably (e.g. a state
+ * change interrupt).
+ */
+static void dasd_block_timeout(unsigned long ptr)
+{
+	unsigned long flags;
+	struct dasd_block *block;
+
+	block = (struct dasd_block *) ptr;
+	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
+	/* re-activate request queue */
+	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
+	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
+	dasd_schedule_block_bh(block);
+}
+
+/*
+ * Setup timeout for a dasd_block in jiffies.
+ */
+void dasd_block_set_timer(struct dasd_block *block, int expires)
+{
+	if (expires == 0)
+		del_timer(&block->timer);
+	else
+		mod_timer(&block->timer, jiffies + expires);
+}
+
+/*
+ * Clear timeout for a dasd_block.
+ */
+void dasd_block_clear_timer(struct dasd_block *block)
+{
+	del_timer(&block->timer);
+}
+
+/*
+ * Process finished error recovery ccw.
+ */
+static void __dasd_process_erp(struct dasd_device *device,
+			       struct dasd_ccw_req *cqr)
+{
+	dasd_erp_fn_t erp_fn;
+
+	if (cqr->status == DASD_CQR_DONE)
+		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
+	else
+		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
+	erp_fn = device->discipline->erp_postaction(cqr);
+	erp_fn(cqr);
+}
+
+/*
+ * Fetch requests from the block device queue.
+ */
+static void __dasd_process_request_queue(struct dasd_block *block)
+{
+	struct request_queue *queue;
+	struct request *req;
+	struct dasd_ccw_req *cqr;
+	struct dasd_device *basedev;
+	unsigned long flags;
+	queue = block->request_queue;
+	basedev = block->base;
+	/* No queue ? Then there is nothing to do. */
+	if (queue == NULL)
+		return;
+
+	/*
+	 * We requeue requests from the block device queue to the ccw
+	 * queue only in two states. In state DASD_STATE_READY the
+	 * partition detection is done and we need to requeue requests
+	 * for that. State DASD_STATE_ONLINE is normal block device
+	 * operation.
+	 */
+	if (basedev->state < DASD_STATE_READY) {
+		while ((req = blk_fetch_request(block->request_queue)))
+			__blk_end_request_all(req, -EIO);
+		return;
+	}
+	/* Now we try to fetch requests from the request queue */
+	while ((req = blk_peek_request(queue))) {
+		if (basedev->features & DASD_FEATURE_READONLY &&
+		    rq_data_dir(req) == WRITE) {
+			DBF_DEV_EVENT(DBF_ERR, basedev,
+				      "Rejecting write request %p",
+				      req);
+			blk_start_request(req);
+			__blk_end_request_all(req, -EIO);
+			continue;
+		}
+		cqr = basedev->discipline->build_cp(basedev, block, req);
+		if (IS_ERR(cqr)) {
+			if (PTR_ERR(cqr) == -EBUSY)
+				break;	/* normal end condition */
+			if (PTR_ERR(cqr) == -ENOMEM)
+				break;	/* terminate request queue loop */
+			if (PTR_ERR(cqr) == -EAGAIN) {
+				/*
+				 * The current request cannot be built right
+				 * now, we have to try later. If this request
+				 * is the head-of-queue we stop the device
+				 * for 1/2 second.
+				 */
+				if (!list_empty(&block->ccw_queue))
+					break;
+				spin_lock_irqsave(
+					get_ccwdev_lock(basedev->cdev), flags);
+				dasd_device_set_stop_bits(basedev,
+							  DASD_STOPPED_PENDING);
+				spin_unlock_irqrestore(
+					get_ccwdev_lock(basedev->cdev), flags);
+				dasd_block_set_timer(block, HZ/2);
+				break;
+			}
+			DBF_DEV_EVENT(DBF_ERR, basedev,
+				      "CCW creation failed (rc=%ld) "
+				      "on request %p",
+				      PTR_ERR(cqr), req);
+			blk_start_request(req);
+			__blk_end_request_all(req, -EIO);
+			continue;
+		}
+		/*
+		 *  Note: callback is set to dasd_return_cqr_cb in
+		 * __dasd_block_start_head to cover erp requests as well
+		 */
+		cqr->callback_data = (void *) req;
+		cqr->status = DASD_CQR_FILLED;
+		blk_start_request(req);
+		list_add_tail(&cqr->blocklist, &block->ccw_queue);
+		dasd_profile_start(block, cqr, req);
+	}
+}
+
+static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
+{
+	struct request *req;
+	int status;
+	int error = 0;
+
+	req = (struct request *) cqr->callback_data;
+	dasd_profile_end(cqr->block, cqr, req);
+	status = cqr->block->base->discipline->free_cp(cqr, req);
+	if (status <= 0)
+		error = status ? status : -EIO;
+	__blk_end_request_all(req, error);
+}
+
+/*
+ * Process ccw request queue.
+ */
+static void __dasd_process_block_ccw_queue(struct dasd_block *block,
+					   struct list_head *final_queue)
+{
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+	dasd_erp_fn_t erp_fn;
+	unsigned long flags;
+	struct dasd_device *base = block->base;
+
+restart:
+	/* Process requests with final status. */
+	list_for_each_safe(l, n, &block->ccw_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
+		if (cqr->status != DASD_CQR_DONE &&
+		    cqr->status != DASD_CQR_FAILED &&
+		    cqr->status != DASD_CQR_NEED_ERP &&
+		    cqr->status != DASD_CQR_TERMINATED)
+			continue;
+
+		if (cqr->status == DASD_CQR_TERMINATED) {
+			base->discipline->handle_terminated_request(cqr);
+			goto restart;
+		}
+
+		/*  Process requests that may be recovered */
+		if (cqr->status == DASD_CQR_NEED_ERP) {
+			erp_fn = base->discipline->erp_action(cqr);
+			if (IS_ERR(erp_fn(cqr)))
+				continue;
+			goto restart;
+		}
+
+		/* log sense for fatal error */
+		if (cqr->status == DASD_CQR_FAILED) {
+			dasd_log_sense(cqr, &cqr->irb);
+		}
+
+		/* First of all call extended error reporting. */
+		if (dasd_eer_enabled(base) &&
+		    cqr->status == DASD_CQR_FAILED) {
+			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
+
+			/* restart request  */
+			cqr->status = DASD_CQR_FILLED;
+			cqr->retries = 255;
+			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
+			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
+					       flags);
+			goto restart;
+		}
+
+		/* Process finished ERP request. */
+		if (cqr->refers) {
+			__dasd_process_erp(base, cqr);
+			goto restart;
+		}
+
+		/* Rechain finished requests to final queue */
+		cqr->endclk = get_clock();
+		list_move_tail(&cqr->blocklist, final_queue);
+	}
+}
+
+static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
+{
+	dasd_schedule_block_bh(cqr->block);
+}
+
+static void __dasd_block_start_head(struct dasd_block *block)
+{
+	struct dasd_ccw_req *cqr;
+
+	if (list_empty(&block->ccw_queue))
+		return;
+	/* We always begin with the first requests on the queue, as some
+	 * of the previously started requests have to be enqueued on a
+	 * dasd_device again for error recovery.
+	 */
+	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
+		if (cqr->status != DASD_CQR_FILLED)
+			continue;
+		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
+		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EPERM;
+			dasd_schedule_block_bh(block);
+			continue;
+		}
+		/* Non-temporary stop condition will trigger fail fast */
+		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
+		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+		    (!dasd_eer_enabled(block->base))) {
+			cqr->status = DASD_CQR_FAILED;
+			dasd_schedule_block_bh(block);
+			continue;
+		}
+		/* Don't try to start requests if device is stopped */
+		if (block->base->stopped)
+			return;
+
+		/* just a fail-safe check, should not happen */
+		if (!cqr->startdev)
+			cqr->startdev = block->base;
+
+		/* make sure that the requests we submit find their way back */
+		cqr->callback = dasd_return_cqr_cb;
+
+		dasd_add_request_tail(cqr);
+	}
+}
+
+/*
+ * Central dasd_block layer routine. Takes requests from the generic
+ * block layer request queue, creates ccw requests, enqueues them on
+ * a dasd_device and processes ccw requests that have been returned.
+ */
+static void dasd_block_tasklet(struct dasd_block *block)
+{
+	struct list_head final_queue;
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+
+	atomic_set(&block->tasklet_scheduled, 0);
+	INIT_LIST_HEAD(&final_queue);
+	spin_lock(&block->queue_lock);
+	/* Finish off requests on ccw queue */
+	__dasd_process_block_ccw_queue(block, &final_queue);
+	spin_unlock(&block->queue_lock);
+	/* Now call the callback function of requests with final status */
+	spin_lock_irq(&block->request_queue_lock);
+	list_for_each_safe(l, n, &final_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
+		list_del_init(&cqr->blocklist);
+		__dasd_cleanup_cqr(cqr);
+	}
+	spin_lock(&block->queue_lock);
+	/* Get new request from the block device request queue */
+	__dasd_process_request_queue(block);
+	/* Now check if the head of the ccw queue needs to be started. */
+	__dasd_block_start_head(block);
+	spin_unlock(&block->queue_lock);
+	spin_unlock_irq(&block->request_queue_lock);
+	dasd_put_device(block->base);
+}
+
+static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
+{
+	wake_up(&dasd_flush_wq);
+}
+
+/*
+ * Go through all request on the dasd_block request queue, cancel them
+ * on the respective dasd_device, and return them to the generic
+ * block layer.
+ */
+static int dasd_flush_block_queue(struct dasd_block *block)
+{
+	struct dasd_ccw_req *cqr, *n;
+	int rc, i;
+	struct list_head flush_queue;
+
+	INIT_LIST_HEAD(&flush_queue);
+	spin_lock_bh(&block->queue_lock);
+	rc = 0;
+restart:
+	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
+		/* if this request is currently owned by a dasd_device, cancel it */
+		if (cqr->status >= DASD_CQR_QUEUED)
+			rc = dasd_cancel_req(cqr);
+		if (rc < 0)
+			break;
+		/* Rechain request (including erp chain) so it won't be
+		 * touched by the dasd_block_tasklet anymore.
+		 * Replace the callback so we notice when the request
+		 * is returned from the dasd_device layer.
+		 */
+		cqr->callback = _dasd_wake_block_flush_cb;
+		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
+			list_move_tail(&cqr->blocklist, &flush_queue);
+		if (i > 1)
+			/* moved more than one request - need to restart */
+			goto restart;
+	}
+	spin_unlock_bh(&block->queue_lock);
+	/* Now call the callback function of flushed requests */
+restart_cb:
+	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
+		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
+		/* Process finished ERP request. */
+		if (cqr->refers) {
+			spin_lock_bh(&block->queue_lock);
+			__dasd_process_erp(block->base, cqr);
+			spin_unlock_bh(&block->queue_lock);
+			/* restart list_for_xx loop since dasd_process_erp
+			 * might remove multiple elements */
+			goto restart_cb;
+		}
+		/* call the callback function */
+		spin_lock_irq(&block->request_queue_lock);
+		cqr->endclk = get_clock();
+		list_del_init(&cqr->blocklist);
+		__dasd_cleanup_cqr(cqr);
+		spin_unlock_irq(&block->request_queue_lock);
+	}
+	return rc;
+}
+
+/*
+ * Schedules a call to dasd_block_tasklet over the block tasklet.
+ */
+void dasd_schedule_block_bh(struct dasd_block *block)
+{
+	/* Protect against rescheduling. */
+	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
+		return;
+	/* life cycle of block is bound to its base device */
+	dasd_get_device(block->base);
+	tasklet_hi_schedule(&block->tasklet);
+}
+
+
+/*
+ * SECTION: external block device operations
+ * (request queue handling, open, release, etc.)
+ */
+
+/*
+ * Dasd request queue function. Called from ll_rw_blk.c
+ */
+static void do_dasd_request(struct request_queue *queue)
+{
+	struct dasd_block *block;
+
+	block = queue->queuedata;
+	spin_lock(&block->queue_lock);
+	/* Get new request from the block device request queue */
+	__dasd_process_request_queue(block);
+	/* Now check if the head of the ccw queue needs to be started. */
+	__dasd_block_start_head(block);
+	spin_unlock(&block->queue_lock);
+}
+
+/*
+ * Allocate and initialize request queue and default I/O scheduler.
+ */
+static int dasd_alloc_queue(struct dasd_block *block)
+{
+	int rc;
+
+	block->request_queue = blk_init_queue(do_dasd_request,
+					       &block->request_queue_lock);
+	if (block->request_queue == NULL)
+		return -ENOMEM;
+
+	block->request_queue->queuedata = block;
+
+	elevator_exit(block->request_queue->elevator);
+	block->request_queue->elevator = NULL;
+	rc = elevator_init(block->request_queue, "deadline");
+	if (rc) {
+		blk_cleanup_queue(block->request_queue);
+		return rc;
+	}
+	return 0;
+}
+
+/*
+ * Allocate and initialize request queue.
+ */
+static void dasd_setup_queue(struct dasd_block *block)
+{
+	int max;
+
+	if (block->base->features & DASD_FEATURE_USERAW) {
+		/*
+		 * The max_blocks value for raw_track access is 256;
+		 * it is higher than the native ECKD value because we
+		 * only need one ccw per track, so max_hw_sectors is
+		 * 2048 x 512B = 1024kB = 16 tracks.
+		 */
+		max = 2048;
+	} else {
+		max = block->base->discipline->max_blocks << block->s2b_shift;
+	}
+	blk_queue_logical_block_size(block->request_queue,
+				     block->bp_block);
+	blk_queue_max_hw_sectors(block->request_queue, max);
+	blk_queue_max_segments(block->request_queue, -1L);
+	/* with page sized segments we can translate each segment into
+	 * one idaw/tidaw
+	 */
+	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
+	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
+}
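+
+/*
+ * Illustrative numbers (not from the original source): for a hypothetical
+ * discipline with max_blocks = 96 and a 4KiB block size (s2b_shift = 3),
+ * the non-raw branch above yields max = 96 << 3 = 768 sectors, i.e. 384KiB
+ * per request, while the raw-track case is fixed at 2048 sectors (1024KiB).
+ */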
+
+/*
+ * Deactivate and free request queue.
+ */
+static void dasd_free_queue(struct dasd_block *block)
+{
+	if (block->request_queue) {
+		blk_cleanup_queue(block->request_queue);
+		block->request_queue = NULL;
+	}
+}
+
+/*
+ * Flush requests on the request queue.
+ */
+static void dasd_flush_request_queue(struct dasd_block *block)
+{
+	struct request *req;
+
+	if (!block->request_queue)
+		return;
+
+	spin_lock_irq(&block->request_queue_lock);
+	while ((req = blk_fetch_request(block->request_queue)))
+		__blk_end_request_all(req, -EIO);
+	spin_unlock_irq(&block->request_queue_lock);
+}
+
+static int dasd_open(struct block_device *bdev, fmode_t mode)
+{
+	struct dasd_device *base;
+	int rc;
+
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+
+	atomic_inc(&base->block->open_count);
+	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
+		rc = -ENODEV;
+		goto unlock;
+	}
+
+	if (!try_module_get(base->discipline->owner)) {
+		rc = -EINVAL;
+		goto unlock;
+	}
+
+	if (dasd_probeonly) {
+		dev_info(&base->cdev->dev,
+			 "Accessing the DASD failed because it is in "
+			 "probeonly mode\n");
+		rc = -EPERM;
+		goto out;
+	}
+
+	if (base->state <= DASD_STATE_BASIC) {
+		DBF_DEV_EVENT(DBF_ERR, base, " %s",
+			      " Cannot open unrecognized device");
+		rc = -ENODEV;
+		goto out;
+	}
+
+	if ((mode & FMODE_WRITE) &&
+	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
+	     (base->features & DASD_FEATURE_READONLY))) {
+		rc = -EROFS;
+		goto out;
+	}
+
+	dasd_put_device(base);
+	return 0;
+
+out:
+	module_put(base->discipline->owner);
+unlock:
+	atomic_dec(&base->block->open_count);
+	dasd_put_device(base);
+	return rc;
+}
+
+static int dasd_release(struct gendisk *disk, fmode_t mode)
+{
+	struct dasd_device *base;
+
+	base = dasd_device_from_gendisk(disk);
+	if (!base)
+		return -ENODEV;
+
+	atomic_dec(&base->block->open_count);
+	module_put(base->discipline->owner);
+	dasd_put_device(base);
+	return 0;
+}
+
+/*
+ * Return disk geometry.
+ */
+static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+	struct dasd_device *base;
+
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+
+	if (!base->discipline ||
+	    !base->discipline->fill_geometry) {
+		dasd_put_device(base);
+		return -EINVAL;
+	}
+	base->discipline->fill_geometry(base->block, geo);
+	geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
+	dasd_put_device(base);
+	return 0;
+}
+
+const struct block_device_operations
+dasd_device_operations = {
+	.owner		= THIS_MODULE,
+	.open		= dasd_open,
+	.release	= dasd_release,
+	.ioctl		= dasd_ioctl,
+	.compat_ioctl	= dasd_ioctl,
+	.getgeo		= dasd_getgeo,
+};
+
+/*******************************************************************************
+ * end of block device operations
+ */
+
+static void
+dasd_exit(void)
+{
+#ifdef CONFIG_PROC_FS
+	dasd_proc_exit();
+#endif
+	dasd_eer_exit();
+	if (dasd_page_cache != NULL) {
+		kmem_cache_destroy(dasd_page_cache);
+		dasd_page_cache = NULL;
+	}
+	dasd_gendisk_exit();
+	dasd_devmap_exit();
+	if (dasd_debug_area != NULL) {
+		debug_unregister(dasd_debug_area);
+		dasd_debug_area = NULL;
+	}
+	dasd_statistics_removeroot();
+}
+
+/*
+ * SECTION: common functions for ccw_driver use
+ */
+
+/*
+ * Is the device read-only?
+ * Note that this function does not report the setting of the
+ * readonly device attribute, but how it is configured in z/VM.
+ */
+int dasd_device_is_ro(struct dasd_device *device)
+{
+	struct ccw_dev_id dev_id;
+	struct diag210 diag_data;
+	int rc;
+
+	if (!MACHINE_IS_VM)
+		return 0;
+	ccw_device_get_id(device->cdev, &dev_id);
+	memset(&diag_data, 0, sizeof(diag_data));
+	diag_data.vrdcdvno = dev_id.devno;
+	diag_data.vrdclen = sizeof(diag_data);
+	rc = diag210(&diag_data);
+	if (rc == 0 || rc == 2) {
+		return diag_data.vrdcvfla & 0x80;
+	} else {
+		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
+			  dev_id.devno, rc);
+		return 0;
+	}
+}
+EXPORT_SYMBOL_GPL(dasd_device_is_ro);
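+
+/*
+ * Illustrative sketch (not part of the original driver): a discipline's
+ * check_device routine could use this helper to mark a z/VM read-only
+ * minidisk, which dasd_open above then honours for FMODE_WRITE opens.
+ */
+#if 0
+static void example_check_ro(struct dasd_device *device)
+{
+	if (dasd_device_is_ro(device))
+		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+}
+#endif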
+
+static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
+{
+	struct ccw_device *cdev = data;
+	int ret;
+
+	ret = ccw_device_set_online(cdev);
+	if (ret)
+		pr_warning("%s: Setting the DASD online failed with rc=%d\n",
+			   dev_name(&cdev->dev), ret);
+}
+
+/*
+ * Initial attempt at a probe function. This can be simplified once
+ * the other detection code is gone.
+ */
+int dasd_generic_probe(struct ccw_device *cdev,
+		       struct dasd_discipline *discipline)
+{
+	int ret;
+
+	ret = dasd_add_sysfs_files(cdev);
+	if (ret) {
+		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
+				"dasd_generic_probe: could not add "
+				"sysfs entries");
+		return ret;
+	}
+	cdev->handler = &dasd_int_handler;
+
+	/*
+	 * Automatically online either all dasd devices (dasd_autodetect)
+	 * or all devices specified with dasd= parameters during
+	 * initial probe.
+	 */
+	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
+	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
+		async_schedule(dasd_generic_auto_online, cdev);
+	return 0;
+}
+
+/*
+ * This will one day be called from a global not_oper handler.
+ * It is also used by driver_unregister during module unload.
+ */
+void dasd_generic_remove(struct ccw_device *cdev)
+{
+	struct dasd_device *device;
+	struct dasd_block *block;
+
+	cdev->handler = NULL;
+
+	dasd_remove_sysfs_files(cdev);
+	device = dasd_device_from_cdev(cdev);
+	if (IS_ERR(device))
+		return;
+	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+		/* Already doing offline processing */
+		dasd_put_device(device);
+		return;
+	}
+	/*
+	 * This device is removed unconditionally. Set offline
+	 * flag to prevent dasd_open from opening it while it is
+	 * not quite down yet.
+	 */
+	dasd_set_target_state(device, DASD_STATE_NEW);
+	/* dasd_delete_device destroys the device reference. */
+	block = device->block;
+	dasd_delete_device(device);
+	/*
+	 * life cycle of block is bound to device, so delete it after
+	 * device was safely removed
+	 */
+	if (block)
+		dasd_free_block(block);
+}
+
+/*
+ * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
+ * the device is detected for the first time and is supposed to be used
+ * or the user has started activation through sysfs.
+ */
+int dasd_generic_set_online(struct ccw_device *cdev,
+			    struct dasd_discipline *base_discipline)
+{
+	struct dasd_discipline *discipline;
+	struct dasd_device *device;
+	int rc;
+
+	/* first online clears initial online feature flag */
+	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
+	device = dasd_create_device(cdev);
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	discipline = base_discipline;
+	if (device->features & DASD_FEATURE_USEDIAG) {
+	  	if (!dasd_diag_discipline_pointer) {
+			pr_warning("%s Setting the DASD online failed because "
+				   "of missing DIAG discipline\n",
+				   dev_name(&cdev->dev));
+			dasd_delete_device(device);
+			return -ENODEV;
+		}
+		discipline = dasd_diag_discipline_pointer;
+	}
+	if (!try_module_get(base_discipline->owner)) {
+		dasd_delete_device(device);
+		return -EINVAL;
+	}
+	if (!try_module_get(discipline->owner)) {
+		module_put(base_discipline->owner);
+		dasd_delete_device(device);
+		return -EINVAL;
+	}
+	device->base_discipline = base_discipline;
+	device->discipline = discipline;
+
+	/* check_device will allocate block device if necessary */
+	rc = discipline->check_device(device);
+	if (rc) {
+		pr_warning("%s Setting the DASD online with discipline %s "
+			   "failed with rc=%i\n",
+			   dev_name(&cdev->dev), discipline->name, rc);
+		module_put(discipline->owner);
+		module_put(base_discipline->owner);
+		dasd_delete_device(device);
+		return rc;
+	}
+
+	dasd_set_target_state(device, DASD_STATE_ONLINE);
+	if (device->state <= DASD_STATE_KNOWN) {
+		pr_warning("%s Setting the DASD online failed because of a "
+			   "missing discipline\n", dev_name(&cdev->dev));
+		rc = -ENODEV;
+		dasd_set_target_state(device, DASD_STATE_NEW);
+		if (device->block)
+			dasd_free_block(device->block);
+		dasd_delete_device(device);
+	} else
+		pr_debug("dasd_generic device %s found\n",
+				dev_name(&cdev->dev));
+
+	wait_event(dasd_init_waitq, _wait_for_device(device));
+
+	dasd_put_device(device);
+	return rc;
+}
+
+int dasd_generic_set_offline(struct ccw_device *cdev)
+{
+	struct dasd_device *device;
+	struct dasd_block *block;
+	int max_count, open_count;
+
+	device = dasd_device_from_cdev(cdev);
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+		/* Already doing offline processing */
+		dasd_put_device(device);
+		return 0;
+	}
+	/*
+	 * We must make sure that this device is currently not in use.
+	 * The open_count is increased for every opener, that includes
+	 * the blkdev_get in dasd_scan_partitions. We are only interested
+	 * in the other openers.
+	 */
+	if (device->block) {
+		max_count = device->block->bdev ? 0 : -1;
+		open_count = atomic_read(&device->block->open_count);
+		if (open_count > max_count) {
+			if (open_count > 0)
+				pr_warning("%s: The DASD cannot be set offline "
+					   "with open count %i\n",
+					   dev_name(&cdev->dev), open_count);
+			else
+				pr_warning("%s: The DASD cannot be set offline "
+					   "while it is in use\n",
+					   dev_name(&cdev->dev));
+			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
+			dasd_put_device(device);
+			return -EBUSY;
+		}
+	}
+	dasd_set_target_state(device, DASD_STATE_NEW);
+	/* dasd_delete_device destroys the device reference. */
+	block = device->block;
+	dasd_delete_device(device);
+	/*
+	 * life cycle of block is bound to device, so delete it after
+	 * device was safely removed
+	 */
+	if (block)
+		dasd_free_block(block);
+	return 0;
+}
+
+int dasd_generic_last_path_gone(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+
+	dev_warn(&device->cdev->dev, "No operational channel path is left "
+		 "for the device\n");
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
+	/* First of all call extended error reporting. */
+	dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+
+	if (device->state < DASD_STATE_BASIC)
+		return 0;
+	/* Device is active. We want to keep it. */
+	list_for_each_entry(cqr, &device->ccw_queue, devlist)
+		if ((cqr->status == DASD_CQR_IN_IO) ||
+		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
+			cqr->status = DASD_CQR_QUEUED;
+			cqr->retries++;
+		}
+	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
+	dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
+
+int dasd_generic_path_operational(struct dasd_device *device)
+{
+	dev_info(&device->cdev->dev, "A channel path to the device has become "
+		 "operational\n");
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
+	if (device->stopped & DASD_UNRESUMED_PM) {
+		dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
+		dasd_restore_device(device);
+		return 1;
+	}
+	dasd_schedule_device_bh(device);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
+
+int dasd_generic_notify(struct ccw_device *cdev, int event)
+{
+	struct dasd_device *device;
+	int ret;
+
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device))
+		return 0;
+	ret = 0;
+	switch (event) {
+	case CIO_GONE:
+	case CIO_BOXED:
+	case CIO_NO_PATH:
+		device->path_data.opm = 0;
+		device->path_data.ppm = 0;
+		device->path_data.npm = 0;
+		ret = dasd_generic_last_path_gone(device);
+		break;
+	case CIO_OPER:
+		ret = 1;
+		if (device->path_data.opm)
+			ret = dasd_generic_path_operational(device);
+		break;
+	}
+	dasd_put_device(device);
+	return ret;
+}
+
+void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
+{
+	int chp;
+	__u8 oldopm, eventlpm;
+	struct dasd_device *device;
+
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device))
+		return;
+	for (chp = 0; chp < 8; chp++) {
+		eventlpm = 0x80 >> chp;
+		if (path_event[chp] & PE_PATH_GONE) {
+			oldopm = device->path_data.opm;
+			device->path_data.opm &= ~eventlpm;
+			device->path_data.ppm &= ~eventlpm;
+			device->path_data.npm &= ~eventlpm;
+			if (oldopm && !device->path_data.opm)
+				dasd_generic_last_path_gone(device);
+		}
+		if (path_event[chp] & PE_PATH_AVAILABLE) {
+			device->path_data.opm &= ~eventlpm;
+			device->path_data.ppm &= ~eventlpm;
+			device->path_data.npm &= ~eventlpm;
+			device->path_data.tbvpm |= eventlpm;
+			dasd_schedule_device_bh(device);
+		}
+		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				      "Pathgroup re-established\n");
+			if (device->discipline->kick_validate)
+				device->discipline->kick_validate(device);
+		}
+	}
+	dasd_put_device(device);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_event);
+
+int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
+{
+	if (!device->path_data.opm && lpm) {
+		device->path_data.opm = lpm;
+		dasd_generic_path_operational(device);
+	} else
+		device->path_data.opm |= lpm;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
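+
+/*
+ * Illustrative sketch (not part of the original driver): a discipline that
+ * needs no dedicated path verification could plug this helper directly into
+ * its operations; example_discipline is a hypothetical name.
+ */
+#if 0
+static struct dasd_discipline example_discipline = {
+	/* ... */
+	.verify_path = dasd_generic_verify_path,
+};
+#endif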
+
+
+int dasd_generic_pm_freeze(struct ccw_device *cdev)
+{
+	struct dasd_ccw_req *cqr, *n;
+	int rc;
+	struct list_head freeze_queue;
+	struct dasd_device *device = dasd_device_from_cdev(cdev);
+
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	/* mark device as suspended */
+	set_bit(DASD_FLAG_SUSPENDED, &device->flags);
+
+	if (device->discipline->freeze)
+		rc = device->discipline->freeze(device);
+
+	/* disallow new I/O  */
+	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
+	/* clear active requests */
+	INIT_LIST_HEAD(&freeze_queue);
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	rc = 0;
+	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
+		/* Check status and move request to flush_queue */
+		if (cqr->status == DASD_CQR_IN_IO) {
+			rc = device->discipline->term_IO(cqr);
+			if (rc) {
+				/* unable to terminate request */
+				dev_err(&device->cdev->dev,
+					"Unable to terminate request %p "
+					"on suspend\n", cqr);
+				spin_unlock_irq(get_ccwdev_lock(cdev));
+				dasd_put_device(device);
+				return rc;
+			}
+		}
+		list_move_tail(&cqr->devlist, &freeze_queue);
+	}
+
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
+		wait_event(dasd_flush_wq,
+			   (cqr->status != DASD_CQR_CLEAR_PENDING));
+		if (cqr->status == DASD_CQR_CLEARED)
+			cqr->status = DASD_CQR_QUEUED;
+	}
+	/* requeue requests from the freeze_queue back onto the ccw_queue */
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	list_splice_tail(&freeze_queue, &device->ccw_queue);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	dasd_put_device(device);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
+
+int dasd_generic_restore_device(struct ccw_device *cdev)
+{
+	struct dasd_device *device = dasd_device_from_cdev(cdev);
+	int rc = 0;
+
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	/* allow new IO again */
+	dasd_device_remove_stop_bits(device,
+				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
+
+	dasd_schedule_device_bh(device);
+
+	/*
+	 * call the discipline's restore function;
+	 * if the device is stopped, do nothing (e.g. for disconnected devices)
+	 */
+	if (device->discipline->restore && !(device->stopped))
+		rc = device->discipline->restore(device);
+	if (rc || device->stopped)
+		/*
+		 * if the resume failed for the DASD we put it in
+		 * an UNRESUMED stop state
+		 */
+		device->stopped |= DASD_UNRESUMED_PM;
+
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+
+	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
+	dasd_put_device(device);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
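+
+/*
+ * Illustrative sketch (not part of the original driver): how a discipline
+ * driver might wire the dasd_generic_* helpers into its struct ccw_driver.
+ * The example_* identifiers are hypothetical; the real discipline drivers
+ * use thin wrappers that pass their discipline to dasd_generic_probe() and
+ * dasd_generic_set_online().
+ */
+#if 0
+static struct ccw_driver example_dasd_driver = {
+	.probe	     = example_probe,		/* calls dasd_generic_probe() */
+	.remove	     = dasd_generic_remove,
+	.set_online  = example_set_online,	/* calls dasd_generic_set_online() */
+	.set_offline = dasd_generic_set_offline,
+	.notify	     = dasd_generic_notify,
+	.path_event  = dasd_generic_path_event,
+	.freeze	     = dasd_generic_pm_freeze,
+	.thaw	     = dasd_generic_restore_device,
+	.restore     = dasd_generic_restore_device,
+};
+#endif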
+
+static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
+						   void *rdc_buffer,
+						   int rdc_buffer_size,
+						   int magic)
+{
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	unsigned long *idaw;
+
+	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
+
+	if (IS_ERR(cqr)) {
+		/* internal error 13 - Allocating the RDC request failed */
+		dev_err(&device->cdev->dev,
+			 "An error occurred in the DASD device driver, "
+			 "reason=%s\n", "13");
+		return cqr;
+	}
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = CCW_CMD_RDC;
+	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
+		idaw = (unsigned long *) (cqr->data);
+		ccw->cda = (__u32)(addr_t) idaw;
+		ccw->flags = CCW_FLAG_IDA;
+		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
+	} else {
+		ccw->cda = (__u32)(addr_t) rdc_buffer;
+		ccw->flags = 0;
+	}
+
+	ccw->count = rdc_buffer_size;
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->expires = 10*HZ;
+	cqr->retries = 256;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+
+int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
+				void *rdc_buffer, int rdc_buffer_size)
+{
+	int ret;
+	struct dasd_ccw_req *cqr;
+
+	cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
+				     magic);
+	if (IS_ERR(cqr))
+		return PTR_ERR(cqr);
+
+	ret = dasd_sleep_on(cqr);
+	dasd_sfree_request(cqr, cqr->memdev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
+
+/*
+ *   In command mode and transport mode we need to look for sense
+ *   data in different places. The sense data itself is always
+ *   an array of 32 bytes, so we can unify the sense data access
+ *   for both modes.
+ */
+char *dasd_get_sense(struct irb *irb)
+{
+	struct tsb *tsb = NULL;
+	char *sense = NULL;
+
+	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
+		if (irb->scsw.tm.tcw)
+			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
+					  irb->scsw.tm.tcw);
+		if (tsb && tsb->length == 64 && tsb->flags)
+			switch (tsb->flags & 0x07) {
+			case 1:	/* tsa_iostat */
+				sense = tsb->tsa.iostat.sense;
+				break;
+			case 2: /* tsa_ddpc */
+				sense = tsb->tsa.ddpc.sense;
+				break;
+			default:
+				/* currently we don't use interrogate data */
+				break;
+			}
+	} else if (irb->esw.esw0.erw.cons) {
+		sense = irb->ecw;
+	}
+	return sense;
+}
+EXPORT_SYMBOL_GPL(dasd_get_sense);
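+
+/*
+ * Illustrative sketch (not part of the original driver): an interrupt or
+ * error handler can use dasd_get_sense() without caring whether the irb
+ * stems from command mode or transport mode.
+ */
+#if 0
+static void example_log_sense(struct dasd_device *device, struct irb *irb)
+{
+	char *sense = dasd_get_sense(irb);
+
+	if (sense)
+		DBF_DEV_EVENT(DBF_WARNING, device,
+			      "sense bytes 0-2: %02x %02x %02x",
+			      sense[0], sense[1], sense[2]);
+	else
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "interrupt without sense data");
+}
+#endif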
+
+static int __init dasd_init(void)
+{
+	int rc;
+
+	init_waitqueue_head(&dasd_init_waitq);
+	init_waitqueue_head(&dasd_flush_wq);
+	init_waitqueue_head(&generic_waitq);
+
+	/* register 'common' DASD debug area, used for all DBF_XXX calls */
+	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
+	if (dasd_debug_area == NULL) {
+		rc = -ENOMEM;
+		goto failed;
+	}
+	debug_register_view(dasd_debug_area, &debug_sprintf_view);
+	debug_set_level(dasd_debug_area, DBF_WARNING);
+
+	DBF_EVENT(DBF_EMERG, "%s", "debug area created");
+
+	dasd_diag_discipline_pointer = NULL;
+
+	dasd_statistics_createroot();
+
+	rc = dasd_devmap_init();
+	if (rc)
+		goto failed;
+	rc = dasd_gendisk_init();
+	if (rc)
+		goto failed;
+	rc = dasd_parse();
+	if (rc)
+		goto failed;
+	rc = dasd_eer_init();
+	if (rc)
+		goto failed;
+#ifdef CONFIG_PROC_FS
+	rc = dasd_proc_init();
+	if (rc)
+		goto failed;
+#endif
+
+	return 0;
+failed:
+	pr_info("The DASD device driver could not be initialized\n");
+	dasd_exit();
+	return rc;
+}
+
+module_init(dasd_init);
+module_exit(dasd_exit);
+
+EXPORT_SYMBOL(dasd_debug_area);
+EXPORT_SYMBOL(dasd_diag_discipline_pointer);
+
+EXPORT_SYMBOL(dasd_add_request_head);
+EXPORT_SYMBOL(dasd_add_request_tail);
+EXPORT_SYMBOL(dasd_cancel_req);
+EXPORT_SYMBOL(dasd_device_clear_timer);
+EXPORT_SYMBOL(dasd_block_clear_timer);
+EXPORT_SYMBOL(dasd_enable_device);
+EXPORT_SYMBOL(dasd_int_handler);
+EXPORT_SYMBOL(dasd_kfree_request);
+EXPORT_SYMBOL(dasd_kick_device);
+EXPORT_SYMBOL(dasd_kmalloc_request);
+EXPORT_SYMBOL(dasd_schedule_device_bh);
+EXPORT_SYMBOL(dasd_schedule_block_bh);
+EXPORT_SYMBOL(dasd_set_target_state);
+EXPORT_SYMBOL(dasd_device_set_timer);
+EXPORT_SYMBOL(dasd_block_set_timer);
+EXPORT_SYMBOL(dasd_sfree_request);
+EXPORT_SYMBOL(dasd_sleep_on);
+EXPORT_SYMBOL(dasd_sleep_on_immediatly);
+EXPORT_SYMBOL(dasd_sleep_on_interruptible);
+EXPORT_SYMBOL(dasd_smalloc_request);
+EXPORT_SYMBOL(dasd_start_IO);
+EXPORT_SYMBOL(dasd_term_IO);
+
+EXPORT_SYMBOL_GPL(dasd_generic_probe);
+EXPORT_SYMBOL_GPL(dasd_generic_remove);
+EXPORT_SYMBOL_GPL(dasd_generic_notify);
+EXPORT_SYMBOL_GPL(dasd_generic_set_online);
+EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
+EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
+EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
+EXPORT_SYMBOL_GPL(dasd_alloc_block);
+EXPORT_SYMBOL_GPL(dasd_free_block);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_3990_erp.c b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_3990_erp.c
new file mode 100644
index 0000000..0326571
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_3990_erp.c
@@ -0,0 +1,2772 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_3990_erp.c
+ * Author(s)......: Horst  Hummel    <Horst.Hummel@de.ibm.com>
+ *		    Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000, 2001
+ *
+ */
+
+#define KMSG_COMPONENT "dasd-eckd"
+
+#include <linux/timer.h>
+#include <asm/idals.h>
+
+#define PRINTK_HEADER "dasd_erp(3990): "
+
+#include "dasd_int.h"
+#include "dasd_eckd.h"
+
+
+struct DCTL_data {
+	unsigned char subcommand;  /* e.g. Inhibit Write, Enable Write, ... */
+	unsigned char modifier;	   /* Subcommand modifier */
+	unsigned short res;	   /* reserved */
+} __attribute__ ((packed));
+
+/*
+ *****************************************************************************
+ * SECTION ERP HANDLING
+ *****************************************************************************
+ */
+/*
+ *****************************************************************************
+ * 24 and 32 byte sense ERP functions
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERP_CLEANUP
+ *
+ * DESCRIPTION
+ *   Removes the already built but no longer needed ERP request and sets
+ *   the status of the original cqr / erp to the given (final) status.
+ *
+ *  PARAMETER
+ *   erp		request to be blocked
+ *   final_status	either DASD_CQR_DONE or DASD_CQR_FAILED
+ *
+ * RETURN VALUES
+ *   cqr		original cqr
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
+{
+	struct dasd_ccw_req *cqr = erp->refers;
+
+	dasd_free_erp_request(erp, erp->memdev);
+	cqr->status = final_status;
+	return cqr;
+
+}				/* end dasd_3990_erp_cleanup */
+
+/*
+ * DASD_3990_ERP_BLOCK_QUEUE
+ *
+ * DESCRIPTION
+ *   Block the given device request queue to prevent it from further
+ *   processing until the started timer has expired or a related
+ *   interrupt was received.
+ */
+static void dasd_3990_erp_block_queue(struct dasd_ccw_req *erp, int expires)
+{
+
+	struct dasd_device *device = erp->startdev;
+	unsigned long flags;
+
+	DBF_DEV_EVENT(DBF_INFO, device,
+		    "blocking request queue for %is", expires/HZ);
+
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	erp->status = DASD_CQR_FILLED;
+	if (erp->block)
+		dasd_block_set_timer(erp->block, expires);
+	else
+		dasd_device_set_timer(device, expires);
+}
+
+/*
+ * DASD_3990_ERP_INT_REQ
+ *
+ * DESCRIPTION
+ *   Handles 'Intervention Required' error.
+ *   This means the device is either offline or not installed.
+ *
+ * PARAMETER
+ *   erp		current erp
+ * RETURN VALUES
+ *   erp		modified erp
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	/* first time set initial retry counter and erp_function */
+	/* and retry once without blocking queue		 */
+	/* (this enables easier enqueuing of the cqr)		 */
+	if (erp->function != dasd_3990_erp_int_req) {
+
+		erp->retries = 256;
+		erp->function = dasd_3990_erp_int_req;
+
+	} else {
+
+		/* issue a message and wait for 'device ready' interrupt */
+		dev_err(&device->cdev->dev,
+			    "is offline or not installed - "
+			    "INTERVENTION REQUIRED!!\n");
+
+		dasd_3990_erp_block_queue(erp, 60*HZ);
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_int_req */
+
+/*
+ * DASD_3990_ERP_ALTERNATE_PATH
+ *
+ * DESCRIPTION
+ *   Repeat the operation on a different channel path.
+ *   If all alternate paths have been tried, the request is posted with a
+ *   permanent error.
+ *
+ *  PARAMETER
+ *   erp		pointer to the current ERP
+ *
+ * RETURN VALUES
+ *   erp		modified pointer to the ERP
+ */
+static void
+dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
+{
+	struct dasd_device *device = erp->startdev;
+	__u8 opm;
+	unsigned long flags;
+
+	/* try alternate valid path */
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	opm = ccw_device_get_path_mask(device->cdev);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	if (erp->lpm == 0)
+		erp->lpm = device->path_data.opm &
+			~(erp->irb.esw.esw0.sublog.lpum);
+	else
+		erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
+
+	if ((erp->lpm & opm) != 0x00) {
+
+		DBF_DEV_EVENT(DBF_WARNING, device,
+			    "try alternate lpm=%x (lpum=%x / opm=%x)",
+			    erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm);
+
+		/* reset status to submit the request again... */
+		erp->status = DASD_CQR_FILLED;
+		erp->retries = 10;
+	} else {
+		dev_err(&device->cdev->dev,
+			"The DASD cannot be reached on any path (lpum=%x"
+			"/opm=%x)\n", erp->irb.esw.esw0.sublog.lpum, opm);
+
+		/* post request with permanent error */
+		erp->status = DASD_CQR_FAILED;
+	}
+}				/* end dasd_3990_erp_alternate_path */
+
+/*
+ * DASD_3990_ERP_DCTL
+ *
+ * DESCRIPTION
+ *   Setup cqr to do the Diagnostic Control (DCTL) command with an
+ *   Inhibit Write subcommand (0x20) and the given modifier.
+ *
+ *  PARAMETER
+ *   erp		pointer to the current (failed) ERP
+ *   modifier		subcommand modifier
+ *
+ * RETURN VALUES
+ *   dctl_cqr		pointer to NEW dctl_cqr
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
+{
+
+	struct dasd_device *device = erp->startdev;
+	struct DCTL_data *DCTL_data;
+	struct ccw1 *ccw;
+	struct dasd_ccw_req *dctl_cqr;
+
+	dctl_cqr = dasd_alloc_erp_request((char *) &erp->magic, 1,
+					  sizeof(struct DCTL_data),
+					  device);
+	if (IS_ERR(dctl_cqr)) {
+		dev_err(&device->cdev->dev,
+			    "Unable to allocate DCTL-CQR\n");
+		erp->status = DASD_CQR_FAILED;
+		return erp;
+	}
+
+	DCTL_data = dctl_cqr->data;
+
+	DCTL_data->subcommand = 0x02;	/* Inhibit Write */
+	DCTL_data->modifier = modifier;
+
+	ccw = dctl_cqr->cpaddr;
+	memset(ccw, 0, sizeof(struct ccw1));
+	ccw->cmd_code = CCW_CMD_DCTL;
+	ccw->count = 4;
+	ccw->cda = (__u32)(addr_t) DCTL_data;
+	dctl_cqr->flags = erp->flags;
+	dctl_cqr->function = dasd_3990_erp_DCTL;
+	dctl_cqr->refers = erp;
+	dctl_cqr->startdev = device;
+	dctl_cqr->memdev = device;
+	dctl_cqr->magic = erp->magic;
+	dctl_cqr->expires = 5 * 60 * HZ;
+	dctl_cqr->retries = 2;
+
+	dctl_cqr->buildclk = get_clock();
+
+	dctl_cqr->status = DASD_CQR_FILLED;
+
+	return dctl_cqr;
+
+}				/* end dasd_3990_erp_DCTL */
+
+/*
+ * DASD_3990_ERP_ACTION_1
+ *
+ * DESCRIPTION
+ *   Setup ERP to do the ERP action 1 (see Reference manual).
+ *   Repeat the operation on a different channel path.
+ *   As deviation from the recommended recovery action, we reset the path mask
+ *   after we have tried each path and go through all paths a second time.
+ *   This will cover situations where only one path at a time is actually down,
+ *   but all paths fail and recover just with the same sequence and timing as
+ *   we try to use them (flapping links).
+ *   If all alternate paths have been tried twice, the request is posted with
+ *   a permanent error.
+ *
+ *  PARAMETER
+ *   erp		pointer to the current ERP
+ *
+ * RETURN VALUES
+ *   erp		pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *dasd_3990_erp_action_1_sec(struct dasd_ccw_req *erp)
+{
+	erp->function = dasd_3990_erp_action_1_sec;
+	dasd_3990_erp_alternate_path(erp);
+	return erp;
+}
+
+static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
+{
+	erp->function = dasd_3990_erp_action_1;
+	dasd_3990_erp_alternate_path(erp);
+	if (erp->status == DASD_CQR_FAILED &&
+	    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
+		erp->status = DASD_CQR_FILLED;
+		erp->retries = 10;
+		erp->lpm = erp->startdev->path_data.opm;
+		erp->function = dasd_3990_erp_action_1_sec;
+	}
+	return erp;
+}				/* end dasd_3990_erp_action_1(b) */
+
+/*
+ * DASD_3990_ERP_ACTION_4
+ *
+ * DESCRIPTION
+ *   Setup ERP to do the ERP action 4 (see Reference manual).
+ *   Set the current request to PENDING to block the CQR queue for that device
+ *   until the state change interrupt appears.
+ *   Use a timer (30 seconds) to retry the cqr if the interrupt is still
+ *   missing.
+ *
+ *  PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the current ERP
+ *
+ * RETURN VALUES
+ *   erp		pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	/* first time set initial retry counter and erp_function    */
+	/* and retry once without waiting for state change pending  */
+	/* interrupt (this enables easier enqueuing of the cqr)    */
+	if (erp->function != dasd_3990_erp_action_4) {
+
+		DBF_DEV_EVENT(DBF_INFO, device, "%s",
+			    "dasd_3990_erp_action_4: first time retry");
+
+		erp->retries = 256;
+		erp->function = dasd_3990_erp_action_4;
+
+	} else {
+		if (sense && (sense[25] == 0x1D)) { /* state change pending */
+
+			DBF_DEV_EVENT(DBF_INFO, device,
+				    "waiting for state change pending "
+				    "interrupt, %d retries left",
+				    erp->retries);
+
+			dasd_3990_erp_block_queue(erp, 30*HZ);
+
+		} else if (sense && (sense[25] == 0x1E)) {	/* busy */
+			DBF_DEV_EVENT(DBF_INFO, device,
+				    "busy - redriving request later, "
+				    "%d retries left",
+				    erp->retries);
+			dasd_3990_erp_block_queue(erp, HZ);
+		} else {
+			/* no state change pending - retry */
+			DBF_DEV_EVENT(DBF_INFO, device,
+				     "redriving request immediately, "
+				     "%d retries left",
+				     erp->retries);
+			erp->status = DASD_CQR_FILLED;
+		}
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_action_4 */
+
+/*
+ *****************************************************************************
+ * 24 byte sense ERP functions (only)
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERP_ACTION_5
+ *
+ * DESCRIPTION
+ *   Setup ERP to do the ERP action 5 (see Reference manual).
+ *   NOTE: Further handling is done in xxx_further_erp after the retries.
+ *
+ *  PARAMETER
+ *   erp		pointer to the current ERP
+ *
+ * RETURN VALUES
+ *   erp		pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_5(struct dasd_ccw_req * erp)
+{
+
+	/* first of all retry */
+	erp->retries = 10;
+	erp->function = dasd_3990_erp_action_5;
+
+	return erp;
+
+}				/* end dasd_3990_erp_action_5 */
+
+/*
+ * DASD_3990_HANDLE_ENV_DATA
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Environmental data present'.
+ *   Does an analysis of the sense data (message format)
+ *   and prints the error messages.
+ *
+ * PARAMETER
+ *   sense		current sense data
+ *
+ * RETURN VALUES
+ *   void
+ */
+static void
+dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
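+	/* sense byte 7: high nibble = message format,
+	 * low nibble = message number */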
+	char msg_format = (sense[7] & 0xF0);
+	char msg_no = (sense[7] & 0x0F);
+	char errorstring[ERRORLENGTH];
+
+	switch (msg_format) {
+	case 0x00:		/* Format 0 - Program or System Checks */
+
+		if (sense[1] & 0x10) {	/* check message to operator bit */
+
+			switch (msg_no) {
+			case 0x00:	/* No Message */
+				break;
+			case 0x01:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Invalid Command\n");
+				break;
+			case 0x02:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Invalid Command "
+					    "Sequence\n");
+				break;
+			case 0x03:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - CCW Count less than "
+					    "required\n");
+				break;
+			case 0x04:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Invalid Parameter\n");
+				break;
+			case 0x05:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Diagnostic of Special"
+					    " Command Violates File Mask\n");
+				break;
+			case 0x07:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Channel Returned with "
+					    "Incorrect retry CCW\n");
+				break;
+			case 0x08:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Reset Notification\n");
+				break;
+			case 0x09:
+				dev_warn(&device->cdev->dev,
+					 "FORMAT 0 - Storage Path Restart\n");
+				break;
+			case 0x0A:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Channel requested "
+					    "... %02x\n", sense[8]);
+				break;
+			case 0x0B:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Invalid Defective/"
+					    "Alternate Track Pointer\n");
+				break;
+			case 0x0C:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - DPS Installation "
+					    "Check\n");
+				break;
+			case 0x0E:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Command Invalid on "
+					    "Secondary Address\n");
+				break;
+			case 0x0F:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Status Not As "
+					    "Required: reason %02x\n",
+					 sense[8]);
+				break;
+			default:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Reserved\n");
+			}
+		} else {
+			switch (msg_no) {
+			case 0x00:	/* No Message */
+				break;
+			case 0x01:
+				dev_warn(&device->cdev->dev,
+					 "FORMAT 0 - Device Error "
+					 "Source\n");
+				break;
+			case 0x02:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Reserved\n");
+				break;
+			case 0x03:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Device Fenced - "
+					    "device = %02x\n", sense[4]);
+				break;
+			case 0x04:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Data Pinned for "
+					    "Device\n");
+				break;
+			default:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Reserved\n");
+			}
+		}
+		break;
+
+	case 0x10:		/* Format 1 - Device Equipment Checks */
+		switch (msg_no) {
+		case 0x00:	/* No Message */
+			break;
+		case 0x01:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Device Status 1 not as "
+				    "expected\n");
+			break;
+		case 0x03:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Index missing\n");
+			break;
+		case 0x04:
+			dev_warn(&device->cdev->dev,
+				 "FORMAT 1 - Interruption cannot be "
+				 "reset\n");
+			break;
+		case 0x05:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Device did not respond to "
+				    "selection\n");
+			break;
+		case 0x06:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Device check-2 error or Set "
+				    "Sector is not complete\n");
+			break;
+		case 0x07:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Head address does not "
+				    "compare\n");
+			break;
+		case 0x08:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Device status 1 not valid\n");
+			break;
+		case 0x09:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Device not ready\n");
+			break;
+		case 0x0A:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Track physical address did "
+				    "not compare\n");
+			break;
+		case 0x0B:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Missing device address bit\n");
+			break;
+		case 0x0C:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Drive motor switch is off\n");
+			break;
+		case 0x0D:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Seek incomplete\n");
+			break;
+		case 0x0E:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Cylinder address did not "
+				    "compare\n");
+			break;
+		case 0x0F:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Offset active cannot be "
+				    "reset\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Reserved\n");
+		}
+		break;
+
+	case 0x20:		/* Format 2 - 3990 Equipment Checks */
+		switch (msg_no) {
+		case 0x08:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 2 - 3990 check-2 error\n");
+			break;
+		case 0x0E:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 2 - Support facility errors\n");
+			break;
+		case 0x0F:
+			dev_warn(&device->cdev->dev,
+				 "FORMAT 2 - Microcode detected error "
+				 "%02x\n",
+				 sense[8]);
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 2 - Reserved\n");
+		}
+		break;
+
+	case 0x30:		/* Format 3 - 3990 Control Checks */
+		switch (msg_no) {
+		case 0x0F:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 3 - Allegiance terminated\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 3 - Reserved\n");
+		}
+		break;
+
+	case 0x40:		/* Format 4 - Data Checks */
+		switch (msg_no) {
+		case 0x00:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Home address area error\n");
+			break;
+		case 0x01:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Count area error\n");
+			break;
+		case 0x02:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Key area error\n");
+			break;
+		case 0x03:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Data area error\n");
+			break;
+		case 0x04:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in home address "
+				    "area\n");
+			break;
+		case 0x05:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in count address "
+				    "area\n");
+			break;
+		case 0x06:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in key area\n");
+			break;
+		case 0x07:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in data area\n");
+			break;
+		case 0x08:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Home address area error; "
+				    "offset active\n");
+			break;
+		case 0x09:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Count area error; offset "
+				    "active\n");
+			break;
+		case 0x0A:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Key area error; offset "
+				    "active\n");
+			break;
+		case 0x0B:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Data area error; "
+				    "offset active\n");
+			break;
+		case 0x0C:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in home "
+				    "address area; offset active\n");
+			break;
+		case 0x0D:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in count "
+				    "address area; offset active\n");
+			break;
+		case 0x0E:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in key area; "
+				    "offset active\n");
+			break;
+		case 0x0F:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in data area; "
+				    "offset active\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Reserved\n");
+		}
+		break;
+
+	case 0x50:  /* Format 5 - Data Check with displacement information */
+		switch (msg_no) {
+		case 0x00:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 5 - Data Check in the "
+				    "home address area\n");
+			break;
+		case 0x01:
+			dev_warn(&device->cdev->dev,
+				 "FORMAT 5 - Data Check in the count "
+				 "area\n");
+			break;
+		case 0x02:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 5 - Data Check in the key area\n");
+			break;
+		case 0x03:
+			dev_warn(&device->cdev->dev,
+				 "FORMAT 5 - Data Check in the data "
+				 "area\n");
+			break;
+		case 0x08:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 5 - Data Check in the "
+				    "home address area; offset active\n");
+			break;
+		case 0x09:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 5 - Data Check in the count area; "
+				    "offset active\n");
+			break;
+		case 0x0A:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 5 - Data Check in the key area; "
+				    "offset active\n");
+			break;
+		case 0x0B:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 5 - Data Check in the data area; "
+				    "offset active\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 5 - Reserved\n");
+		}
+		break;
+
+	case 0x60:  /* Format 6 - Usage Statistics/Overrun Errors */
+		switch (msg_no) {
+		case 0x00:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel A\n");
+			break;
+		case 0x01:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel B\n");
+			break;
+		case 0x02:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel C\n");
+			break;
+		case 0x03:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel D\n");
+			break;
+		case 0x04:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel E\n");
+			break;
+		case 0x05:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel F\n");
+			break;
+		case 0x06:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel G\n");
+			break;
+		case 0x07:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel H\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Reserved\n");
+		}
+		break;
+
+	case 0x70:  /* Format 7 - Device Connection Control Checks */
+		switch (msg_no) {
+		case 0x00:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - RCC initiated by a connection "
+				    "check alert\n");
+			break;
+		case 0x01:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - RCC 1 sequence not "
+				    "successful\n");
+			break;
+		case 0x02:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - RCC 1 and RCC 2 sequences not "
+				    "successful\n");
+			break;
+		case 0x03:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Invalid tag-in during "
+				    "selection sequence\n");
+			break;
+		case 0x04:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - extra RCC required\n");
+			break;
+		case 0x05:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Invalid DCC selection "
+				    "response or timeout\n");
+			break;
+		case 0x06:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Missing end operation; device "
+				    "transfer complete\n");
+			break;
+		case 0x07:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Missing end operation; device "
+				    "transfer incomplete\n");
+			break;
+		case 0x08:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Invalid tag-in for an "
+				    "immediate command sequence\n");
+			break;
+		case 0x09:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Invalid tag-in for an "
+				    "extended command sequence\n");
+			break;
+		case 0x0A:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - 3990 microcode time out when "
+				    "stopping selection\n");
+			break;
+		case 0x0B:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - No response to selection "
+				    "after a poll interruption\n");
+			break;
+		case 0x0C:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Permanent path error (DASD "
+				    "controller not available)\n");
+			break;
+		case 0x0D:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - DASD controller not available"
+				    " on disconnected command chain\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Reserved\n");
+		}
+		break;
+
+	case 0x80:  /* Format 8 - Additional Device Equipment Checks */
+		switch (msg_no) {
+		case 0x00:	/* No Message */
+			break;
+		case 0x01:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - Error correction code "
+				    "hardware fault\n");
+			break;
+		case 0x03:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - Unexpected end operation "
+				    "response code\n");
+			break;
+		case 0x04:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - End operation with transfer "
+				    "count not zero\n");
+			break;
+		case 0x05:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - End operation with transfer "
+				    "count zero\n");
+			break;
+		case 0x06:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - DPS checks after a system "
+				    "reset or selective reset\n");
+			break;
+		case 0x07:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - DPS cannot be filled\n");
+			break;
+		case 0x08:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - Short busy time-out during "
+				    "device selection\n");
+			break;
+		case 0x09:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - DASD controller failed to "
+				    "set or reset the long busy latch\n");
+			break;
+		case 0x0A:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - No interruption from device "
+				    "during a command chain\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - Reserved\n");
+		}
+		break;
+
+	case 0x90:  /* Format 9 - Device Read, Write, and Seek Checks */
+		switch (msg_no) {
+		case 0x00:
+			break;	/* No Message */
+		case 0x06:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 9 - Device check-2 error\n");
+			break;
+		case 0x07:
+			dev_warn(&device->cdev->dev,
+				 "FORMAT 9 - Head address did not "
+				 "compare\n");
+			break;
+		case 0x0A:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 9 - Track physical address did "
+				    "not compare while oriented\n");
+			break;
+		case 0x0E:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 9 - Cylinder address did not "
+				    "compare\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 9 - Reserved\n");
+		}
+		break;
+
+	case 0xF0:		/* Format F - Cache Storage Checks */
+		switch (msg_no) {
+		case 0x00:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Operation Terminated\n");
+			break;
+		case 0x01:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Subsystem Processing Error\n");
+			break;
+		case 0x02:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Cache or nonvolatile storage "
+				    "equipment failure\n");
+			break;
+		case 0x04:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Caching terminated\n");
+			break;
+		case 0x06:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Cache fast write access not "
+				    "authorized\n");
+			break;
+		case 0x07:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Track format incorrect\n");
+			break;
+		case 0x09:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Caching reinitiated\n");
+			break;
+		case 0x0A:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Nonvolatile storage "
+				    "terminated\n");
+			break;
+		case 0x0B:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Volume is suspended duplex\n");
+			/* call extended error reporting (EER) */
+			dasd_eer_write(device, erp->refers,
+				       DASD_EER_PPRCSUSPEND);
+			break;
+		case 0x0C:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Subsystem status cannot be "
+				    "determined\n");
+			break;
+		case 0x0D:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Caching status reset to "
+				    "default\n");
+			break;
+		case 0x0E:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - DASD Fast Write inhibited\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Reserved\n");
+		}
+		break;
+
+	default:	/* unknown message format - should not happen
+			   internal error 03 - unknown message format */
+		snprintf(errorstring, ERRORLENGTH, "03 %02x", msg_format);
+		dev_err(&device->cdev->dev,
+			 "An error occurred in the DASD device driver, "
+			 "reason=%s\n", errorstring);
+		break;
+	}			/* end switch message format */
+
+}				/* end dasd_3990_handle_env_data */
+
+/*
+ * DASD_3990_ERP_COM_REJ
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Command Reject' error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ *   sense		current sense data
+ *
+ * RETURN VALUES
+ *   erp		'new' erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->function = dasd_3990_erp_com_rej;
+
+	/* env data present (ACTION 10 - retry should work) */
+	if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Command Reject - environmental data present");
+
+		dasd_3990_handle_env_data(erp, sense);
+
+		erp->retries = 5;
+
+	} else if (sense[1] & SNS1_WRITE_INHIBITED) {
+		dev_err(&device->cdev->dev, "An I/O request was rejected"
+			" because writing is inhibited\n");
+		erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+	} else {
+		/* fatal error -  set status to FAILED
+		   internal error 09 - Command Reject */
+		dev_err(&device->cdev->dev, "An error occurred in the DASD "
+			"device driver, reason=%s\n", "09");
+
+		erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_com_rej */
+
+/*
+ * DASD_3990_ERP_BUS_OUT
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Bus Out Parity Check' error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_bus_out(struct dasd_ccw_req * erp)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	/* first time set initial retry counter and erp_function */
+	/* and retry once without blocking queue		 */
+	/* (this enables easier enqueueing of the cqr)		 */
+	if (erp->function != dasd_3990_erp_bus_out) {
+		erp->retries = 256;
+		erp->function = dasd_3990_erp_bus_out;
+
+	} else {
+
+		/* issue a message and wait for 'device ready' interrupt */
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "bus out parity error or BOPC requested by "
+			    "channel");
+
+		dasd_3990_erp_block_queue(erp, 60*HZ);
+
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_bus_out */
+
+/*
+ * DASD_3990_ERP_EQUIP_CHECK
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Equipment Check' error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->function = dasd_3990_erp_equip_check;
+
+	if (sense[1] & SNS1_WRITE_INHIBITED) {
+		dev_info(&device->cdev->dev,
+			    "Write inhibited path encountered\n");
+
+		/* vary path offline
+		   internal error 04 - Path should be varied off-line.*/
+		dev_err(&device->cdev->dev, "An error occurred in the DASD "
+			"device driver, reason=%s\n", "04");
+
+		erp = dasd_3990_erp_action_1(erp);
+
+	} else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Equipment Check - environmental data present");
+
+		dasd_3990_handle_env_data(erp, sense);
+
+		erp = dasd_3990_erp_action_4(erp, sense);
+
+	} else if (sense[1] & SNS1_PERM_ERR) {
+
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Equipment Check - retry exhausted or "
+			    "undesirable");
+
+		erp = dasd_3990_erp_action_1(erp);
+
+	} else {
+		/* all other equipment checks - Action 5 */
+		/* rest is done when retries == 0 */
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Equipment check or processing error");
+
+		erp = dasd_3990_erp_action_5(erp);
+	}
+	return erp;
+
+}				/* end dasd_3990_erp_equip_check */
+
+/*
+ * DASD_3990_ERP_DATA_CHECK
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Data Check' error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->function = dasd_3990_erp_data_check;
+
+	if (sense[2] & SNS2_CORRECTABLE) {	/* correctable data check */
+
+		/* issue message that the data has been corrected */
+		dev_emerg(&device->cdev->dev,
+			    "Data recovered during retry with PCI "
+			    "fetch mode active\n");
+
+		/* not possible to handle this situation in Linux */
+		panic("No way to inform application about the possibly "
+		      "incorrect data");
+
+	} else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Uncorrectable data check recovered secondary "
+			    "addr of duplex pair");
+
+		erp = dasd_3990_erp_action_4(erp, sense);
+
+	} else if (sense[1] & SNS1_PERM_ERR) {
+
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Uncorrectable data check with internal "
+			    "retry exhausted");
+
+		erp = dasd_3990_erp_action_1(erp);
+
+	} else {
+		/* all other data checks */
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Uncorrectable data check with retry count "
+			    "exhausted...");
+
+		erp = dasd_3990_erp_action_5(erp);
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_data_check */
+
+/*
+ * DASD_3990_ERP_OVERRUN
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Overrun' error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->function = dasd_3990_erp_overrun;
+
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+		    "Overrun - service overrun or overrun"
+		    " error requested by channel");
+
+	erp = dasd_3990_erp_action_5(erp);
+
+	return erp;
+
+}				/* end dasd_3990_erp_overrun */
+
+/*
+ * DASD_3990_ERP_INV_FORMAT
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Invalid Track Format' error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->function = dasd_3990_erp_inv_format;
+
+	if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Track format error when destaging or "
+			    "staging data");
+
+		dasd_3990_handle_env_data(erp, sense);
+
+		erp = dasd_3990_erp_action_4(erp, sense);
+
+	} else {
+		/* internal error 06 - The track format is not valid*/
+		dev_err(&device->cdev->dev,
+			"An error occurred in the DASD device driver, "
+			"reason=%s\n", "06");
+
+		erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_inv_format */
+
+/*
+ * DASD_3990_ERP_EOC
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'End-of-Cylinder' error.
+ *
+ * PARAMETER
+ *   erp		already added default erp
+ * RETURN VALUES
+ *   erp		pointer to original (failed) cqr.
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense)
+{
+
+	struct dasd_device *device = default_erp->startdev;
+
+	dev_err(&device->cdev->dev,
+		"The cylinder data for accessing the DASD is inconsistent\n");
+
+	/* implement action 7 - BUG */
+	return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+
+}				/* end dasd_3990_erp_EOC */
+
+/*
+ * DASD_3990_ERP_ENV_DATA
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Environmental-Data Present' error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->function = dasd_3990_erp_env_data;
+
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Environmental data present");
+
+	dasd_3990_handle_env_data(erp, sense);
+
+	/* don't retry on disabled interface */
+	if (sense[7] != 0x0F) {
+		erp = dasd_3990_erp_action_4(erp, sense);
+	} else {
+		erp->status = DASD_CQR_FILLED;
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_env_data */
+
+/*
+ * DASD_3990_ERP_NO_REC
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'No Record Found' error.
+ *
+ * PARAMETER
+ *   erp		already added default ERP
+ *
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense)
+{
+
+	struct dasd_device *device = default_erp->startdev;
+
+	dev_err(&device->cdev->dev,
+		    "The specified record was not found\n");
+
+	return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+
+}				/* end dasd_3990_erp_no_rec */
+
+/*
+ * DASD_3990_ERP_FILE_PROT
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'File Protected' error.
+ *   Note: Seek related recovery is not implemented because
+ *	   we don't use the seek command yet.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	dev_err(&device->cdev->dev, "Accessing the DASD failed because of "
+		"a hardware error\n");
+
+	return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+
+}				/* end dasd_3990_erp_file_prot */
+
+/*
+ * DASD_3990_ERP_INSPECT_ALIAS
+ *
+ * DESCRIPTION
+ *   Checks if the original request was started on an alias device.
+ *   If yes, it modifies the original and the erp request so that
+ *   the erp request can be started on a base device.
+ *
+ * PARAMETER
+ *   erp		pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ *   erp		pointer to the modified ERP, or NULL
+ */
+
+static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
+						struct dasd_ccw_req *erp)
+{
+	struct dasd_ccw_req *cqr = erp->refers;
+	char *sense;
+
+	if (cqr->block &&
+	    (cqr->block->base != cqr->startdev)) {
+
+		sense = dasd_get_sense(&erp->refers->irb);
+		/*
+		 * dynamic pav may have changed base alias mapping
+		 */
+		if (!test_bit(DASD_FLAG_OFFLINE, &cqr->startdev->flags) && sense
+		    && (sense[0] == 0x10) && (sense[7] == 0x0F)
+		    && (sense[8] == 0x67)) {
+			/*
+			 * remove device from alias handling to prevent new
+			 * requests from being scheduled on the
+			 * wrong alias device
+			 */
+			dasd_alias_remove_device(cqr->startdev);
+
+			/* schedule worker to reload device */
+			dasd_reload_device(cqr->startdev);
+		}
+
+		if (cqr->startdev->features & DASD_FEATURE_ERPLOG) {
+			DBF_DEV_EVENT(DBF_ERR, cqr->startdev,
+				    "ERP on alias device for request %p,"
+				    " recover on base device %s", cqr,
+				    dev_name(&cqr->block->base->cdev->dev));
+		}
+		dasd_eckd_reset_ccw_to_base_io(cqr);
+		erp->startdev = cqr->block->base;
+		erp->function = dasd_3990_erp_inspect_alias;
+		return erp;
+	} else
+		return NULL;
+}
+
+
+/*
+ * DASD_3990_ERP_INSPECT_24
+ *
+ * DESCRIPTION
+ *   Does a detailed inspection of the 24 byte sense data
+ *   and sets up a related error recovery action.
+ *
+ * PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ *   erp		pointer to the (additional) ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inspect_24(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_ccw_req *erp_filled = NULL;
+
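+	/* the checks below are evaluated in order; the first matching
+	 * sense condition selects the recovery action */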
+	/* Check sense for ....	   */
+	/* 'Command Reject'	   */
+	if ((erp_filled == NULL) && (sense[0] & SNS0_CMD_REJECT)) {
+		erp_filled = dasd_3990_erp_com_rej(erp, sense);
+	}
+	/* 'Intervention Required' */
+	if ((erp_filled == NULL) && (sense[0] & SNS0_INTERVENTION_REQ)) {
+		erp_filled = dasd_3990_erp_int_req(erp);
+	}
+	/* 'Bus Out Parity Check'  */
+	if ((erp_filled == NULL) && (sense[0] & SNS0_BUS_OUT_CHECK)) {
+		erp_filled = dasd_3990_erp_bus_out(erp);
+	}
+	/* 'Equipment Check'	   */
+	if ((erp_filled == NULL) && (sense[0] & SNS0_EQUIPMENT_CHECK)) {
+		erp_filled = dasd_3990_erp_equip_check(erp, sense);
+	}
+	/* 'Data Check'		   */
+	if ((erp_filled == NULL) && (sense[0] & SNS0_DATA_CHECK)) {
+		erp_filled = dasd_3990_erp_data_check(erp, sense);
+	}
+	/* 'Overrun'		   */
+	if ((erp_filled == NULL) && (sense[0] & SNS0_OVERRUN)) {
+		erp_filled = dasd_3990_erp_overrun(erp, sense);
+	}
+	/* 'Invalid Track Format'  */
+	if ((erp_filled == NULL) && (sense[1] & SNS1_INV_TRACK_FORMAT)) {
+		erp_filled = dasd_3990_erp_inv_format(erp, sense);
+	}
+	/* 'End-of-Cylinder'	   */
+	if ((erp_filled == NULL) && (sense[1] & SNS1_EOC)) {
+		erp_filled = dasd_3990_erp_EOC(erp, sense);
+	}
+	/* 'Environmental Data'	   */
+	if ((erp_filled == NULL) && (sense[2] & SNS2_ENV_DATA_PRESENT)) {
+		erp_filled = dasd_3990_erp_env_data(erp, sense);
+	}
+	/* 'No Record Found'	   */
+	if ((erp_filled == NULL) && (sense[1] & SNS1_NO_REC_FOUND)) {
+		erp_filled = dasd_3990_erp_no_rec(erp, sense);
+	}
+	/* 'File Protected'	   */
+	if ((erp_filled == NULL) && (sense[1] & SNS1_FILE_PROTECTED)) {
+		erp_filled = dasd_3990_erp_file_prot(erp);
+	}
+	/* other (unknown) error - do default ERP */
+	if (erp_filled == NULL) {
+
+		erp_filled = erp;
+	}
+
+	return erp_filled;
+
+}				/* END dasd_3990_erp_inspect_24 */
+
+/*
+ *****************************************************************************
+ * 32 byte sense ERP functions (only)
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERP_ACTION_10_32
+ *
+ * DESCRIPTION
+ *   Handles 32 byte 'Action 10' of Single Program Action Codes.
+ *   Just retry and if retry doesn't work, return with error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ *   sense		current sense data
+ * RETURN VALUES
+ *   erp		modified erp_head
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->retries = 256;
+	erp->function = dasd_3990_erp_action_10_32;
+
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Perform logging requested");
+
+	return erp;
+
+}				/* end dasd_3990_erp_action_10_32 */
+
+/*
+ * DASD_3990_ERP_ACTION_1B_32
+ *
+ * DESCRIPTION
+ *   Handles 32 byte 'Action 1B' of Single Program Action Codes.
+ *   A write operation could not be finished because of an unexpected
+ *   condition.
+ *   The already created 'default erp' is used to get the link to
+ *   the erp chain, but it cannot be used for this recovery
+ *   action because it contains no DE/LO data space.
+ *
+ * PARAMETER
+ *   default_erp	already added default erp.
+ *   sense		current sense data
+ *
+ * RETURN VALUES
+ *   erp		new erp or
+ *			default_erp in case of imprecise ending or error
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
+{
+
+	struct dasd_device *device = default_erp->startdev;
+	__u32 cpa = 0;
+	struct dasd_ccw_req *cqr;
+	struct dasd_ccw_req *erp;
+	struct DE_eckd_data *DE_data;
+	struct PFX_eckd_data *PFX_data;
+	char *LO_data;		/* LO_eckd_data_t */
+	struct ccw1 *ccw, *oldccw;
+
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+		    "Write not finished because of unexpected condition");
+
+	default_erp->function = dasd_3990_erp_action_1B_32;
+
+	/* determine the original cqr */
+	cqr = default_erp;
+
+	while (cqr->refers != NULL) {
+		cqr = cqr->refers;
+	}
+
+	if (scsw_is_tm(&cqr->irb.scsw)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "32 byte sense, action 1B is not defined"
+			      " in transport mode - just retry");
+		return default_erp;
+	}
+
+	/* for imprecise ending just do default erp */
+	if (sense[1] & 0x01) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Imprecise ending is set - just retry");
+
+		return default_erp;
+	}
+
+	/* determine the address of the CCW to be restarted */
+	/* Imprecise ending is not set -> addr from IRB-SCSW */
+	cpa = default_erp->refers->irb.scsw.cmd.cpa;
+
+	if (cpa == 0) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Unable to determine address of the CCW "
+			    "to be restarted");
+
+		return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+	}
+
+	/* Build new ERP request including DE/LO */
+	erp = dasd_alloc_erp_request((char *) &cqr->magic,
+				     2 + 1,/* DE/LO + TIC */
+				     sizeof(struct DE_eckd_data) +
+				     sizeof(struct LO_eckd_data), device);
+
+	if (IS_ERR(erp)) {
+		/* internal error 01 - Unable to allocate ERP */
+		dev_err(&device->cdev->dev, "An error occurred in the DASD "
+			"device driver, reason=%s\n", "01");
+		return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+	}
+
+	/* use original DE */
+	DE_data = erp->data;
+	oldccw = cqr->cpaddr;
+	if (oldccw->cmd_code == DASD_ECKD_CCW_PFX) {
+		PFX_data = cqr->data;
+		memcpy(DE_data, &PFX_data->define_extent,
+		       sizeof(struct DE_eckd_data));
+	} else
+		memcpy(DE_data, cqr->data, sizeof(struct DE_eckd_data));
+
+	/* create LO */
+	LO_data = erp->data + sizeof(struct DE_eckd_data);
+
+	if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
+		/* should not happen */
+		return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+	}
+
+	if ((sense[7] & 0x3F) == 0x01) {
+		/* operation code is WRITE DATA -> data area orientation */
+		LO_data[0] = 0x81;
+
+	} else if ((sense[7] & 0x3F) == 0x03) {
+		/* operation code is FORMAT WRITE -> index orientation */
+		LO_data[0] = 0xC3;
+
+	} else {
+		LO_data[0] = sense[7];	/* operation */
+	}
+
+	LO_data[1] = sense[8];	/* auxiliary */
+	LO_data[2] = sense[9];
+	LO_data[3] = sense[3];	/* count */
+	LO_data[4] = sense[29];	/* seek_addr.cyl */
+	LO_data[5] = sense[30];	/* seek_addr.cyl 2nd byte */
+	LO_data[7] = sense[31];	/* seek_addr.head 2nd byte */
+
+	memcpy(&(LO_data[8]), &(sense[11]), 8);
+
+	/* create DE ccw */
+	ccw = erp->cpaddr;
+	memset(ccw, 0, sizeof(struct ccw1));
+	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
+	ccw->flags = CCW_FLAG_CC;
+	ccw->count = 16;
+	ccw->cda = (__u32)(addr_t) DE_data;
+
+	/* create LO ccw */
+	ccw++;
+	memset(ccw, 0, sizeof(struct ccw1));
+	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
+	ccw->flags = CCW_FLAG_CC;
+	ccw->count = 16;
+	ccw->cda = (__u32)(addr_t) LO_data;
+
+	/* TIC to the failed ccw */
+	ccw++;
+	ccw->cmd_code = CCW_CMD_TIC;
+	ccw->cda = cpa;
+
+	/* fill erp related fields */
+	erp->flags = default_erp->flags;
+	erp->function = dasd_3990_erp_action_1B_32;
+	erp->refers = default_erp->refers;
+	erp->startdev = device;
+	erp->memdev = device;
+	erp->magic = default_erp->magic;
+	erp->expires = default_erp->expires;
+	erp->retries = 256;
+	erp->buildclk = get_clock();
+	erp->status = DASD_CQR_FILLED;
+
+	/* remove the default erp */
+	dasd_free_erp_request(default_erp, device);
+
+	return erp;
+
+}				/* end dasd_3990_erp_action_1B_32 */
+
+/*
+ * DASD_3990_UPDATE_1B
+ *
+ * DESCRIPTION
+ *   Handles the update to the 32 byte 'Action 1B' of Single Program
+ *   Action Codes in case the first action was not successful.
+ *   The already created 'previous_erp' is the ERP that has not been
+ *   successful so far.
+ *
+ * PARAMETER
+ *   previous_erp	already created previous erp.
+ *   sense		current sense data
+ * RETURN VALUES
+ *   erp		modified erp
+ */
+static struct dasd_ccw_req *
+dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
+{
+
+	struct dasd_device *device = previous_erp->startdev;
+	__u32 cpa = 0;
+	struct dasd_ccw_req *cqr;
+	struct dasd_ccw_req *erp;
+	char *LO_data;		/* struct LO_eckd_data */
+	struct ccw1 *ccw;
+
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+		    "Write not finished because of unexpected condition"
+		    " - follow on");
+
+	/* determine the original cqr */
+	cqr = previous_erp;
+
+	while (cqr->refers != NULL) {
+		cqr = cqr->refers;
+	}
+
+	if (scsw_is_tm(&cqr->irb.scsw)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "32 byte sense, action 1B, update,"
+			      " in transport mode - just retry");
+		return previous_erp;
+	}
+
+	/* for imprecise ending just do default erp */
+	if (sense[1] & 0x01) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Imprecise ending is set - just retry");
+
+		previous_erp->status = DASD_CQR_FILLED;
+
+		return previous_erp;
+	}
+
+	/* determine the address of the CCW to be restarted */
+	/* Imprecise ending is not set -> addr from IRB-SCSW */
+	cpa = previous_erp->irb.scsw.cmd.cpa;
+
+	if (cpa == 0) {
+		/* internal error 02 -
+		   Unable to determine address of the CCW to be restarted */
+		dev_err(&device->cdev->dev, "An error occurred in the DASD "
+			"device driver, reason=%s\n", "02");
+
+		previous_erp->status = DASD_CQR_FAILED;
+
+		return previous_erp;
+	}
+
+	erp = previous_erp;
+
+	/* update the LO with the new returned sense data  */
+	LO_data = erp->data + sizeof(struct DE_eckd_data);
+
+	if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
+		/* should not happen */
+		previous_erp->status = DASD_CQR_FAILED;
+
+		return previous_erp;
+	}
+
+	if ((sense[7] & 0x3F) == 0x01) {
+		/* operation code is WRITE DATA -> data area orientation */
+		LO_data[0] = 0x81;
+
+	} else if ((sense[7] & 0x3F) == 0x03) {
+		/* operation code is FORMAT WRITE -> index orientation */
+		LO_data[0] = 0xC3;
+
+	} else {
+		LO_data[0] = sense[7];	/* operation */
+	}
+
+	LO_data[1] = sense[8];	/* auxiliary */
+	LO_data[2] = sense[9];
+	LO_data[3] = sense[3];	/* count */
+	LO_data[4] = sense[29];	/* seek_addr.cyl */
+	LO_data[5] = sense[30];	/* seek_addr.cyl 2nd byte */
+	LO_data[7] = sense[31];	/* seek_addr.head 2nd byte */
+
+	memcpy(&(LO_data[8]), &(sense[11]), 8);
+
+	/* TIC to the failed ccw */
+	ccw = erp->cpaddr;	/* addr of DE ccw */
+	ccw++;			/* addr of LO ccw */
+	ccw++;			/* addr of TIC ccw */
+	ccw->cda = cpa;
+
+	erp->status = DASD_CQR_FILLED;
+
+	return erp;
+
+}				/* end dasd_3990_update_1B */
+
+/*
+ * DASD_3990_ERP_COMPOUND_RETRY
+ *
+ * DESCRIPTION
+ *   Handles the compound ERP action retry code.
+ *   NOTE: At least one retry is done even if zero is specified
+ *	   by the sense data. This makes enqueueing of the request
+ *	   easier.
+ *
+ * PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ *   erp		modified ERP pointer
+ *
+ */
+static void
+dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
+{
+
+	switch (sense[25] & 0x03) {
+	case 0x00:		/* do not retry */
+		erp->retries = 1;
+		break;
+
+	case 0x01:		/* retry 2 times */
+		erp->retries = 2;
+		break;
+
+	case 0x02:		/* retry 10 times */
+		erp->retries = 10;
+		break;
+
+	case 0x03:		/* retry 256 times */
+		erp->retries = 256;
+		break;
+
+	default:
+		BUG();
+	}
+
+	erp->function = dasd_3990_erp_compound_retry;
+
+}				/* end dasd_3990_erp_compound_retry */
+
+/*
+ * DASD_3990_ERP_COMPOUND_PATH
+ *
+ * DESCRIPTION
+ *   Handles the compound ERP action for retry on alternate
+ *   channel path.
+ *
+ * PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ *   erp		modified ERP pointer
+ *
+ */
+static void
+dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
+{
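+	/* only retry on an alternate path if the sense data requests it */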
+	if (sense[25] & DASD_SENSE_BIT_3) {
+		dasd_3990_erp_alternate_path(erp);
+
+		if (erp->status == DASD_CQR_FAILED &&
+		    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
+			/* reset the lpm and the status to be able to
+			 * try further actions. */
+			erp->lpm = erp->startdev->path_data.opm;
+			erp->status = DASD_CQR_NEED_ERP;
+		}
+	}
+
+	erp->function = dasd_3990_erp_compound_path;
+
+}				/* end dasd_3990_erp_compound_path */
+
+/*
+ * DASD_3990_ERP_COMPOUND_CODE
+ *
+ * DESCRIPTION
+ *   Handles the compound ERP action for retry code.
+ *
+ * PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ *   erp		NEW ERP pointer
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_compound_code(struct dasd_ccw_req * erp, char *sense)
+{
+
+	if (sense[25] & DASD_SENSE_BIT_2) {
+
+		switch (sense[28]) {
+		case 0x17:
+			/* issue a Diagnostic Control command with an
+			 * Inhibit Write subcommand and controller modifier */
+			erp = dasd_3990_erp_DCTL(erp, 0x20);
+			break;
+
+		case 0x25:
+			/* wait for 5 seconds and retry again */
+			erp->retries = 1;
+
+			dasd_3990_erp_block_queue(erp, 5*HZ);
+			break;
+
+		default:
+			/* should not happen - continue */
+			break;
+		}
+	}
+
+	erp->function = dasd_3990_erp_compound_code;
+
+	return erp;
+
+}				/* end dasd_3990_erp_compound_code */
+
+/*
+ * DASD_3990_ERP_COMPOUND_CONFIG
+ *
+ * DESCRIPTION
+ *   Handles the compound ERP action for configuration
+ *   dependent error.
+ *   Note: duplex handling is not implemented (yet).
+ *
+ * PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ *   erp		modified ERP pointer
+ *
+ */
+static void
+dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
+{
+
+	if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) {
+
+		/* set to suspended duplex state then restart
+		   internal error 05 - Set device to suspended duplex state
+		   should be done */
+		struct dasd_device *device = erp->startdev;
+		dev_err(&device->cdev->dev,
+			"An error occurred in the DASD device driver, "
+			"reason=%s\n", "05");
+
+	}
+
+	erp->function = dasd_3990_erp_compound_config;
+
+}				/* end dasd_3990_erp_compound_config */
+
+/*
+ * DASD_3990_ERP_COMPOUND
+ *
+ * DESCRIPTION
+ *   Does the further compound program action if
+ *   compound retry was not successful.
+ *
+ * PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the current (failed) ERP
+ *
+ * RETURN VALUES
+ *   erp		(additional) ERP pointer
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
+{
+
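+	/* step through the compound recovery stages (path, code, config) in
+	 * order; each stage runs only while the request still needs ERP */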
+	if ((erp->function == dasd_3990_erp_compound_retry) &&
+	    (erp->status == DASD_CQR_NEED_ERP)) {
+
+		dasd_3990_erp_compound_path(erp, sense);
+	}
+
+	if ((erp->function == dasd_3990_erp_compound_path) &&
+	    (erp->status == DASD_CQR_NEED_ERP)) {
+
+		erp = dasd_3990_erp_compound_code(erp, sense);
+	}
+
+	if ((erp->function == dasd_3990_erp_compound_code) &&
+	    (erp->status == DASD_CQR_NEED_ERP)) {
+
+		dasd_3990_erp_compound_config(erp, sense);
+	}
+
+	/* if no compound action ERP specified, the request failed */
+	if (erp->status == DASD_CQR_NEED_ERP)
+		erp->status = DASD_CQR_FAILED;
+
+	return erp;
+
+}				/* end dasd_3990_erp_compound */
+
+/*
+ * DASD_3990_ERP_HANDLE_SIM
+ *
+ * DESCRIPTION
+ *   Inspects the SIM SENSE data and starts an appropriate action.
+ *
+ * PARAMETER
+ *   sense	   sense data of the actual error
+ *
+ * RETURN VALUES
+ *   none
+ */
+void
+dasd_3990_erp_handle_sim(struct dasd_device *device, char *sense)
+{
+	/* print message according to log or message to operator mode */
+	if ((sense[24] & DASD_SIM_MSG_TO_OP) || (sense[1] & 0x10)) {
+		/* print SIM SRC from RefCode */
+		dev_err(&device->cdev->dev, "SIM - SRC: "
+			    "%02x%02x%02x%02x\n", sense[22],
+			    sense[23], sense[11], sense[12]);
+	} else if (sense[24] & DASD_SIM_LOG) {
+		/* print SIM SRC Refcode */
+		dev_warn(&device->cdev->dev, "log SIM - SRC: "
+			    "%02x%02x%02x%02x\n", sense[22],
+			    sense[23], sense[11], sense[12]);
+	}
+}
+
+/*
+ * DASD_3990_ERP_INSPECT_32
+ *
+ * DESCRIPTION
+ *   Does a detailed inspection of the 32 byte sense data
+ *   and sets up a related error recovery action.
+ *
+ * PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ *   erp_filled		pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->function = dasd_3990_erp_inspect_32;
+
+	/* check for SIM sense data */
+	if ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)
+		dasd_3990_erp_handle_sim(device, sense);
+
+	if (sense[25] & DASD_SENSE_BIT_0) {
+
+		/* compound program action codes (byte25 bit 0 == '1') */
+		dasd_3990_erp_compound_retry(erp, sense);
+
+	} else {
+
+		/* single program action codes (byte25 bit 0 == '0') */
+		switch (sense[25]) {
+
+		case 0x00:	/* success - use default ERP for retries */
+			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+				    "ERP called for successful request"
+				    " - just retry");
+			break;
+
+		case 0x01:	/* fatal error */
+			dev_err(&device->cdev->dev,
+				    "ERP failed for the DASD\n");
+
+			erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+			break;
+
+		case 0x02:	/* intervention required */
+		case 0x03:	/* intervention required during dual copy */
+			erp = dasd_3990_erp_int_req(erp);
+			break;
+
+		case 0x0F:  /* length mismatch during update write command
+			       internal error 08 - update write command error*/
+			dev_err(&device->cdev->dev, "An error occurred in the "
+				"DASD device driver, reason=%s\n", "08");
+
+			erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+			break;
+
+		case 0x10:  /* logging required for other channel program */
+			erp = dasd_3990_erp_action_10_32(erp, sense);
+			break;
+
+		case 0x15:	/* next track outside defined extent
+				   internal error 07 - The next track is not
+				   within the defined storage extent */
+			dev_err(&device->cdev->dev,
+				"An error occurred in the DASD device driver, "
+				"reason=%s\n", "07");
+
+			erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+			break;
+
+		case 0x1B:	/* unexpected condition during write */
+
+			erp = dasd_3990_erp_action_1B_32(erp, sense);
+			break;
+
+		case 0x1C:	/* invalid data */
+			dev_emerg(&device->cdev->dev,
+				    "Data recovered during retry with PCI "
+				    "fetch mode active\n");
+
+			/* not possible to handle this situation in Linux */
+			panic
+			    ("Invalid data - No way to inform application "
+			     "about the possibly incorrect data");
+			break;
+
+		case 0x1D:	/* state-change pending */
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				    "A State change pending condition exists "
+				    "for the subsystem or device");
+
+			erp = dasd_3990_erp_action_4(erp, sense);
+			break;
+
+		case 0x1E:	/* busy */
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				    "Busy condition exists "
+				    "for the subsystem or device");
+			erp = dasd_3990_erp_action_4(erp, sense);
+			break;
+
+		default:	/* all other errors - default erp */
+			break;
+		}
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_inspect_32 */
+
+/*
+ *****************************************************************************
+ * main ERP control functions (24 and 32 byte sense)
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERP_CONTROL_CHECK
+ *
+ * DESCRIPTION
+ *   Does a generic inspection of whether a control check occurred and sets up
+ *   the related error recovery procedure
+ *
+ * PARAMETER
+ *   erp		pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ *   erp_filled		pointer to the erp
+ */
+
+static struct dasd_ccw_req *
+dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
+{
+	struct dasd_device *device = erp->startdev;
+
+	if (scsw_cstat(&erp->refers->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK
+					   | SCHN_STAT_CHN_CTRL_CHK)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "channel or interface control check");
+		erp = dasd_3990_erp_action_4(erp, NULL);
+	}
+	return erp;
+}
+
+/*
+ * DASD_3990_ERP_INSPECT
+ *
+ * DESCRIPTION
+ *   Does a detailed inspection for sense data by calling either
+ *   the 24-byte or the 32-byte inspection routine.
+ *
+ * PARAMETER
+ *   erp		pointer to the currently created default ERP
+ * RETURN VALUES
+ *   erp_new		contents were possibly modified
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inspect(struct dasd_ccw_req *erp)
+{
+
+	struct dasd_ccw_req *erp_new = NULL;
+	char *sense;
+
+	/* if this problem occurred on an alias device, retry on the base */
+	erp_new = dasd_3990_erp_inspect_alias(erp);
+	if (erp_new)
+		return erp_new;
+
+	/* sense data are located in the refers record of the
+	 * already set up new ERP !
+	 * check if concurrent sense is available
+	 */
+	sense = dasd_get_sense(&erp->refers->irb);
+	if (!sense)
+		erp_new = dasd_3990_erp_control_check(erp);
+	/* distinguish between 24 and 32 byte sense data */
+	else if (sense[27] & DASD_SENSE_BIT_0) {
+
+		/* inspect the 24 byte sense data */
+		erp_new = dasd_3990_erp_inspect_24(erp, sense);
+
+	} else {
+
+		/* inspect the 32 byte sense data */
+		erp_new = dasd_3990_erp_inspect_32(erp, sense);
+
+	}	/* end distinguish between 24 and 32 byte sense data */
+
+	return erp_new;
+}
+
+/*
+ * DASD_3990_ERP_ADD_ERP
+ *
+ * DESCRIPTION
+ *   This function adds an additional request block (ERP) to the head of
+ *   the given cqr (or erp).
+ *   For a command mode cqr the erp is initialized as a default erp
+ *   (retry TIC).
+ *   For transport mode we make a copy of the original TCW (points to
+ *   the original TCCB, TIDALs, etc.) but give it a fresh
+ *   TSB so the original sense data will not be changed.
+ *
+ * PARAMETER
+ *   cqr		head of the current ERP-chain (or single cqr if
+ *			first error)
+ * RETURN VALUES
+ *   erp		pointer to new ERP-chain head
+ */
+static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
+{
+
+	struct dasd_device *device = cqr->startdev;
+	struct ccw1 *ccw;
+	struct dasd_ccw_req *erp;
+	int cplength, datasize;
+	struct tcw *tcw;
+	struct tsb *tsb;
+
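+	/* cpmode == 1 means the original request uses transport mode (TCW) */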
+	if (cqr->cpmode == 1) {
+		cplength = 0;
+		/* TCW needs to be 64 byte aligned, so leave enough room */
+		datasize = 64 + sizeof(struct tcw) + sizeof(struct tsb);
+	} else {
+		cplength = 2;
+		datasize = 0;
+	}
+
+	/* allocate additional request block */
+	erp = dasd_alloc_erp_request((char *) &cqr->magic,
+				     cplength, datasize, device);
+	if (IS_ERR(erp)) {
+		if (cqr->retries <= 0) {
+			DBF_DEV_EVENT(DBF_ERR, device, "%s",
+				    "Unable to allocate ERP request");
+			cqr->status = DASD_CQR_FAILED;
+			cqr->stopclk = get_clock();
+		} else {
+			DBF_DEV_EVENT(DBF_ERR, device,
+				     "Unable to allocate ERP request "
+				     "(%i retries left)",
+				     cqr->retries);
+			dasd_block_set_timer(device->block, (HZ << 3));
+		}
+		return erp;
+	}
+
+	ccw = cqr->cpaddr;
+	if (cqr->cpmode == 1) {
+		/* make a shallow copy of the original tcw but set new tsb */
+		erp->cpmode = 1;
+		erp->cpaddr = PTR_ALIGN(erp->data, 64);
+		tcw = erp->cpaddr;
+		tsb = (struct tsb *) &tcw[1];
+		*tcw = *((struct tcw *)cqr->cpaddr);
+		tcw->tsb = (long)tsb;
+	} else if (ccw->cmd_code == DASD_ECKD_CCW_PSF) {
+		/* PSF cannot be chained from NOOP/TIC */
+		erp->cpaddr = cqr->cpaddr;
+	} else {
+		/* initialize request with default TIC to current ERP/CQR */
+		ccw = erp->cpaddr;
+		ccw->cmd_code = CCW_CMD_NOOP;
+		ccw->flags = CCW_FLAG_CC;
+		ccw++;
+		ccw->cmd_code = CCW_CMD_TIC;
+		ccw->cda      = (long)(cqr->cpaddr);
+	}
+
+	erp->flags = cqr->flags;
+	erp->function = dasd_3990_erp_add_erp;
+	erp->refers   = cqr;
+	erp->startdev = device;
+	erp->memdev   = device;
+	erp->block    = cqr->block;
+	erp->magic    = cqr->magic;
+	erp->expires  = cqr->expires;
+	erp->retries  = 256;
+	erp->buildclk = get_clock();
+	erp->status = DASD_CQR_FILLED;
+
+	return erp;
+}
+
+/*
+ * DASD_3990_ERP_ADDITIONAL_ERP
+ *
+ * DESCRIPTION
+ *   An additional ERP is needed to handle the current error.
+ *   Add ERP to the head of the ERP-chain containing the ERP processing
+ *   determined based on the sense data.
+ *
+ * PARAMETER
+ *   cqr		head of the current ERP-chain (or single cqr if
+ *			first error)
+ *
+ * RETURN VALUES
+ *   erp		pointer to new ERP-chain head
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr)
+{
+
+	struct dasd_ccw_req *erp = NULL;
+
+	/* add erp and initialize with default TIC */
+	erp = dasd_3990_erp_add_erp(cqr);
+
+	if (IS_ERR(erp))
+		return erp;
+
+	/* inspect sense, determine specific ERP if possible */
+	if (erp != cqr) {
+
+		erp = dasd_3990_erp_inspect(erp);
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_additional_erp */
+
+/*
+ * DASD_3990_ERP_ERROR_MATCH
+ *
+ * DESCRIPTION
+ *   Check if the device status of the two given cqrs is the same.
+ *   This means that the failed CCW and the relevant sense data
+ *   must match.
+ *   I don't distinguish between 24 and 32 byte sense because in case of
+ *   24 byte sense, bytes 25 and 27 are set as well.
+ *
+ * PARAMETER
+ *   cqr1		first cqr, which will be compared with the
+ *   cqr2		second cqr.
+ *
+ * RETURN VALUES
+ *   match		'boolean' for match found
+ *			returns 1 if match found, otherwise 0.
+ */
+static int dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1,
+				     struct dasd_ccw_req *cqr2)
+{
+	char *sense1, *sense2;
+
+	if (cqr1->startdev != cqr2->startdev)
+		return 0;
+
+	sense1 = dasd_get_sense(&cqr1->irb);
+	sense2 = dasd_get_sense(&cqr2->irb);
+
+	/* one request has sense data, the other not -> no match, return 0 */
+	if (!sense1 != !sense2)
+		return 0;
+	/* no sense data in both cases -> check cstat for IFCC */
+	if (!sense1 && !sense2)	{
+		if ((scsw_cstat(&cqr1->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK |
+						    SCHN_STAT_CHN_CTRL_CHK)) ==
+		    (scsw_cstat(&cqr2->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK |
+						    SCHN_STAT_CHN_CTRL_CHK)))
+			return 1; /* match with ifcc*/
+	}
+	/* check sense data; byte 0-2,25,27 */
+	if (!(sense1 && sense2 &&
+	      (memcmp(sense1, sense2, 3) == 0) &&
+	      (sense1[27] == sense2[27]) &&
+	      (sense1[25] == sense2[25]))) {
+
+		return 0;	/* sense doesn't match */
+	}
+
+	return 1;		/* match */
+
+}				/* end dasd_3990_erp_error_match */
+
+/*
+ * DASD_3990_ERP_IN_ERP
+ *
+ * DESCRIPTION
+ *   Check if the current error has occurred before.
+ *   Quick exit if the current cqr is not an ERP (cqr->refers == NULL).
+ *
+ * PARAMETER
+ *   cqr		failed cqr (either original cqr or already an erp)
+ *
+ * RETURN VALUES
+ *   erp		erp-pointer to the already defined error
+ *			recovery procedure OR
+ *			NULL if a 'new' error occurred.
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_in_erp(struct dasd_ccw_req *cqr)
+{
+
+	struct dasd_ccw_req *erp_head = cqr,	/* save erp chain head */
+	*erp_match = NULL;	/* possible matching erp */
+	int match = 0;		/* 'boolean' for matching error found */
+
+	if (cqr->refers == NULL) {	/* return if not in erp */
+		return NULL;
+	}
+
+	/* check the erp/cqr chain for current error */
+	do {
+		match = dasd_3990_erp_error_match(erp_head, cqr->refers);
+		erp_match = cqr;	/* save possible matching erp  */
+		cqr = cqr->refers;	/* check next erp/cqr in queue */
+
+	} while ((cqr->refers != NULL) && (!match));
+
+	if (!match) {
+		return NULL;	/* no match was found */
+	}
+
+	return erp_match;	/* return address of matching erp */
+
+}				/* END dasd_3990_erp_in_erp */
+
+/*
+ * DASD_3990_ERP_FURTHER_ERP (24 & 32 byte sense)
+ *
+ * DESCRIPTION
+ *   No retry is left for the current ERP. Check what has to be done
+ *   with the ERP.
+ *     - do further defined ERP action or
+ *     - wait for interrupt or
+ *     - exit with permanent error
+ *
+ * PARAMETER
+ *   erp		ERP which is in progress with no retry left
+ *
+ * RETURN VALUES
+ *   erp		modified/additional ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
+{
+
+	struct dasd_device *device = erp->startdev;
+	char *sense = dasd_get_sense(&erp->irb);
+
+	/* check for 24 byte sense ERP */
+	if ((erp->function == dasd_3990_erp_bus_out) ||
+	    (erp->function == dasd_3990_erp_action_1) ||
+	    (erp->function == dasd_3990_erp_action_4)) {
+
+		erp = dasd_3990_erp_action_1(erp);
+
+	} else if (erp->function == dasd_3990_erp_action_1_sec) {
+		erp = dasd_3990_erp_action_1_sec(erp);
+	} else if (erp->function == dasd_3990_erp_action_5) {
+
+		/* retries have not been successful */
+		/* prepare erp for retry on different channel path */
+		erp = dasd_3990_erp_action_1(erp);
+
+		if (sense && !(sense[2] & DASD_SENSE_BIT_0)) {
+
+			/* issue a Diagnostic Control command with an
+			 * Inhibit Write subcommand */
+
+			switch (sense[25]) {
+			case 0x17:
+			case 0x57:{	/* controller */
+					erp = dasd_3990_erp_DCTL(erp, 0x20);
+					break;
+				}
+			case 0x18:
+			case 0x58:{	/* channel path */
+					erp = dasd_3990_erp_DCTL(erp, 0x40);
+					break;
+				}
+			case 0x19:
+			case 0x59:{	/* storage director */
+					erp = dasd_3990_erp_DCTL(erp, 0x80);
+					break;
+				}
+			default:
+				DBF_DEV_EVENT(DBF_WARNING, device,
+					    "invalid subcommand modifier 0x%x "
+					    "for Diagnostic Control Command",
+					    sense[25]);
+			}
+		}
+
+		/* check for 32 byte sense ERP */
+	} else if (sense &&
+		   ((erp->function == dasd_3990_erp_compound_retry) ||
+		    (erp->function == dasd_3990_erp_compound_path) ||
+		    (erp->function == dasd_3990_erp_compound_code) ||
+		    (erp->function == dasd_3990_erp_compound_config))) {
+
+		erp = dasd_3990_erp_compound(erp, sense);
+
+	} else {
+		/*
+		 * No retry left and no additional special handling
+		 * necessary
+		 */
+		dev_err(&device->cdev->dev,
+			"ERP %p has run out of retries and failed\n", erp);
+
+		erp->status = DASD_CQR_FAILED;
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_further_erp */
+
+/*
+ * DASD_3990_ERP_HANDLE_MATCH_ERP
+ *
+ * DESCRIPTION
+ *   An error occurred again and an ERP has been detected which is already
+ *   used to handle this error (e.g. retries).
+ *   All prior ERP's are assumed to be successful and therefore removed
+ *   from queue.
+ *   If retry counter of matching erp is already 0, it is checked if further
+ *   action is needed (besides retry) or if the ERP has failed.
+ *
+ * PARAMETER
+ *   erp_head		first ERP in ERP-chain
+ *   erp		ERP that handles the actual error.
+ *			(matching erp)
+ *
+ * RETURN VALUES
+ *   erp		modified/additional ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
+			       struct dasd_ccw_req *erp)
+{
+
+	struct dasd_device *device = erp_head->startdev;
+	struct dasd_ccw_req *erp_done = erp_head;	/* finished req */
+	struct dasd_ccw_req *erp_free = NULL;	/* req to be freed */
+
+	/* loop over successful ERPs and remove them from chanq */
+	while (erp_done != erp) {
+
+		if (erp_done == NULL)	/* end of chain reached */
+			panic(PRINTK_HEADER "Programming error in ERP! The "
+			      "original request was lost\n");
+
+		/* remove the request from the device queue */
+		list_del(&erp_done->blocklist);
+
+		erp_free = erp_done;
+		erp_done = erp_done->refers;
+
+		/* free the finished erp request */
+		dasd_free_erp_request(erp_free, erp_free->memdev);
+
+	}			/* end while */
+
+	if (erp->retries > 0) {
+
+		char *sense = dasd_get_sense(&erp->refers->irb);
+
+		/* check for special retries */
+		if (sense && erp->function == dasd_3990_erp_action_4) {
+
+			erp = dasd_3990_erp_action_4(erp, sense);
+
+		} else if (sense &&
+			   erp->function == dasd_3990_erp_action_1B_32) {
+
+			erp = dasd_3990_update_1B(erp, sense);
+
+		} else if (sense && erp->function == dasd_3990_erp_int_req) {
+
+			erp = dasd_3990_erp_int_req(erp);
+
+		} else {
+			/* simple retry	  */
+			DBF_DEV_EVENT(DBF_DEBUG, device,
+				    "%i retries left for erp %p",
+				    erp->retries, erp);
+
+			/* handle the request again... */
+			erp->status = DASD_CQR_FILLED;
+		}
+
+	} else {
+		/* no retry left - check for further necessary action	 */
+		/* if no further actions, handle rest as permanent error */
+		erp = dasd_3990_erp_further_erp(erp);
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_handle_match_erp */
+
+/*
+ * DASD_3990_ERP_ACTION
+ *
+ * DESCRIPTION
+ *   control routine for 3990 erp actions.
+ *   Has to be called with the queue lock (namely the s390_irq_lock) acquired.
+ *
+ * PARAMETER
+ *   cqr		failed cqr (either original cqr or already an erp)
+ *
+ * RETURN VALUES
+ *   erp		erp-pointer to the head of the ERP action chain.
+ *			This means:
+ *			 - either a ptr to an additional ERP cqr or
+ *			 - the original given cqr (whose status might
+ *			   be modified)
+ */
+struct dasd_ccw_req *
+dasd_3990_erp_action(struct dasd_ccw_req * cqr)
+{
+	struct dasd_ccw_req *erp = NULL;
+	struct dasd_device *device = cqr->startdev;
+	struct dasd_ccw_req *temp_erp = NULL;
+
+	if (device->features & DASD_FEATURE_ERPLOG) {
+		/* print current erp_chain */
+		dev_err(&device->cdev->dev,
+			    "ERP chain at BEGINNING of ERP-ACTION\n");
+		for (temp_erp = cqr;
+		     temp_erp != NULL; temp_erp = temp_erp->refers) {
+
+			dev_err(&device->cdev->dev,
+				    "ERP %p (%02x) refers to %p\n",
+				    temp_erp, temp_erp->status,
+				    temp_erp->refers);
+		}
+	}
+
+	/* double-check if current erp/cqr was successful */
+	if ((scsw_cstat(&cqr->irb.scsw) == 0x00) &&
+	    (scsw_dstat(&cqr->irb.scsw) ==
+	     (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
+
+		DBF_DEV_EVENT(DBF_DEBUG, device,
+			    "ERP called for successful request %p"
+			    " - NO ERP necessary", cqr);
+
+		cqr->status = DASD_CQR_DONE;
+
+		return cqr;
+	}
+
+	/* check if error happened before */
+	erp = dasd_3990_erp_in_erp(cqr);
+
+	if (erp == NULL) {
+		/* no matching erp found - set up erp */
+		erp = dasd_3990_erp_additional_erp(cqr);
+		if (IS_ERR(erp))
+			return erp;
+	} else {
+		/* matching erp found - set all leading erp's to DONE */
+		erp = dasd_3990_erp_handle_match_erp(cqr, erp);
+	}
+
+	if (device->features & DASD_FEATURE_ERPLOG) {
+		/* print current erp_chain */
+		dev_err(&device->cdev->dev,
+			    "ERP chain at END of ERP-ACTION\n");
+		for (temp_erp = erp;
+		     temp_erp != NULL; temp_erp = temp_erp->refers) {
+
+			dev_err(&device->cdev->dev,
+				    "ERP %p (%02x) refers to %p\n",
+				    temp_erp, temp_erp->status,
+				    temp_erp->refers);
+		}
+	}
+
+	/* enqueue ERP request if it's a new one */
+	if (list_empty(&erp->blocklist)) {
+		cqr->status = DASD_CQR_IN_ERP;
+		/* add erp request before the cqr */
+		list_add_tail(&erp->blocklist, &cqr->blocklist);
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_action */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_alias.c b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_alias.c
new file mode 100644
index 0000000..b3beed5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_alias.c
@@ -0,0 +1,965 @@
+/*
+ * PAV alias management for the DASD ECKD discipline
+ *
+ * Copyright IBM Corporation, 2007
+ * Author(s): Stefan Weinhuber <wein@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "dasd-eckd"
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <asm/ebcdic.h>
+#include "dasd_int.h"
+#include "dasd_eckd.h"
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif				/* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(eckd):"
+
+
+/*
+ * General concept of alias management:
+ * - PAV and DASD alias management is specific to the eckd discipline.
+ * - A device is connected to an lcu as long as the device exists.
+ *   dasd_alias_make_device_known_to_lcu will be called when the
+ *   device is checked by the eckd discipline and
+ *   dasd_alias_disconnect_device_from_lcu will be called
+ *   before the device is deleted.
+ * - The dasd_alias_add_device / dasd_alias_remove_device
+ *   functions mark the point when a device is 'ready for service'.
+ * - A summary unit check is a rare occasion, but it is mandatory to
+ *   support it. It requires some complex recovery actions before the
+ *   devices can be used again (see dasd_alias_handle_summary_unit_check).
+ * - dasd_alias_get_start_dev will find an alias device that can be used
+ *   instead of the base device and does some (very simple) load balancing.
+ *   This is the function that gets called for each I/O, so when improving
+ *   something, this function should get faster or better; the rest just has
+ *   to be correct.
+ */
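+
+/*
+ * Illustrative call sequence only (a sketch of the life cycle described
+ * above; see the eckd discipline, dasd_eckd.c, for the actual call sites):
+ *
+ *   dasd_alias_make_device_known_to_lcu(device);   during device check
+ *   dasd_alias_add_device(device);                 device ready for service
+ *   startdev = dasd_alias_get_start_dev(base);     chosen for each I/O
+ *   dasd_alias_remove_device(device);              device leaves service
+ *   dasd_alias_disconnect_device_from_lcu(device); before device deletion
+ */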
+
+
+static void summary_unit_check_handling_work(struct work_struct *);
+static void lcu_update_work(struct work_struct *);
+static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
+
+static struct alias_root aliastree = {
+	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
+	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
+};
+
+static struct alias_server *_find_server(struct dasd_uid *uid)
+{
+	struct alias_server *pos;
+	list_for_each_entry(pos, &aliastree.serverlist, server) {
+		if (!strncmp(pos->uid.vendor, uid->vendor,
+			     sizeof(uid->vendor))
+		    && !strncmp(pos->uid.serial, uid->serial,
+				sizeof(uid->serial)))
+			return pos;
+	}
+	return NULL;
+}
+
+static struct alias_lcu *_find_lcu(struct alias_server *server,
+				   struct dasd_uid *uid)
+{
+	struct alias_lcu *pos;
+	list_for_each_entry(pos, &server->lculist, lcu) {
+		if (pos->uid.ssid == uid->ssid)
+			return pos;
+	}
+	return NULL;
+}
+
+static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
+					   struct dasd_uid *uid)
+{
+	struct alias_pav_group *pos;
+	__u8 search_unit_addr;
+
+	/* for hyper pav there is only one group */
+	if (lcu->pav == HYPER_PAV) {
+		if (list_empty(&lcu->grouplist))
+			return NULL;
+		else
+			return list_first_entry(&lcu->grouplist,
+						struct alias_pav_group, group);
+	}
+
+	/* for base pav we have to find the group that matches the base */
+	if (uid->type == UA_BASE_DEVICE)
+		search_unit_addr = uid->real_unit_addr;
+	else
+		search_unit_addr = uid->base_unit_addr;
+	list_for_each_entry(pos, &lcu->grouplist, group) {
+		if (pos->uid.base_unit_addr == search_unit_addr &&
+		    !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
+			return pos;
+	}
+	return NULL;
+}
+
+static struct alias_server *_allocate_server(struct dasd_uid *uid)
+{
+	struct alias_server *server;
+
+	server = kzalloc(sizeof(*server), GFP_KERNEL);
+	if (!server)
+		return ERR_PTR(-ENOMEM);
+	memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
+	memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
+	INIT_LIST_HEAD(&server->server);
+	INIT_LIST_HEAD(&server->lculist);
+	return server;
+}
+
+static void _free_server(struct alias_server *server)
+{
+	kfree(server);
+}
+
+static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
+{
+	struct alias_lcu *lcu;
+
+	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
+	if (!lcu)
+		return ERR_PTR(-ENOMEM);
+	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
+	if (!lcu->uac)
+		goto out_err1;
+	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
+	if (!lcu->rsu_cqr)
+		goto out_err2;
+	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
+				       GFP_KERNEL | GFP_DMA);
+	if (!lcu->rsu_cqr->cpaddr)
+		goto out_err3;
+	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
+	if (!lcu->rsu_cqr->data)
+		goto out_err4;
+
+	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
+	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
+	lcu->uid.ssid = uid->ssid;
+	lcu->pav = NO_PAV;
+	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
+	INIT_LIST_HEAD(&lcu->lcu);
+	INIT_LIST_HEAD(&lcu->inactive_devices);
+	INIT_LIST_HEAD(&lcu->active_devices);
+	INIT_LIST_HEAD(&lcu->grouplist);
+	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
+	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
+	spin_lock_init(&lcu->lock);
+	init_completion(&lcu->lcu_setup);
+	return lcu;
+
+out_err4:
+	kfree(lcu->rsu_cqr->cpaddr);
+out_err3:
+	kfree(lcu->rsu_cqr);
+out_err2:
+	kfree(lcu->uac);
+out_err1:
+	kfree(lcu);
+	return ERR_PTR(-ENOMEM);
+}
+
+static void _free_lcu(struct alias_lcu *lcu)
+{
+	kfree(lcu->rsu_cqr->data);
+	kfree(lcu->rsu_cqr->cpaddr);
+	kfree(lcu->rsu_cqr);
+	kfree(lcu->uac);
+	kfree(lcu);
+}
+
+/*
+ * This is the function that will allocate all the server and lcu data,
+ * so this function must be called first for a new device.
+ * It returns 0 on success (whether or not the lcu was known before) and a
+ * negative error code if something went wrong (e.g. -ENOMEM).
+ */
+int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	unsigned long flags;
+	struct alias_server *server, *newserver;
+	struct alias_lcu *lcu, *newlcu;
+	struct dasd_uid uid;
+
+	private = (struct dasd_eckd_private *) device->private;
+
+	device->discipline->get_uid(device, &uid);
+	spin_lock_irqsave(&aliastree.lock, flags);
+	server = _find_server(&uid);
+	if (!server) {
+		spin_unlock_irqrestore(&aliastree.lock, flags);
+		newserver = _allocate_server(&uid);
+		if (IS_ERR(newserver))
+			return PTR_ERR(newserver);
+		spin_lock_irqsave(&aliastree.lock, flags);
+		server = _find_server(&uid);
+		if (!server) {
+			list_add(&newserver->server, &aliastree.serverlist);
+			server = newserver;
+		} else {
+			/* someone was faster */
+			_free_server(newserver);
+		}
+	}
+
+	lcu = _find_lcu(server, &uid);
+	if (!lcu) {
+		spin_unlock_irqrestore(&aliastree.lock, flags);
+		newlcu = _allocate_lcu(&uid);
+		if (IS_ERR(newlcu))
+			return PTR_ERR(newlcu);
+		spin_lock_irqsave(&aliastree.lock, flags);
+		lcu = _find_lcu(server, &uid);
+		if (!lcu) {
+			list_add(&newlcu->lcu, &server->lculist);
+			lcu = newlcu;
+		} else {
+			/* someone was faster */
+			_free_lcu(newlcu);
+		}
+	}
+	spin_lock(&lcu->lock);
+	list_add(&device->alias_list, &lcu->inactive_devices);
+	private->lcu = lcu;
+	spin_unlock(&lcu->lock);
+	spin_unlock_irqrestore(&aliastree.lock, flags);
+
+	return 0;
+}
+
+/*
+ * This function removes a device from the scope of alias management.
+ * The complicated part is to make sure that it is not in use by
+ * any of the workers. If necessary cancel the work.
+ */
+void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	unsigned long flags;
+	struct alias_lcu *lcu;
+	struct alias_server *server;
+	int was_pending;
+	struct dasd_uid uid;
+
+	private = (struct dasd_eckd_private *) device->private;
+	lcu = private->lcu;
+	/* nothing to do if already disconnected */
+	if (!lcu)
+		return;
+	device->discipline->get_uid(device, &uid);
+	spin_lock_irqsave(&lcu->lock, flags);
+	list_del_init(&device->alias_list);
+	/* make sure that the workers don't use this device */
+	if (device == lcu->suc_data.device) {
+		spin_unlock_irqrestore(&lcu->lock, flags);
+		cancel_work_sync(&lcu->suc_data.worker);
+		spin_lock_irqsave(&lcu->lock, flags);
+		if (device == lcu->suc_data.device)
+			lcu->suc_data.device = NULL;
+	}
+	was_pending = 0;
+	if (device == lcu->ruac_data.device) {
+		spin_unlock_irqrestore(&lcu->lock, flags);
+		was_pending = 1;
+		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
+		spin_lock_irqsave(&lcu->lock, flags);
+		if (device == lcu->ruac_data.device)
+			lcu->ruac_data.device = NULL;
+	}
+	private->lcu = NULL;
+	spin_unlock_irqrestore(&lcu->lock, flags);
+
+	spin_lock_irqsave(&aliastree.lock, flags);
+	spin_lock(&lcu->lock);
+	if (list_empty(&lcu->grouplist) &&
+	    list_empty(&lcu->active_devices) &&
+	    list_empty(&lcu->inactive_devices)) {
+		list_del(&lcu->lcu);
+		spin_unlock(&lcu->lock);
+		_free_lcu(lcu);
+		lcu = NULL;
+	} else {
+		if (was_pending)
+			_schedule_lcu_update(lcu, NULL);
+		spin_unlock(&lcu->lock);
+	}
+	server = _find_server(&uid);
+	if (server && list_empty(&server->lculist)) {
+		list_del(&server->server);
+		_free_server(server);
+	}
+	spin_unlock_irqrestore(&aliastree.lock, flags);
+}
+
+/*
+ * This function assumes that the unit address configuration stored
+ * in the lcu is up to date and will update the device uid before
+ * adding it to a pav group.
+ */
+
+static int _add_device_to_lcu(struct alias_lcu *lcu,
+			      struct dasd_device *device,
+			      struct dasd_device *pos)
+{
+
+	struct dasd_eckd_private *private;
+	struct alias_pav_group *group;
+	struct dasd_uid uid;
+	unsigned long flags;
+
+	private = (struct dasd_eckd_private *) device->private;
+
+	/* only lock if not already locked */
+	if (device != pos)
+		spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
+					 CDEV_NESTED_SECOND);
+	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
+	private->uid.base_unit_addr =
+		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
+	uid = private->uid;
+
+	if (device != pos)
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+	/* if we have no PAV anyway, we don't need to bother with PAV groups */
+	if (lcu->pav == NO_PAV) {
+		list_move(&device->alias_list, &lcu->active_devices);
+		return 0;
+	}
+
+	group = _find_group(lcu, &uid);
+	if (!group) {
+		group = kzalloc(sizeof(*group), GFP_ATOMIC);
+		if (!group)
+			return -ENOMEM;
+		memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
+		memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
+		group->uid.ssid = uid.ssid;
+		if (uid.type == UA_BASE_DEVICE)
+			group->uid.base_unit_addr = uid.real_unit_addr;
+		else
+			group->uid.base_unit_addr = uid.base_unit_addr;
+		memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
+		INIT_LIST_HEAD(&group->group);
+		INIT_LIST_HEAD(&group->baselist);
+		INIT_LIST_HEAD(&group->aliaslist);
+		list_add(&group->group, &lcu->grouplist);
+	}
+	if (uid.type == UA_BASE_DEVICE)
+		list_move(&device->alias_list, &group->baselist);
+	else
+		list_move(&device->alias_list, &group->aliaslist);
+	private->pavgroup = group;
+	return 0;
+}
+
+static void _remove_device_from_lcu(struct alias_lcu *lcu,
+				    struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	struct alias_pav_group *group;
+
+	private = (struct dasd_eckd_private *) device->private;
+	list_move(&device->alias_list, &lcu->inactive_devices);
+	group = private->pavgroup;
+	if (!group)
+		return;
+	private->pavgroup = NULL;
+	if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
+		list_del(&group->group);
+		kfree(group);
+		return;
+	}
+	if (group->next == device)
+		group->next = NULL;
+}
+
+static int read_unit_address_configuration(struct dasd_device *device,
+					   struct alias_lcu *lcu)
+{
+	struct dasd_psf_prssd_data *prssdp;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	int rc;
+	unsigned long flags;
+
+	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
+				   (sizeof(struct dasd_psf_prssd_data)),
+				   device);
+	if (IS_ERR(cqr))
+		return PTR_ERR(cqr);
+	cqr->startdev = device;
+	cqr->memdev = device;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	cqr->retries = 10;
+	cqr->expires = 20 * HZ;
+
+	/* Prepare for Read Subsystem Data */
+	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+	prssdp->order = PSF_ORDER_PRSSD;
+	prssdp->suborder = 0x0e;	/* Read unit address configuration */
+	/* all other bytes of prssdp must be zero */
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->count = sizeof(struct dasd_psf_prssd_data);
+	ccw->flags |= CCW_FLAG_CC;
+	ccw->cda = (__u32)(addr_t) prssdp;
+
+	/* Read Subsystem Data - feature codes */
+	memset(lcu->uac, 0, sizeof(*(lcu->uac)));
+
+	ccw++;
+	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+	ccw->count = sizeof(*(lcu->uac));
+	ccw->cda = (__u32)(addr_t) lcu->uac;
+
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	/* need to unset flag here to detect race with summary unit check */
+	spin_lock_irqsave(&lcu->lock, flags);
+	lcu->flags &= ~NEED_UAC_UPDATE;
+	spin_unlock_irqrestore(&lcu->lock, flags);
+
+	do {
+		rc = dasd_sleep_on(cqr);
+	} while (rc && (cqr->retries > 0));
+	if (rc) {
+		spin_lock_irqsave(&lcu->lock, flags);
+		lcu->flags |= NEED_UAC_UPDATE;
+		spin_unlock_irqrestore(&lcu->lock, flags);
+	}
+	dasd_kfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
+{
+	unsigned long flags;
+	struct alias_pav_group *pavgroup, *tempgroup;
+	struct dasd_device *device, *tempdev;
+	int i, rc;
+	struct dasd_eckd_private *private;
+
+	spin_lock_irqsave(&lcu->lock, flags);
+	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
+		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
+					 alias_list) {
+			list_move(&device->alias_list, &lcu->active_devices);
+			private = (struct dasd_eckd_private *) device->private;
+			private->pavgroup = NULL;
+		}
+		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
+					 alias_list) {
+			list_move(&device->alias_list, &lcu->active_devices);
+			private = (struct dasd_eckd_private *) device->private;
+			private->pavgroup = NULL;
+		}
+		list_del(&pavgroup->group);
+		kfree(pavgroup);
+	}
+	spin_unlock_irqrestore(&lcu->lock, flags);
+
+	rc = read_unit_address_configuration(refdev, lcu);
+	if (rc)
+		return rc;
+
+	/* need to take cdev lock before lcu lock */
+	spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
+				 CDEV_NESTED_FIRST);
+	spin_lock(&lcu->lock);
+	lcu->pav = NO_PAV;
+	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
+		switch (lcu->uac->unit[i].ua_type) {
+		case UA_BASE_PAV_ALIAS:
+			lcu->pav = BASE_PAV;
+			break;
+		case UA_HYPER_PAV_ALIAS:
+			lcu->pav = HYPER_PAV;
+			break;
+		}
+		if (lcu->pav != NO_PAV)
+			break;
+	}
+
+	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
+				 alias_list) {
+		_add_device_to_lcu(lcu, device, refdev);
+	}
+	spin_unlock(&lcu->lock);
+	spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
+	return 0;
+}
+
+static void lcu_update_work(struct work_struct *work)
+{
+	struct alias_lcu *lcu;
+	struct read_uac_work_data *ruac_data;
+	struct dasd_device *device;
+	unsigned long flags;
+	int rc;
+
+	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
+	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
+	device = ruac_data->device;
+	rc = _lcu_update(device, lcu);
+	/*
+	 * Need to check flags again, as there could have been another
+	 * prepare_update or a new device while we were still
+	 * processing the data.
+	 */
+	spin_lock_irqsave(&lcu->lock, flags);
+	if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
+			    " alias data in lcu (rc = %d), retry later", rc);
+		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
+	} else {
+		lcu->ruac_data.device = NULL;
+		lcu->flags &= ~UPDATE_PENDING;
+	}
+	spin_unlock_irqrestore(&lcu->lock, flags);
+}
+
+static int _schedule_lcu_update(struct alias_lcu *lcu,
+				struct dasd_device *device)
+{
+	struct dasd_device *usedev = NULL;
+	struct alias_pav_group *group;
+
+	lcu->flags |= NEED_UAC_UPDATE;
+	if (lcu->ruac_data.device) {
+		/* already scheduled or running */
+		return 0;
+	}
+	if (device && !list_empty(&device->alias_list))
+		usedev = device;
+
+	if (!usedev && !list_empty(&lcu->grouplist)) {
+		group = list_first_entry(&lcu->grouplist,
+					 struct alias_pav_group, group);
+		if (!list_empty(&group->baselist))
+			usedev = list_first_entry(&group->baselist,
+						  struct dasd_device,
+						  alias_list);
+		else if (!list_empty(&group->aliaslist))
+			usedev = list_first_entry(&group->aliaslist,
+						  struct dasd_device,
+						  alias_list);
+	}
+	if (!usedev && !list_empty(&lcu->active_devices)) {
+		usedev = list_first_entry(&lcu->active_devices,
+					  struct dasd_device, alias_list);
+	}
+	/*
+	 * if we haven't found a proper device yet, give up for now, the next
+	 * device that will be set active will trigger an lcu update
+	 */
+	if (!usedev)
+		return -EINVAL;
+	lcu->ruac_data.device = usedev;
+	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
+	return 0;
+}
+
+int dasd_alias_add_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	struct alias_lcu *lcu;
+	unsigned long flags;
+	int rc;
+
+	private = (struct dasd_eckd_private *) device->private;
+	lcu = private->lcu;
+	rc = 0;
+
+	/* need to take cdev lock before lcu lock */
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	spin_lock(&lcu->lock);
+	if (!(lcu->flags & UPDATE_PENDING)) {
+		rc = _add_device_to_lcu(lcu, device, device);
+		if (rc)
+			lcu->flags |= UPDATE_PENDING;
+	}
+	if (lcu->flags & UPDATE_PENDING) {
+		list_move(&device->alias_list, &lcu->active_devices);
+		_schedule_lcu_update(lcu, device);
+	}
+	spin_unlock(&lcu->lock);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	return rc;
+}
+
+int dasd_alias_update_add_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	private = (struct dasd_eckd_private *) device->private;
+	private->lcu->flags |= UPDATE_PENDING;
+	return dasd_alias_add_device(device);
+}
+
+int dasd_alias_remove_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	struct alias_lcu *lcu;
+	unsigned long flags;
+
+	private = (struct dasd_eckd_private *) device->private;
+	lcu = private->lcu;
+	/* nothing to do if already removed */
+	if (!lcu)
+		return 0;
+	spin_lock_irqsave(&lcu->lock, flags);
+	_remove_device_from_lcu(lcu, device);
+	spin_unlock_irqrestore(&lcu->lock, flags);
+	return 0;
+}
+
+struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
+{
+
+	struct dasd_device *alias_device;
+	struct alias_pav_group *group;
+	struct alias_lcu *lcu;
+	struct dasd_eckd_private *private, *alias_priv;
+	unsigned long flags;
+
+	private = (struct dasd_eckd_private *) base_device->private;
+	group = private->pavgroup;
+	lcu = private->lcu;
+	if (!group || !lcu)
+		return NULL;
+	if (lcu->pav == NO_PAV ||
+	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
+		return NULL;
+	if (unlikely(!(private->features.feature[8] & 0x01))) {
+		/*
+		 * PAV enabled but prefix not, very unlikely
+		 * seems to be a lost pathgroup
+		 * use base device to do IO
+		 */
+		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
+			      "Prefix not enabled with PAV enabled\n");
+		return NULL;
+	}
+
+	spin_lock_irqsave(&lcu->lock, flags);
+	alias_device = group->next;
+	if (!alias_device) {
+		if (list_empty(&group->aliaslist)) {
+			spin_unlock_irqrestore(&lcu->lock, flags);
+			return NULL;
+		} else {
+			alias_device = list_first_entry(&group->aliaslist,
+							struct dasd_device,
+							alias_list);
+		}
+	}
+	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
+		group->next = list_first_entry(&group->aliaslist,
+					       struct dasd_device, alias_list);
+	else
+		group->next = list_first_entry(&alias_device->alias_list,
+					       struct dasd_device, alias_list);
+	spin_unlock_irqrestore(&lcu->lock, flags);
+	alias_priv = (struct dasd_eckd_private *) alias_device->private;
+	if ((alias_priv->count < private->count) && !alias_device->stopped)
+		return alias_device;
+	else
+		return NULL;
+}
+
+/*
+ * Summary unit check handling depends on the way alias devices
+ * are handled so it is done here rather than in dasd_eckd.c
+ */
+static int reset_summary_unit_check(struct alias_lcu *lcu,
+				    struct dasd_device *device,
+				    char reason)
+{
+	struct dasd_ccw_req *cqr;
+	int rc = 0;
+	struct ccw1 *ccw;
+
+	cqr = lcu->rsu_cqr;
+	strncpy((char *) &cqr->magic, "ECKD", 4);
+	ASCEBC((char *) &cqr->magic, 4);
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
+	ccw->flags = 0;
+	ccw->count = 16;
+	ccw->cda = (__u32)(addr_t) cqr->data;
+	((char *)cqr->data)[0] = reason;
+
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	cqr->retries = 255;	/* set retry counter to enable basic ERP */
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->block = NULL;
+	cqr->expires = 5 * HZ;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	rc = dasd_sleep_on_immediatly(cqr);
+	return rc;
+}
+
+static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
+{
+	struct alias_pav_group *pavgroup;
+	struct dasd_device *device;
+	struct dasd_eckd_private *private;
+	unsigned long flags;
+
+	/* active and inactive list can contain alias as well as base devices */
+	list_for_each_entry(device, &lcu->active_devices, alias_list) {
+		private = (struct dasd_eckd_private *) device->private;
+		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+		if (private->uid.type != UA_BASE_DEVICE) {
+			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
+					       flags);
+			continue;
+		}
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+		dasd_schedule_block_bh(device->block);
+		dasd_schedule_device_bh(device);
+	}
+	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+		private = (struct dasd_eckd_private *) device->private;
+		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+		if (private->uid.type != UA_BASE_DEVICE) {
+			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
+					       flags);
+			continue;
+		}
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+		dasd_schedule_block_bh(device->block);
+		dasd_schedule_device_bh(device);
+	}
+	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
+		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+			dasd_schedule_block_bh(device->block);
+			dasd_schedule_device_bh(device);
+		}
+	}
+}
+
+static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
+{
+	struct alias_pav_group *pavgroup;
+	struct dasd_device *device, *temp;
+	struct dasd_eckd_private *private;
+	int rc;
+	unsigned long flags;
+	LIST_HEAD(active);
+
+	/*
+	 * Problem here is that dasd_flush_device_queue may wait
+	 * for termination of a request to complete. We can't keep
+	 * the lcu lock during that time, so we must assume that
+	 * the lists may have changed.
+	 * Idea: first gather all active alias devices in a separate list,
+	 * then flush the first element of this list unlocked, and afterwards
+	 * check if it is still on the list before moving it to the
+	 * active_devices list.
+	 */
+
+	spin_lock_irqsave(&lcu->lock, flags);
+	list_for_each_entry_safe(device, temp, &lcu->active_devices,
+				 alias_list) {
+		private = (struct dasd_eckd_private *) device->private;
+		if (private->uid.type == UA_BASE_DEVICE)
+			continue;
+		list_move(&device->alias_list, &active);
+	}
+
+	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
+		list_splice_init(&pavgroup->aliaslist, &active);
+	}
+	while (!list_empty(&active)) {
+		device = list_first_entry(&active, struct dasd_device,
+					  alias_list);
+		spin_unlock_irqrestore(&lcu->lock, flags);
+		rc = dasd_flush_device_queue(device);
+		spin_lock_irqsave(&lcu->lock, flags);
+		/*
+		 * only move device around if it wasn't moved away while we
+		 * were waiting for the flush
+		 */
+		if (device == list_first_entry(&active,
+					       struct dasd_device, alias_list))
+			list_move(&device->alias_list, &lcu->active_devices);
+	}
+	spin_unlock_irqrestore(&lcu->lock, flags);
+}
+
+static void __stop_device_on_lcu(struct dasd_device *device,
+				 struct dasd_device *pos)
+{
+	/* If pos == device then device is already locked! */
+	if (pos == device) {
+		dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
+		return;
+	}
+	spin_lock(get_ccwdev_lock(pos->cdev));
+	dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
+	spin_unlock(get_ccwdev_lock(pos->cdev));
+}
+
+/*
+ * This function is called in interrupt context, so the
+ * cdev lock for device is already locked!
+ */
+static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
+				     struct dasd_device *device)
+{
+	struct alias_pav_group *pavgroup;
+	struct dasd_device *pos;
+
+	list_for_each_entry(pos, &lcu->active_devices, alias_list)
+		__stop_device_on_lcu(device, pos);
+	list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
+		__stop_device_on_lcu(device, pos);
+	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
+		list_for_each_entry(pos, &pavgroup->baselist, alias_list)
+			__stop_device_on_lcu(device, pos);
+		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
+			__stop_device_on_lcu(device, pos);
+	}
+}
+
+static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
+{
+	struct alias_pav_group *pavgroup;
+	struct dasd_device *device;
+	unsigned long flags;
+
+	list_for_each_entry(device, &lcu->active_devices, alias_list) {
+		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	}
+
+	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	}
+
+	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
+		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
+					       flags);
+		}
+		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
+					       flags);
+		}
+	}
+}
+
+static void summary_unit_check_handling_work(struct work_struct *work)
+{
+	struct alias_lcu *lcu;
+	struct summary_unit_check_work_data *suc_data;
+	unsigned long flags;
+	struct dasd_device *device;
+
+	suc_data = container_of(work, struct summary_unit_check_work_data,
+				worker);
+	lcu = container_of(suc_data, struct alias_lcu, suc_data);
+	device = suc_data->device;
+
+	/* 1. flush alias devices */
+	flush_all_alias_devices_on_lcu(lcu);
+
+	/* 2. reset summary unit check */
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	dasd_device_remove_stop_bits(device,
+				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	reset_summary_unit_check(lcu, device, suc_data->reason);
+
+	spin_lock_irqsave(&lcu->lock, flags);
+	_unstop_all_devices_on_lcu(lcu);
+	_restart_all_base_devices_on_lcu(lcu);
+	/* 3. read new alias configuration */
+	_schedule_lcu_update(lcu, device);
+	lcu->suc_data.device = NULL;
+	spin_unlock_irqrestore(&lcu->lock, flags);
+}
+
+/*
+ * note: this will be called from int handler context (cdev locked)
+ */
+void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
+					  struct irb *irb)
+{
+	struct alias_lcu *lcu;
+	char reason;
+	struct dasd_eckd_private *private;
+	char *sense;
+
+	private = (struct dasd_eckd_private *) device->private;
+
+	sense = dasd_get_sense(irb);
+	if (sense) {
+		reason = sense[8];
+		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
+			    "eckd handle summary unit check: reason", reason);
+	} else {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "eckd handle summary unit check:"
+			    " no reason code available");
+		return;
+	}
+
+	lcu = private->lcu;
+	if (!lcu) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "device not ready to handle summary"
+			    " unit check (no lcu structure)");
+		return;
+	}
+	spin_lock(&lcu->lock);
+	_stop_all_devices_on_lcu(lcu, device);
+	/* prepare for lcu_update */
+	private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
+	/* If this device is about to be removed just return and wait for
+	 * the next interrupt on a different device
+	 */
+	if (list_empty(&device->alias_list)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "device is in offline processing,"
+			    " don't do summary unit check handling");
+		spin_unlock(&lcu->lock);
+		return;
+	}
+	if (lcu->suc_data.device) {
+		/* already scheduled or running */
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "previous instance of summary unit check worker"
+			    " still pending");
+		spin_unlock(&lcu->lock);
+		return;
+	}
+	lcu->suc_data.reason = reason;
+	lcu->suc_data.device = device;
+	spin_unlock(&lcu->lock);
+	schedule_work(&lcu->suc_data.worker);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_devmap.c b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_devmap.c
new file mode 100644
index 0000000..d71511c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_devmap.c
@@ -0,0 +1,1400 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_devmap.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Carsten Otte <Cotte@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ *
+ * Device mapping and dasd= parameter parsing functions. All devmap
+ * functions must not be called from interrupt context. In particular
+ * dasd_get_device is a no-no from interrupt context.
+ *
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <asm/debug.h>
+#include <asm/uaccess.h>
+#include <asm/ipl.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_devmap:"
+#define DASD_BUS_ID_SIZE 20
+
+#include "dasd_int.h"
+
+struct kmem_cache *dasd_page_cache;
+EXPORT_SYMBOL_GPL(dasd_page_cache);
+
+/*
+ * dasd_devmap_t is used to store the features and the relation
+ * between device number and device index. To find a dasd_devmap_t
+ * that corresponds to a device number or a device index, each
+ * dasd_devmap_t is added to two linked lists, one to search by
+ * the device number and one to search by the device index. As
+ * soon as big minor numbers are available the device index list
+ * can be removed since the device number will then be identical
+ * to the device index.
+ */
+struct dasd_devmap {
+	struct list_head list;
+	char bus_id[DASD_BUS_ID_SIZE];
+        unsigned int devindex;
+        unsigned short features;
+	struct dasd_device *device;
+};
+
+/*
+ * Parameter parsing functions for dasd= parameter. The syntax is:
+ *   <devno>		: (0x)?[0-9a-fA-F]+
+ *   <busid>		: [0-9a-f]\.[0-9a-f]\.(0x)?[0-9a-fA-F]+
+ *   <feature>		: ro
+ *   <feature_list>	: \(<feature>(:<feature>)*\)
+ *   <devno-range>	: <devno>(-<devno>)?<feature_list>?
+ *   <busid-range>	: <busid>(-<busid>)?<feature_list>?
+ *   <devices>		: <devno-range>|<busid-range>
+ *   <dasd_module>	: dasd_diag_mod|dasd_eckd_mod|dasd_fba_mod
+ *
+ *   <dasd>		: autodetect|probeonly|<devices>(,<devices>)*
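+ *
+ *   Example (illustrative only):
+ *     dasd=0.0.0200-0.0.020f(ro),0.0.1000(erplog:failfast)
+ *   would set devices 0.0.0200-0.0.020f initially online and read-only and
+ *   device 0.0.1000 initially online with ERP logging and failfast enabled.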
+ */
+
+int dasd_probeonly =  0;	/* is true when probeonly mode is active */
+int dasd_autodetect = 0;	/* is true when autodetection is active */
+int dasd_nopav = 0;		/* is true when PAV is disabled */
+EXPORT_SYMBOL_GPL(dasd_nopav);
+int dasd_nofcx;			/* disable High Performance Ficon */
+EXPORT_SYMBOL_GPL(dasd_nofcx);
+
+/*
+ * char *dasd[] is intended to hold the ranges supplied by the dasd= statement.
+ * It is named 'dasd' so that insmod can fill it directly with the comma
+ * separated strings when the driver is running as a module.
+ */
+static char *dasd[256];
+module_param_array(dasd, charp, NULL, 0);
+
+/*
+ * Single spinlock to protect devmap and servermap structures and lists.
+ */
+static DEFINE_SPINLOCK(dasd_devmap_lock);
+
+/*
+ * Hash lists for devmap structures.
+ */
+static struct list_head dasd_hashlists[256];
+int dasd_max_devindex;
+
+static struct dasd_devmap *dasd_add_busid(const char *, int);
+
+static inline int
+dasd_hash_busid(const char *bus_id)
+{
+	int hash, i;
+
+	hash = 0;
+	for (i = 0; (i < DASD_BUS_ID_SIZE) && *bus_id; i++, bus_id++)
+		hash += *bus_id;
+	return hash & 0xff;
+}
+
+#ifndef MODULE
+/*
+ * The parameter parsing functions for builtin-drivers are called
+ * before kmalloc works. Store the pointers to the parameter strings
+ * into dasd[] for later processing.
+ */
+static int __init
+dasd_call_setup(char *str)
+{
+	static int count = 0;
+
+	if (count < 256)
+		dasd[count++] = str;
+	return 1;
+}
+
+__setup ("dasd=", dasd_call_setup);
+#endif	/* #ifndef MODULE */
+
+#define	DASD_IPLDEV	"ipldev"
+
+/*
+ * Read a device busid/devno from a string.
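+ * Accepted forms are, for example, an old-style devno such as "0200" or
+ * "0x0200", a new-style busid such as "0.0.0200", or the keyword "ipldev"
+ * for the IPL device (examples are illustrative).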
+ */
+static int
+dasd_busid(char **str, int *id0, int *id1, int *devno)
+{
+	int val, old_style;
+
+	/* Interpret ipldev busid */
+	if (strncmp(DASD_IPLDEV, *str, strlen(DASD_IPLDEV)) == 0) {
+		if (ipl_info.type != IPL_TYPE_CCW) {
+			pr_err("The IPL device is not a CCW device\n");
+			return -EINVAL;
+		}
+		*id0 = 0;
+		*id1 = ipl_info.data.ccw.dev_id.ssid;
+		*devno = ipl_info.data.ccw.dev_id.devno;
+		*str += strlen(DASD_IPLDEV);
+
+		return 0;
+	}
+	/* check for leading '0x' */
+	old_style = 0;
+	if ((*str)[0] == '0' && (*str)[1] == 'x') {
+		*str += 2;
+		old_style = 1;
+	}
+	if (!isxdigit((*str)[0]))	/* We require at least one hex digit */
+		return -EINVAL;
+	val = simple_strtoul(*str, str, 16);
+	if (old_style || (*str)[0] != '.') {
+		*id0 = *id1 = 0;
+		if (val < 0 || val > 0xffff)
+			return -EINVAL;
+		*devno = val;
+		return 0;
+	}
+	/* New style x.y.z busid */
+	if (val < 0 || val > 0xff)
+		return -EINVAL;
+	*id0 = val;
+	(*str)++;
+	if (!isxdigit((*str)[0]))	/* We require at least one hex digit */
+		return -EINVAL;
+	val = simple_strtoul(*str, str, 16);
+	if (val < 0 || val > 0xff || (*str)++[0] != '.')
+		return -EINVAL;
+	*id1 = val;
+	if (!isxdigit((*str)[0]))	/* We require at least one hex digit */
+		return -EINVAL;
+	val = simple_strtoul(*str, str, 16);
+	if (val < 0 || val > 0xffff)
+		return -EINVAL;
+	*devno = val;
+	return 0;
+}
+
+/*
+ * Read colon separated list of dasd features ("ro", "diag", "raw",
+ * "erplog" and "failfast" are recognized). The default feature set
+ * is empty (value 0).
+ */
+static int
+dasd_feature_list(char *str, char **endp)
+{
+	int features, len, rc;
+
+	rc = 0;
+	if (*str != '(') {
+		*endp = str;
+		return DASD_FEATURE_DEFAULT;
+	}
+	str++;
+	features = 0;
+
+	while (1) {
+		for (len = 0;
+		     str[len] && str[len] != ':' && str[len] != ')'; len++);
+		if (len == 2 && !strncmp(str, "ro", 2))
+			features |= DASD_FEATURE_READONLY;
+		else if (len == 4 && !strncmp(str, "diag", 4))
+			features |= DASD_FEATURE_USEDIAG;
+		else if (len == 3 && !strncmp(str, "raw", 3))
+			features |= DASD_FEATURE_USERAW;
+		else if (len == 6 && !strncmp(str, "erplog", 6))
+			features |= DASD_FEATURE_ERPLOG;
+		else if (len == 8 && !strncmp(str, "failfast", 8))
+			features |= DASD_FEATURE_FAILFAST;
+		else {
+			pr_warning("%.*s is not a supported device option\n",
+				   len, str);
+			rc = -EINVAL;
+		}
+		str += len;
+		if (*str != ':')
+			break;
+		str++;
+	}
+	if (*str != ')') {
+		pr_warning("A closing parenthesis ')' is missing in the "
+			   "dasd= parameter\n");
+		rc = -EINVAL;
+	} else
+		str++;
+	*endp = str;
+	if (rc != 0)
+		return rc;
+	return features;
+}
+
+/*
+ * Try to match the first element on the comma separated parse string
+ * with one of the known keywords. If a keyword is found, take the appropriate
+ * action and return a pointer to the residual string. If the first element
+ * could not be matched to any keyword then return an error code.
+ */
+static char *
+dasd_parse_keyword(char *parsestring)
+{
+	char *nextcomma, *residual_str;
+	int length;
+
+	nextcomma = strchr(parsestring,',');
+	if (nextcomma) {
+		length = nextcomma - parsestring;
+		residual_str = nextcomma + 1;
+	} else {
+		length = strlen(parsestring);
+		residual_str = parsestring + length;
+        }
+	if (strncmp("autodetect", parsestring, length) == 0) {
+		dasd_autodetect = 1;
+		pr_info("The autodetection mode has been activated\n");
+                return residual_str;
+        }
+	if (strncmp("probeonly", parsestring, length) == 0) {
+		dasd_probeonly = 1;
+		pr_info("The probeonly mode has been activated\n");
+                return residual_str;
+        }
+	if (strncmp("nopav", parsestring, length) == 0) {
+		if (MACHINE_IS_VM)
+			pr_info("'nopav' is not supported on z/VM\n");
+		else {
+			dasd_nopav = 1;
+			pr_info("PAV support has been deactivated\n");
+		}
+		return residual_str;
+	}
+	if (strncmp("nofcx", parsestring, length) == 0) {
+		dasd_nofcx = 1;
+		pr_info("High Performance FICON support has been "
+			"deactivated\n");
+		return residual_str;
+	}
+	if (strncmp("fixedbuffers", parsestring, length) == 0) {
+		if (dasd_page_cache)
+			return residual_str;
+		dasd_page_cache =
+			kmem_cache_create("dasd_page_cache", PAGE_SIZE,
+					  PAGE_SIZE, SLAB_CACHE_DMA,
+					  NULL);
+		if (!dasd_page_cache)
+			DBF_EVENT(DBF_WARNING, "%s", "Failed to create slab, "
+				"fixed buffer mode disabled.");
+		else
+			DBF_EVENT(DBF_INFO, "%s",
+				 "turning on fixed buffer mode");
+                return residual_str;
+        }
+	return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Try to interpret the first element on the comma separated parse string
+ * as a device number or a range of devices. If the interpretation is
+ * successful, create the matching dasd_devmap entries and return a pointer
+ * to the residual string.
+ * If interpretation fails or in case of an error, return an error code.
+ */
+static char *
+dasd_parse_range(char *parsestring)
+{
+	struct dasd_devmap *devmap;
+	int from, from_id0, from_id1;
+	int to, to_id0, to_id1;
+	int features, rc;
+	char bus_id[DASD_BUS_ID_SIZE+1], *str;
+
+	str = parsestring;
+	rc = dasd_busid(&str, &from_id0, &from_id1, &from);
+	if (rc == 0) {
+		to = from;
+		to_id0 = from_id0;
+		to_id1 = from_id1;
+		if (*str == '-') {
+			str++;
+			rc = dasd_busid(&str, &to_id0, &to_id1, &to);
+		}
+	}
+	if (rc == 0 &&
+	    (from_id0 != to_id0 || from_id1 != to_id1 || from > to))
+		rc = -EINVAL;
+	if (rc) {
+		pr_err("%s is not a valid device range\n", parsestring);
+		return ERR_PTR(rc);
+	}
+	features = dasd_feature_list(str, &str);
+	if (features < 0)
+		return ERR_PTR(-EINVAL);
+	/* each device in dasd= parameter should be set initially online */
+	features |= DASD_FEATURE_INITIAL_ONLINE;
+	while (from <= to) {
+		sprintf(bus_id, "%01x.%01x.%04x",
+			from_id0, from_id1, from++);
+		devmap = dasd_add_busid(bus_id, features);
+		if (IS_ERR(devmap))
+			return (char *)devmap;
+	}
+	if (*str == ',')
+		return str + 1;
+	if (*str == '\0')
+		return str;
+	pr_warning("The dasd= parameter value %s has an invalid ending\n",
+		   str);
+	return ERR_PTR(-EINVAL);
+}
+
+static char *
+dasd_parse_next_element(char *parsestring)
+{
+	char *residual_str;
+	residual_str = dasd_parse_keyword(parsestring);
+	if (!IS_ERR(residual_str))
+		return residual_str;
+	residual_str = dasd_parse_range(parsestring);
+	return residual_str;
+}
+
+/*
+ * Parse parameters stored in dasd[]
+ * The 'dasd=...' parameter allows specifying a comma separated list of
+ * keywords and device ranges. When the dasd driver is built into the kernel,
+ * the complete list will be stored as one element of the dasd[] array.
+ * When the dasd driver is built as a module, the list is broken into
+ * its elements and each dasd[] entry contains one element.
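+ * For example (illustrative only): built into the kernel, dasd[0] could hold
+ * the complete string "0.0.0200(ro),0.0.0300"; loaded as a module, dasd[0]
+ * would hold "0.0.0200(ro)" and dasd[1] would hold "0.0.0300".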
+ */
+int
+dasd_parse(void)
+{
+	int rc, i;
+	char *parsestring;
+
+	rc = 0;
+	for (i = 0; i < 256; i++) {
+		if (dasd[i] == NULL)
+			break;
+		parsestring = dasd[i];
+		/* loop over the comma separated list in the parsestring */
+		while (*parsestring) {
+			parsestring = dasd_parse_next_element(parsestring);
+			if(IS_ERR(parsestring)) {
+				rc = PTR_ERR(parsestring);
+				break;
+			}
+		}
+		if (rc) {
+			DBF_EVENT(DBF_ALERT, "%s", "invalid range found");
+			break;
+		}
+	}
+	return rc;
+}
+
+/*
+ * Add a devmap for the device specified by busid. It is possible that
+ * the devmap already exists (dasd= parameter). The order of the devices
+ * added through this function will define the kdevs for the individual
+ * devices.
+ */
+static struct dasd_devmap *
+dasd_add_busid(const char *bus_id, int features)
+{
+	struct dasd_devmap *devmap, *new, *tmp;
+	int hash;
+
+	new = (struct dasd_devmap *)
+		kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL);
+	if (!new)
+		return ERR_PTR(-ENOMEM);
+	spin_lock(&dasd_devmap_lock);
+	devmap = NULL;
+	hash = dasd_hash_busid(bus_id);
+	list_for_each_entry(tmp, &dasd_hashlists[hash], list)
+		if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
+			devmap = tmp;
+			break;
+		}
+	if (!devmap) {
+		/* This bus_id is new. */
+		new->devindex = dasd_max_devindex++;
+		strncpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
+		new->features = features;
+		new->device = NULL;
+		list_add(&new->list, &dasd_hashlists[hash]);
+		devmap = new;
+		new = NULL;
+	}
+	spin_unlock(&dasd_devmap_lock);
+	kfree(new);
+	return devmap;
+}
+
+/*
+ * Find devmap for device with given bus_id.
+ */
+static struct dasd_devmap *
+dasd_find_busid(const char *bus_id)
+{
+	struct dasd_devmap *devmap, *tmp;
+	int hash;
+
+	spin_lock(&dasd_devmap_lock);
+	devmap = ERR_PTR(-ENODEV);
+	hash = dasd_hash_busid(bus_id);
+	list_for_each_entry(tmp, &dasd_hashlists[hash], list) {
+		if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
+			devmap = tmp;
+			break;
+		}
+	}
+	spin_unlock(&dasd_devmap_lock);
+	return devmap;
+}
+
+/*
+ * Check if busid has been added to the list of dasd ranges.
+ */
+int
+dasd_busid_known(const char *bus_id)
+{
+	return IS_ERR(dasd_find_busid(bus_id)) ? -ENOENT : 0;
+}
+
+/*
+ * Forget all about the device numbers added so far.
+ * This may only be called at module unload or system shutdown.
+ */
+static void
+dasd_forget_ranges(void)
+{
+	struct dasd_devmap *devmap, *n;
+	int i;
+
+	spin_lock(&dasd_devmap_lock);
+	for (i = 0; i < 256; i++) {
+		list_for_each_entry_safe(devmap, n, &dasd_hashlists[i], list) {
+			BUG_ON(devmap->device != NULL);
+			list_del(&devmap->list);
+			kfree(devmap);
+		}
+	}
+	spin_unlock(&dasd_devmap_lock);
+}
+
+/*
+ * Find the device struct by its device index.
+ */
+struct dasd_device *
+dasd_device_from_devindex(int devindex)
+{
+	struct dasd_devmap *devmap, *tmp;
+	struct dasd_device *device;
+	int i;
+
+	spin_lock(&dasd_devmap_lock);
+	devmap = NULL;
+	for (i = 0; (i < 256) && !devmap; i++)
+		list_for_each_entry(tmp, &dasd_hashlists[i], list)
+			if (tmp->devindex == devindex) {
+				/* Found the devmap for the device. */
+				devmap = tmp;
+				break;
+			}
+	if (devmap && devmap->device) {
+		device = devmap->device;
+		dasd_get_device(device);
+	} else
+		device = ERR_PTR(-ENODEV);
+	spin_unlock(&dasd_devmap_lock);
+	return device;
+}
+
+/*
+ * Return devmap for cdev. If no devmap exists yet, create one and
+ * connect it to the cdev.
+ */
+static struct dasd_devmap *
+dasd_devmap_from_cdev(struct ccw_device *cdev)
+{
+	struct dasd_devmap *devmap;
+
+	devmap = dasd_find_busid(dev_name(&cdev->dev));
+	if (IS_ERR(devmap))
+		devmap = dasd_add_busid(dev_name(&cdev->dev),
+					DASD_FEATURE_DEFAULT);
+	return devmap;
+}
+
+/*
+ * Create a dasd device structure for cdev.
+ */
+struct dasd_device *
+dasd_create_device(struct ccw_device *cdev)
+{
+	struct dasd_devmap *devmap;
+	struct dasd_device *device;
+	unsigned long flags;
+	int rc;
+
+	devmap = dasd_devmap_from_cdev(cdev);
+	if (IS_ERR(devmap))
+		return (void *) devmap;
+
+	device = dasd_alloc_device();
+	if (IS_ERR(device))
+		return device;
+	atomic_set(&device->ref_count, 3);
+
+	spin_lock(&dasd_devmap_lock);
+	if (!devmap->device) {
+		devmap->device = device;
+		device->devindex = devmap->devindex;
+		device->features = devmap->features;
+		get_device(&cdev->dev);
+		device->cdev = cdev;
+		rc = 0;
+	} else
+		/* Someone else was faster. */
+		rc = -EBUSY;
+	spin_unlock(&dasd_devmap_lock);
+
+	if (rc) {
+		dasd_free_device(device);
+		return ERR_PTR(rc);
+	}
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	dev_set_drvdata(&cdev->dev, device);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	return device;
+}
+
+/*
+ * Wait queue for dasd_delete_device waits.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(dasd_delete_wq);
+
+/*
+ * Remove a dasd device structure. The passed reference
+ * is destroyed.
+ */
+void
+dasd_delete_device(struct dasd_device *device)
+{
+	struct ccw_device *cdev;
+	struct dasd_devmap *devmap;
+	unsigned long flags;
+
+	/* First remove device pointer from devmap. */
+	devmap = dasd_find_busid(dev_name(&device->cdev->dev));
+	BUG_ON(IS_ERR(devmap));
+	spin_lock(&dasd_devmap_lock);
+	if (devmap->device != device) {
+		spin_unlock(&dasd_devmap_lock);
+		dasd_put_device(device);
+		return;
+	}
+	devmap->device = NULL;
+	spin_unlock(&dasd_devmap_lock);
+
+	/* Disconnect dasd_device structure from ccw_device structure. */
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	dev_set_drvdata(&device->cdev->dev, NULL);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+	/*
+	 * Drop ref_count by 3, one for the devmap reference, one for
+	 * the cdev reference and one for the passed reference.
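+	 * (These are the three references taken with
+	 * atomic_set(&device->ref_count, 3) in dasd_create_device.)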
+	 */
+	atomic_sub(3, &device->ref_count);
+
+	/* Wait for reference counter to drop to zero. */
+	wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0);
+
+	/* Disconnect dasd_device structure from ccw_device structure. */
+	cdev = device->cdev;
+	device->cdev = NULL;
+
+	/* Put ccw_device structure. */
+	put_device(&cdev->dev);
+
+	/* Now the device structure can be freed. */
+	dasd_free_device(device);
+}
+
+/*
+ * Reference counter dropped to zero. Wake up waiter
+ * in dasd_delete_device.
+ */
+void
+dasd_put_device_wake(struct dasd_device *device)
+{
+	wake_up(&dasd_delete_wq);
+}
+EXPORT_SYMBOL_GPL(dasd_put_device_wake);
+
+/*
+ * Return dasd_device structure associated with cdev.
+ * This function needs to be called with the ccw device
+ * lock held. It can be used from interrupt context.
+ */
+struct dasd_device *
+dasd_device_from_cdev_locked(struct ccw_device *cdev)
+{
+	struct dasd_device *device = dev_get_drvdata(&cdev->dev);
+
+	if (!device)
+		return ERR_PTR(-ENODEV);
+	dasd_get_device(device);
+	return device;
+}
+
+/*
+ * Return dasd_device structure associated with cdev.
+ */
+struct dasd_device *
+dasd_device_from_cdev(struct ccw_device *cdev)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	device = dasd_device_from_cdev_locked(cdev);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	return device;
+}
+
+void dasd_add_link_to_gendisk(struct gendisk *gdp, struct dasd_device *device)
+{
+	struct dasd_devmap *devmap;
+
+	devmap = dasd_find_busid(dev_name(&device->cdev->dev));
+	if (IS_ERR(devmap))
+		return;
+	spin_lock(&dasd_devmap_lock);
+	gdp->private_data = devmap;
+	spin_unlock(&dasd_devmap_lock);
+}
+
+struct dasd_device *dasd_device_from_gendisk(struct gendisk *gdp)
+{
+	struct dasd_device *device;
+	struct dasd_devmap *devmap;
+
+	if (!gdp->private_data)
+		return NULL;
+	device = NULL;
+	spin_lock(&dasd_devmap_lock);
+	devmap = gdp->private_data;
+	if (devmap && devmap->device) {
+		device = devmap->device;
+		dasd_get_device(device);
+	}
+	spin_unlock(&dasd_devmap_lock);
+	return device;
+}
+
+/*
+ * SECTION: files in sysfs
+ */
+
+/*
+ * failfast controls the behaviour if no path is available
+ */
+static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct dasd_devmap *devmap;
+	int ff_flag;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (!IS_ERR(devmap))
+		ff_flag = (devmap->features & DASD_FEATURE_FAILFAST) != 0;
+	else
+		ff_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_FAILFAST) != 0;
+	return snprintf(buf, PAGE_SIZE, ff_flag ? "1\n" : "0\n");
+}
+
+static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr,
+	      const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	int val;
+	char *endp;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (((endp + 1) < (buf + count)) || (val > 1))
+		return -EINVAL;
+
+	spin_lock(&dasd_devmap_lock);
+	if (val)
+		devmap->features |= DASD_FEATURE_FAILFAST;
+	else
+		devmap->features &= ~DASD_FEATURE_FAILFAST;
+	if (devmap->device)
+		devmap->device->features = devmap->features;
+	spin_unlock(&dasd_devmap_lock);
+	return count;
+}
+
+static DEVICE_ATTR(failfast, 0644, dasd_ff_show, dasd_ff_store);
+
+/*
+ * readonly controls the readonly status of a dasd
+ */
+static ssize_t
+dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_devmap *devmap;
+	int ro_flag;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (!IS_ERR(devmap))
+		ro_flag = (devmap->features & DASD_FEATURE_READONLY) != 0;
+	else
+		ro_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_READONLY) != 0;
+	return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_ro_store(struct device *dev, struct device_attribute *attr,
+	      const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	struct dasd_device *device;
+	int val;
+	char *endp;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (((endp + 1) < (buf + count)) || (val > 1))
+		return -EINVAL;
+
+	spin_lock(&dasd_devmap_lock);
+	if (val)
+		devmap->features |= DASD_FEATURE_READONLY;
+	else
+		devmap->features &= ~DASD_FEATURE_READONLY;
+	device = devmap->device;
+	if (device) {
+		device->features = devmap->features;
+		val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+	}
+	spin_unlock(&dasd_devmap_lock);
+	if (device && device->block && device->block->gdp)
+		set_disk_ro(device->block->gdp, val);
+	return count;
+}
+
+static DEVICE_ATTR(readonly, 0644, dasd_ro_show, dasd_ro_store);
+/*
+ * erplog controls the logging of ERP related data
+ * (e.g. failing channel programs).
+ */
+static ssize_t
+dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_devmap *devmap;
+	int erplog;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (!IS_ERR(devmap))
+		erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0;
+	else
+		erplog = (DASD_FEATURE_DEFAULT & DASD_FEATURE_ERPLOG) != 0;
+	return snprintf(buf, PAGE_SIZE, erplog ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_erplog_store(struct device *dev, struct device_attribute *attr,
+	      const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	int val;
+	char *endp;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (((endp + 1) < (buf + count)) || (val > 1))
+		return -EINVAL;
+
+	spin_lock(&dasd_devmap_lock);
+	if (val)
+		devmap->features |= DASD_FEATURE_ERPLOG;
+	else
+		devmap->features &= ~DASD_FEATURE_ERPLOG;
+	if (devmap->device)
+		devmap->device->features = devmap->features;
+	spin_unlock(&dasd_devmap_lock);
+	return count;
+}
+
+static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store);
+
+/*
+ * use_diag controls whether the driver should use diag rather than ssch
+ * to talk to the device
+ */
+static ssize_t
+dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_devmap *devmap;
+	int use_diag;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (!IS_ERR(devmap))
+		use_diag = (devmap->features & DASD_FEATURE_USEDIAG) != 0;
+	else
+		use_diag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USEDIAG) != 0;
+	return sprintf(buf, use_diag ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	ssize_t rc;
+	int val;
+	char *endp;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (((endp + 1) < (buf + count)) || (val > 1))
+		return -EINVAL;
+
+	spin_lock(&dasd_devmap_lock);
+	/* Changing diag discipline flag is only allowed in offline state. */
+	rc = count;
+	if (!devmap->device && !(devmap->features & DASD_FEATURE_USERAW)) {
+		if (val)
+			devmap->features |= DASD_FEATURE_USEDIAG;
+		else
+			devmap->features &= ~DASD_FEATURE_USEDIAG;
+	} else
+		rc = -EPERM;
+	spin_unlock(&dasd_devmap_lock);
+	return rc;
+}
+
+static DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
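+/*
+ * Usage note (illustrative): the diag flag can only be changed while no
+ * dasd_device exists for the devmap, i.e. while the device is offline;
+ * writing to the attribute of an online device returns -EPERM, e.g.
+ *	echo 1 > /sys/bus/ccw/devices/<busid>/use_diag
+ * succeeds only before the device is set online.
+ */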
+
+/*
+ * use_raw controls whether the driver should give access to raw eckd data or
+ * operate in standard mode
+ */
+static ssize_t
+dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_devmap *devmap;
+	int use_raw;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (!IS_ERR(devmap))
+		use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0;
+	else
+		use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0;
+	return sprintf(buf, use_raw ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_use_raw_store(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	ssize_t rc;
+	unsigned long val;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	if ((strict_strtoul(buf, 10, &val) != 0) || val > 1)
+		return -EINVAL;
+
+	spin_lock(&dasd_devmap_lock);
+	/* Changing the raw track access flag is only allowed in offline state. */
+	rc = count;
+	if (!devmap->device && !(devmap->features & DASD_FEATURE_USEDIAG)) {
+		if (val)
+			devmap->features |= DASD_FEATURE_USERAW;
+		else
+			devmap->features &= ~DASD_FEATURE_USERAW;
+	} else
+		rc = -EPERM;
+	spin_unlock(&dasd_devmap_lock);
+	return rc;
+}
+
+static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show,
+		   dasd_use_raw_store);
+
+static ssize_t
+dasd_discipline_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
+{
+	struct dasd_device *device;
+	ssize_t len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		goto out;
+	else if (!device->discipline) {
+		dasd_put_device(device);
+		goto out;
+	} else {
+		len = snprintf(buf, PAGE_SIZE, "%s\n",
+			       device->discipline->name);
+		dasd_put_device(device);
+		return len;
+	}
+out:
+	len = snprintf(buf, PAGE_SIZE, "none\n");
+	return len;
+}
+
+static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
+
+static ssize_t
+dasd_device_status_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
+{
+	struct dasd_device *device;
+	ssize_t len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (!IS_ERR(device)) {
+		switch (device->state) {
+		case DASD_STATE_NEW:
+			len = snprintf(buf, PAGE_SIZE, "new\n");
+			break;
+		case DASD_STATE_KNOWN:
+			len = snprintf(buf, PAGE_SIZE, "detected\n");
+			break;
+		case DASD_STATE_BASIC:
+			len = snprintf(buf, PAGE_SIZE, "basic\n");
+			break;
+		case DASD_STATE_UNFMT:
+			len = snprintf(buf, PAGE_SIZE, "unformatted\n");
+			break;
+		case DASD_STATE_READY:
+			len = snprintf(buf, PAGE_SIZE, "ready\n");
+			break;
+		case DASD_STATE_ONLINE:
+			len = snprintf(buf, PAGE_SIZE, "online\n");
+			break;
+		default:
+			len = snprintf(buf, PAGE_SIZE, "no stat\n");
+			break;
+		}
+		dasd_put_device(device);
+	} else
+		len = snprintf(buf, PAGE_SIZE, "unknown\n");
+	return len;
+}
+
+static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
+
+static ssize_t dasd_alias_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	struct dasd_uid uid;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return sprintf(buf, "0\n");
+
+	if (device->discipline && device->discipline->get_uid &&
+	    !device->discipline->get_uid(device, &uid)) {
+		if (uid.type == UA_BASE_PAV_ALIAS ||
+		    uid.type == UA_HYPER_PAV_ALIAS) {
+			dasd_put_device(device);
+			return sprintf(buf, "1\n");
+		}
+	}
+	dasd_put_device(device);
+
+	return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL);
+
+static ssize_t dasd_vendor_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	struct dasd_uid uid;
+	char *vendor;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	vendor = "";
+	if (IS_ERR(device))
+		return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
+
+	if (device->discipline && device->discipline->get_uid &&
+	    !device->discipline->get_uid(device, &uid))
+			vendor = uid.vendor;
+
+	dasd_put_device(device);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
+}
+
+static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
+
+#define UID_STRLEN ( /* vendor */ 3 + 1 + /* serial    */ 14 + 1 +\
+		     /* SSID   */ 4 + 1 + /* unit addr */ 2 + 1 +\
+		     /* vduit */ 32 + 1)
+
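+/*
+ * The uid string built by dasd_uid_show() below has the form
+ *	<vendor>.<serial>.<ssid>.<unit address>[.<vduit>]
+ * for example (hypothetical values) "IBM.750000000ABCDE.1234.5c".
+ */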
+static ssize_t
+dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	struct dasd_uid uid;
+	char uid_string[UID_STRLEN];
+	char ua_string[3];
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	uid_string[0] = 0;
+	if (IS_ERR(device))
+		return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
+
+	if (device->discipline && device->discipline->get_uid &&
+	    !device->discipline->get_uid(device, &uid)) {
+		switch (uid.type) {
+		case UA_BASE_DEVICE:
+			snprintf(ua_string, sizeof(ua_string), "%02x",
+				 uid.real_unit_addr);
+			break;
+		case UA_BASE_PAV_ALIAS:
+			snprintf(ua_string, sizeof(ua_string), "%02x",
+				 uid.base_unit_addr);
+			break;
+		case UA_HYPER_PAV_ALIAS:
+			snprintf(ua_string, sizeof(ua_string), "xx");
+			break;
+		default:
+			/* should not happen, treat like base device */
+			snprintf(ua_string, sizeof(ua_string), "%02x",
+				 uid.real_unit_addr);
+			break;
+		}
+
+		if (strlen(uid.vduit) > 0)
+			snprintf(uid_string, sizeof(uid_string),
+				 "%s.%s.%04x.%s.%s",
+				 uid.vendor, uid.serial, uid.ssid, ua_string,
+				 uid.vduit);
+		else
+			snprintf(uid_string, sizeof(uid_string),
+				 "%s.%s.%04x.%s",
+				 uid.vendor, uid.serial, uid.ssid, ua_string);
+	}
+	dasd_put_device(device);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
+}
+static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
+
+/*
+ * extended error-reporting
+ */
+static ssize_t
+dasd_eer_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_devmap *devmap;
+	int eer_flag;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (!IS_ERR(devmap) && devmap->device)
+		eer_flag = dasd_eer_enabled(devmap->device);
+	else
+		eer_flag = 0;
+	return snprintf(buf, PAGE_SIZE, eer_flag ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_eer_store(struct device *dev, struct device_attribute *attr,
+	       const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	int val, rc;
+	char *endp;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+	if (!devmap->device)
+		return -ENODEV;
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (((endp + 1) < (buf + count)) || (val > 1))
+		return -EINVAL;
+
+	if (val) {
+		rc = dasd_eer_enable(devmap->device);
+		if (rc)
+			return rc;
+	} else
+		dasd_eer_disable(devmap->device);
+	return count;
+}
+
+static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
+
+/*
+ * expiration time for default requests
+ */
+static ssize_t
+dasd_expires_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	int len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_expires);
+	dasd_put_device(device);
+	return len;
+}
+
+static ssize_t
+dasd_expires_store(struct device *dev, struct device_attribute *attr,
+	       const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned long val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
+	if ((strict_strtoul(buf, 10, &val) != 0) ||
+	    (val > DASD_EXPIRES_MAX) || val == 0) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+
+	if (val)
+		device->default_expires = val;
+
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store);
+
+static ssize_t dasd_reservation_policy_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct dasd_devmap *devmap;
+	int rc = 0;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (IS_ERR(devmap)) {
+		rc = snprintf(buf, PAGE_SIZE, "ignore\n");
+	} else {
+		spin_lock(&dasd_devmap_lock);
+		if (devmap->features & DASD_FEATURE_FAILONSLCK)
+			rc = snprintf(buf, PAGE_SIZE, "fail\n");
+		else
+			rc = snprintf(buf, PAGE_SIZE, "ignore\n");
+		spin_unlock(&dasd_devmap_lock);
+	}
+	return rc;
+}
+
+static ssize_t dasd_reservation_policy_store(struct device *dev,
+					     struct device_attribute *attr,
+					     const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	int rc;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+	rc = 0;
+	spin_lock(&dasd_devmap_lock);
+	if (sysfs_streq("ignore", buf))
+		devmap->features &= ~DASD_FEATURE_FAILONSLCK;
+	else if (sysfs_streq("fail", buf))
+		devmap->features |= DASD_FEATURE_FAILONSLCK;
+	else
+		rc = -EINVAL;
+	if (devmap->device)
+		devmap->device->features = devmap->features;
+	spin_unlock(&dasd_devmap_lock);
+	if (rc)
+		return rc;
+	else
+		return count;
+}
+
+static DEVICE_ATTR(reservation_policy, 0644,
+		   dasd_reservation_policy_show, dasd_reservation_policy_store);
+
+static ssize_t dasd_reservation_state_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct dasd_device *device;
+	int rc = 0;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return snprintf(buf, PAGE_SIZE, "none\n");
+
+	if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags))
+		rc = snprintf(buf, PAGE_SIZE, "reserved\n");
+	else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags))
+		rc = snprintf(buf, PAGE_SIZE, "lost\n");
+	else
+		rc = snprintf(buf, PAGE_SIZE, "none\n");
+	dasd_put_device(device);
+	return rc;
+}
+
+static ssize_t dasd_reservation_state_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	int rc = 0;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	if (sysfs_streq("reset", buf))
+		clear_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
+	else
+		rc = -EINVAL;
+	dasd_put_device(device);
+
+	if (rc)
+		return rc;
+	else
+		return count;
+}
+
+static DEVICE_ATTR(last_known_reservation_state, 0644,
+		   dasd_reservation_state_show, dasd_reservation_state_store);
+
+static struct attribute * dasd_attrs[] = {
+	&dev_attr_readonly.attr,
+	&dev_attr_discipline.attr,
+	&dev_attr_status.attr,
+	&dev_attr_alias.attr,
+	&dev_attr_vendor.attr,
+	&dev_attr_uid.attr,
+	&dev_attr_use_diag.attr,
+	&dev_attr_raw_track_access.attr,
+	&dev_attr_eer_enabled.attr,
+	&dev_attr_erplog.attr,
+	&dev_attr_failfast.attr,
+	&dev_attr_expires.attr,
+	&dev_attr_reservation_policy.attr,
+	&dev_attr_last_known_reservation_state.attr,
+	NULL,
+};
+
+static struct attribute_group dasd_attr_group = {
+	.attrs = dasd_attrs,
+};
+
+/*
+ * Return value of the specified feature.
+ */
+int
+dasd_get_feature(struct ccw_device *cdev, int feature)
+{
+	struct dasd_devmap *devmap;
+
+	devmap = dasd_find_busid(dev_name(&cdev->dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	return ((devmap->features & feature) != 0);
+}
+
+/*
+ * Set / reset given feature.
+ * The flag indicates whether to set (!=0) or reset (=0) the feature.
+ */
+int
+dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
+{
+	struct dasd_devmap *devmap;
+
+	devmap = dasd_find_busid(dev_name(&cdev->dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	spin_lock(&dasd_devmap_lock);
+	if (flag)
+		devmap->features |= feature;
+	else
+		devmap->features &= ~feature;
+	if (devmap->device)
+		devmap->device->features = devmap->features;
+	spin_unlock(&dasd_devmap_lock);
+	return 0;
+}
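+/*
+ * Illustrative use of the two helpers above (sketch only; the caller
+ * and the chosen feature bits are examples):
+ *
+ *	if (dasd_get_feature(cdev, DASD_FEATURE_READONLY) > 0)
+ *		pr_info("device is configured read-only\n");
+ *	dasd_set_feature(cdev, DASD_FEATURE_ERPLOG, 1);
+ */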
+
+
+int
+dasd_add_sysfs_files(struct ccw_device *cdev)
+{
+	return sysfs_create_group(&cdev->dev.kobj, &dasd_attr_group);
+}
+
+void
+dasd_remove_sysfs_files(struct ccw_device *cdev)
+{
+	sysfs_remove_group(&cdev->dev.kobj, &dasd_attr_group);
+}
+
+
+int
+dasd_devmap_init(void)
+{
+	int i;
+
+	/* Initialize devmap structures. */
+	dasd_max_devindex = 0;
+	for (i = 0; i < 256; i++)
+		INIT_LIST_HEAD(&dasd_hashlists[i]);
+	return 0;
+}
+
+void
+dasd_devmap_exit(void)
+{
+	dasd_forget_ranges();
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_diag.c b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_diag.c
new file mode 100644
index 0000000..0cea7e9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_diag.c
@@ -0,0 +1,660 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_diag.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Based on.......: linux/drivers/s390/block/mdisk.c
+ * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ *
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/kernel_stat.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+
+#include <asm/dasd.h>
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/vtoc.h>
+#include <asm/diag.h>
+
+#include "dasd_int.h"
+#include "dasd_diag.h"
+
+#define PRINTK_HEADER "dasd(diag):"
+
+MODULE_LICENSE("GPL");
+
+/* The maximum number of blocks per request (max_blocks) is dependent on the
+ * amount of storage that is available in the static I/O buffer for each
+ * device. Currently each device gets 2 pages. We want to fit two requests
+ * into the available memory so that we can immediately start the next if one
+ * finishes. */
+#define DIAG_MAX_BLOCKS	(((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \
+			   sizeof(struct dasd_diag_req)) / \
+		           sizeof(struct dasd_diag_bio)) / 2)
+#define DIAG_MAX_RETRIES	32
+#define DIAG_TIMEOUT		50
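+/*
+ * Worked example for DIAG_MAX_BLOCKS (sizes are illustrative): with
+ * 4 KB pages the two pages per device hold 8192 bytes; after
+ * subtracting the dasd_ccw_req and dasd_diag_req headers the remainder
+ * is divided by sizeof(struct dasd_diag_bio), and halving the result
+ * leaves room for a second request of the same size.
+ */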
+
+static struct dasd_discipline dasd_diag_discipline;
+
+struct dasd_diag_private {
+	struct dasd_diag_characteristics rdc_data;
+	struct dasd_diag_rw_io iob;
+	struct dasd_diag_init_io iib;
+	blocknum_t pt_block;
+	struct ccw_dev_id dev_id;
+};
+
+struct dasd_diag_req {
+	unsigned int block_count;
+	struct dasd_diag_bio bio[0];
+};
+
+static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
+
+/* Perform DIAG250 call with block I/O parameter list iob (input and output)
+ * and function code cmd.
+ * In case of an exception, return 3. Otherwise return the bitwise OR of the
+ * resulting condition code and the DIAG return code. */
+static inline int dia250(void *iob, int cmd)
+{
+	register unsigned long reg2 asm ("2") = (unsigned long) iob;
+	typedef union {
+		struct dasd_diag_init_io init_io;
+		struct dasd_diag_rw_io rw_io;
+	} addr_type;
+	int rc;
+
+	rc = 3;
+	asm volatile(
+		"	diag	2,%2,0x250\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"	or	%0,3\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "+d" (rc), "=m" (*(addr_type *) iob)
+		: "d" (cmd), "d" (reg2), "m" (*(addr_type *) iob)
+		: "3", "cc");
+	return rc;
+}
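+/*
+ * Example of the return convention described above: a condition code
+ * of 2 combined with a DIAG return code of 4 yields dia250() == 6
+ * (2 | 4), while an exception raised by the DIAG instruction itself
+ * yields 3.
+ */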
+
+/* Initialize block I/O to DIAG device using the specified blocksize and
+ * block offset. On success, return zero and set end_block to contain the
+ * number of blocks on the device minus the specified offset. Return non-zero
+ * otherwise. */
+static inline int
+mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
+	     blocknum_t offset, blocknum_t *end_block)
+{
+	struct dasd_diag_private *private;
+	struct dasd_diag_init_io *iib;
+	int rc;
+
+	private = (struct dasd_diag_private *) device->private;
+	iib = &private->iib;
+	memset(iib, 0, sizeof (struct dasd_diag_init_io));
+
+	iib->dev_nr = private->dev_id.devno;
+	iib->block_size = blocksize;
+	iib->offset = offset;
+	iib->flaga = DASD_DIAG_FLAGA_DEFAULT;
+
+	rc = dia250(iib, INIT_BIO);
+
+	if ((rc & 3) == 0 && end_block)
+		*end_block = iib->end_block;
+
+	return rc;
+}
+
+/* Remove block I/O environment for device. Return zero on success, non-zero
+ * otherwise. */
+static inline int
+mdsk_term_io(struct dasd_device * device)
+{
+	struct dasd_diag_private *private;
+	struct dasd_diag_init_io *iib;
+	int rc;
+
+	private = (struct dasd_diag_private *) device->private;
+	iib = &private->iib;
+	memset(iib, 0, sizeof (struct dasd_diag_init_io));
+	iib->dev_nr = private->dev_id.devno;
+	rc = dia250(iib, TERM_BIO);
+	return rc;
+}
+
+/* Error recovery for failed DIAG requests - try to reestablish the DIAG
+ * environment. */
+static void
+dasd_diag_erp(struct dasd_device *device)
+{
+	int rc;
+
+	mdsk_term_io(device);
+	rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
+	if (rc == 4) {
+		if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags)))
+			pr_warning("%s: The access mode of a DIAG device "
+				   "changed to read-only\n",
+				   dev_name(&device->cdev->dev));
+		rc = 0;
+	}
+	if (rc)
+		pr_warning("%s: DIAG ERP failed with "
+			    "rc=%d\n", dev_name(&device->cdev->dev), rc);
+}
+
+/* Start a given request at the device. Return zero on success, non-zero
+ * otherwise. */
+static int
+dasd_start_diag(struct dasd_ccw_req * cqr)
+{
+	struct dasd_device *device;
+	struct dasd_diag_private *private;
+	struct dasd_diag_req *dreq;
+	int rc;
+
+	device = cqr->startdev;
+	if (cqr->retries < 0) {
+		DBF_DEV_EVENT(DBF_ERR, device, "DIAG start_IO: request %p "
+			    "- no retries left", cqr);
+		cqr->status = DASD_CQR_ERROR;
+		return -EIO;
+	}
+	private = (struct dasd_diag_private *) device->private;
+	dreq = (struct dasd_diag_req *) cqr->data;
+
+	private->iob.dev_nr = private->dev_id.devno;
+	private->iob.key = 0;
+	private->iob.flags = DASD_DIAG_RWFLAG_ASYNC;
+	private->iob.block_count = dreq->block_count;
+	private->iob.interrupt_params = (addr_t) cqr;
+	private->iob.bio_list = dreq->bio;
+	private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
+
+	cqr->startclk = get_clock();
+	cqr->starttime = jiffies;
+	cqr->retries--;
+
+	rc = dia250(&private->iob, RW_BIO);
+	switch (rc) {
+	case 0: /* Synchronous I/O finished successfully */
+		cqr->stopclk = get_clock();
+		cqr->status = DASD_CQR_SUCCESS;
+		/* Indicate to calling function that only a dasd_schedule_bh()
+		   and no timer is needed */
+		rc = -EACCES;
+		break;
+	case 8: /* Asynchronous I/O was started */
+		cqr->status = DASD_CQR_IN_IO;
+		rc = 0;
+		break;
+	default: /* Error condition */
+		cqr->status = DASD_CQR_QUEUED;
+		DBF_DEV_EVENT(DBF_WARNING, device, "dia250 returned rc=%d", rc);
+		dasd_diag_erp(device);
+		rc = -EIO;
+		break;
+	}
+	cqr->intrc = rc;
+	return rc;
+}
+
+/* Terminate given request at the device. */
+static int
+dasd_diag_term_IO(struct dasd_ccw_req * cqr)
+{
+	struct dasd_device *device;
+
+	device = cqr->startdev;
+	mdsk_term_io(device);
+	mdsk_init_io(device, device->block->bp_block, 0, NULL);
+	cqr->status = DASD_CQR_CLEAR_PENDING;
+	cqr->stopclk = get_clock();
+	dasd_schedule_device_bh(device);
+	return 0;
+}
+
+/* Handle external interruption. */
+static void dasd_ext_handler(struct ext_code ext_code,
+			     unsigned int param32, unsigned long param64)
+{
+	struct dasd_ccw_req *cqr, *next;
+	struct dasd_device *device;
+	unsigned long long expires;
+	unsigned long flags;
+	addr_t ip;
+	int rc;
+
+	switch (ext_code.subcode >> 8) {
+	case DASD_DIAG_CODE_31BIT:
+		ip = (addr_t) param32;
+		break;
+	case DASD_DIAG_CODE_64BIT:
+		ip = (addr_t) param64;
+		break;
+	default:
+		return;
+	}
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++;
+	if (!ip) {		/* no intparm: unsolicited interrupt */
+		DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
+			      "interrupt");
+		return;
+	}
+	cqr = (struct dasd_ccw_req *) ip;
+	device = (struct dasd_device *) cqr->startdev;
+	if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
+		DBF_DEV_EVENT(DBF_WARNING, device,
+			    " magic number of dasd_ccw_req 0x%08X doesn't"
+			    " match discipline 0x%08X",
+			    cqr->magic, *(int *) (&device->discipline->name));
+		return;
+	}
+
+	/* get irq lock to modify request queue */
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+
+	/* Check for a pending clear operation */
+	if (cqr->status == DASD_CQR_CLEAR_PENDING) {
+		cqr->status = DASD_CQR_CLEARED;
+		dasd_device_clear_timer(device);
+		dasd_schedule_device_bh(device);
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+		return;
+	}
+
+	cqr->stopclk = get_clock();
+
+	expires = 0;
+	if ((ext_code.subcode & 0xff) == 0) {
+		cqr->status = DASD_CQR_SUCCESS;
+		/* Start first request on queue if possible -> fast_io. */
+		if (!list_empty(&device->ccw_queue)) {
+			next = list_entry(device->ccw_queue.next,
+					  struct dasd_ccw_req, devlist);
+			if (next->status == DASD_CQR_QUEUED) {
+				rc = dasd_start_diag(next);
+				if (rc == 0)
+					expires = next->expires;
+			}
+		}
+	} else {
+		cqr->status = DASD_CQR_QUEUED;
+		DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
+			      "request %p was %d (%d retries left)", cqr,
+			      ext_code.subcode & 0xff, cqr->retries);
+		dasd_diag_erp(device);
+	}
+
+	if (expires != 0)
+		dasd_device_set_timer(device, expires);
+	else
+		dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
+
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+
+/* Check whether device can be controlled by DIAG discipline. Return zero on
+ * success, non-zero otherwise. */
+static int
+dasd_diag_check_device(struct dasd_device *device)
+{
+	struct dasd_block *block;
+	struct dasd_diag_private *private;
+	struct dasd_diag_characteristics *rdc_data;
+	struct dasd_diag_bio bio;
+	struct vtoc_cms_label *label;
+	blocknum_t end_block;
+	unsigned int sb, bsize;
+	int rc;
+
+	private = (struct dasd_diag_private *) device->private;
+	if (private == NULL) {
+		private = kzalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
+		if (private == NULL) {
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				"Allocating memory for private DASD data "
+				      "failed\n");
+			return -ENOMEM;
+		}
+		ccw_device_get_id(device->cdev, &private->dev_id);
+		device->private = (void *) private;
+	}
+	block = dasd_alloc_block();
+	if (IS_ERR(block)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "could not allocate dasd block structure");
+		device->private = NULL;
+		kfree(private);
+		return PTR_ERR(block);
+	}
+	device->block = block;
+	block->base = device;
+
+	/* Read Device Characteristics */
+	rdc_data = (void *) &(private->rdc_data);
+	rdc_data->dev_nr = private->dev_id.devno;
+	rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);
+
+	rc = diag210((struct diag210 *) rdc_data);
+	if (rc) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "failed to retrieve device "
+			    "information (rc=%d)", rc);
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+
+	device->default_expires = DIAG_TIMEOUT;
+
+	/* Figure out position of label block */
+	switch (private->rdc_data.vdev_class) {
+	case DEV_CLASS_FBA:
+		private->pt_block = 1;
+		break;
+	case DEV_CLASS_ECKD:
+		private->pt_block = 2;
+		break;
+	default:
+		pr_warning("%s: Device type %d is not supported "
+			   "in DIAG mode\n", dev_name(&device->cdev->dev),
+			   private->rdc_data.vdev_class);
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+
+	DBF_DEV_EVENT(DBF_INFO, device,
+		      "%04X: %04X on real %04X/%02X",
+		      rdc_data->dev_nr,
+		      rdc_data->vdev_type,
+		      rdc_data->rdev_type, rdc_data->rdev_model);
+
+	/* terminate all outstanding operations */
+	mdsk_term_io(device);
+
+	/* figure out blocksize of device */
+	label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
+	if (label == NULL)  {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "No memory to allocate initialization request");
+		rc = -ENOMEM;
+		goto out;
+	}
+	rc = 0;
+	end_block = 0;
+	/* try all sizes - needed for ECKD devices */
+	for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
+		mdsk_init_io(device, bsize, 0, &end_block);
+		memset(&bio, 0, sizeof (struct dasd_diag_bio));
+		bio.type = MDSK_READ_REQ;
+		bio.block_number = private->pt_block + 1;
+		bio.buffer = label;
+		memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io));
+		private->iob.dev_nr = rdc_data->dev_nr;
+		private->iob.key = 0;
+		private->iob.flags = 0;	/* do synchronous io */
+		private->iob.block_count = 1;
+		private->iob.interrupt_params = 0;
+		private->iob.bio_list = &bio;
+		private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
+		rc = dia250(&private->iob, RW_BIO);
+		if (rc == 3) {
+			pr_warning("%s: A 64-bit DIAG call failed\n",
+				   dev_name(&device->cdev->dev));
+			rc = -EOPNOTSUPP;
+			goto out_label;
+		}
+		mdsk_term_io(device);
+		if (rc == 0)
+			break;
+	}
+	if (bsize > PAGE_SIZE) {
+		pr_warning("%s: Accessing the DASD failed because of an "
+			   "incorrect format (rc=%d)\n",
+			   dev_name(&device->cdev->dev), rc);
+		rc = -EIO;
+		goto out_label;
+	}
+	/* check for label block */
+	if (memcmp(label->label_id, DASD_DIAG_CMS1,
+		  sizeof(DASD_DIAG_CMS1)) == 0) {
+		/* get formatted blocksize from label block */
+		bsize = (unsigned int) label->block_size;
+		block->blocks = (unsigned long) label->block_count;
+	} else
+		block->blocks = end_block;
+	block->bp_block = bsize;
+	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
+	for (sb = 512; sb < bsize; sb = sb << 1)
+		block->s2b_shift++;
+	rc = mdsk_init_io(device, block->bp_block, 0, NULL);
+	if (rc && (rc != 4)) {
+		pr_warning("%s: DIAG initialization failed with rc=%d\n",
+			   dev_name(&device->cdev->dev), rc);
+		rc = -EIO;
+	} else {
+		if (rc == 4)
+			set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+		pr_info("%s: New DASD with %ld byte/block, total size %ld "
+			"KB%s\n", dev_name(&device->cdev->dev),
+			(unsigned long) block->bp_block,
+			(unsigned long) (block->blocks <<
+					 block->s2b_shift) >> 1,
+			(rc == 4) ? ", read-only device" : "");
+		rc = 0;
+	}
+out_label:
+	free_page((long) label);
+out:
+	if (rc) {
+		device->block = NULL;
+		dasd_free_block(block);
+		device->private = NULL;
+		kfree(private);
+	}
+	return rc;
+}
+
+/* Fill in virtual disk geometry for device. Return zero on success, non-zero
+ * otherwise. */
+static int
+dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
+{
+	if (dasd_check_blocksize(block->bp_block) != 0)
+		return -EINVAL;
+	geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
+	geo->heads = 16;
+	geo->sectors = 128 >> block->s2b_shift;
+	return 0;
+}
+
+static dasd_erp_fn_t
+dasd_diag_erp_action(struct dasd_ccw_req * cqr)
+{
+	return dasd_default_erp_action;
+}
+
+static dasd_erp_fn_t
+dasd_diag_erp_postaction(struct dasd_ccw_req * cqr)
+{
+	return dasd_default_erp_postaction;
+}
+
+/* Create DASD request from block device request. Return pointer to new
+ * request on success, ERR_PTR otherwise. */
+static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
+					       struct dasd_block *block,
+					       struct request *req)
+{
+	struct dasd_ccw_req *cqr;
+	struct dasd_diag_req *dreq;
+	struct dasd_diag_bio *dbio;
+	struct req_iterator iter;
+	struct bio_vec *bv;
+	char *dst;
+	unsigned int count, datasize;
+	sector_t recid, first_rec, last_rec;
+	unsigned int blksize, off;
+	unsigned char rw_cmd;
+
+	if (rq_data_dir(req) == READ)
+		rw_cmd = MDSK_READ_REQ;
+	else if (rq_data_dir(req) == WRITE)
+		rw_cmd = MDSK_WRITE_REQ;
+	else
+		return ERR_PTR(-EINVAL);
+	blksize = block->bp_block;
+	/* Calculate record id of first and last block. */
+	first_rec = blk_rq_pos(req) >> block->s2b_shift;
+	last_rec =
+		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+	/* Check struct bio and count the number of blocks for the request. */
+	count = 0;
+	rq_for_each_segment(bv, req, iter) {
+		if (bv->bv_len & (blksize - 1))
+			/* FBA can only do full blocks. */
+			return ERR_PTR(-EINVAL);
+		count += bv->bv_len >> (block->s2b_shift + 9);
+	}
+	/* Paranoia. */
+	if (count != last_rec - first_rec + 1)
+		return ERR_PTR(-EINVAL);
+	/* Build the request */
+	datasize = sizeof(struct dasd_diag_req) +
+		count*sizeof(struct dasd_diag_bio);
+	cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
+	if (IS_ERR(cqr))
+		return cqr;
+
+	dreq = (struct dasd_diag_req *) cqr->data;
+	dreq->block_count = count;
+	dbio = dreq->bio;
+	recid = first_rec;
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		for (off = 0; off < bv->bv_len; off += blksize) {
+			memset(dbio, 0, sizeof (struct dasd_diag_bio));
+			dbio->type = rw_cmd;
+			dbio->block_number = recid + 1;
+			dbio->buffer = dst;
+			dbio++;
+			dst += blksize;
+			recid++;
+		}
+	}
+	cqr->retries = DIAG_MAX_RETRIES;
+	cqr->buildclk = get_clock();
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->startdev = memdev;
+	cqr->memdev = memdev;
+	cqr->block = block;
+	cqr->expires = memdev->default_expires * HZ;
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+/* Release DASD request. Return non-zero if request was successful, zero
+ * otherwise. */
+static int
+dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+{
+	int status;
+
+	status = cqr->status == DASD_CQR_DONE;
+	dasd_sfree_request(cqr, cqr->memdev);
+	return status;
+}
+
+static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
+{
+	cqr->status = DASD_CQR_FILLED;
+}
+
+/* Fill in IOCTL data for device. */
+static int
+dasd_diag_fill_info(struct dasd_device * device,
+		    struct dasd_information2_t * info)
+{
+	struct dasd_diag_private *private;
+
+	private = (struct dasd_diag_private *) device->private;
+	info->label_block = (unsigned int) private->pt_block;
+	info->FBA_layout = 1;
+	info->format = DASD_FORMAT_LDL;
+	info->characteristics_size = sizeof (struct dasd_diag_characteristics);
+	memcpy(info->characteristics,
+	       &((struct dasd_diag_private *) device->private)->rdc_data,
+	       sizeof (struct dasd_diag_characteristics));
+	info->confdata_size = 0;
+	return 0;
+}
+
+static void
+dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
+		     struct irb *stat)
+{
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+		    "dump sense not available for DIAG data");
+}
+
+static struct dasd_discipline dasd_diag_discipline = {
+	.owner = THIS_MODULE,
+	.name = "DIAG",
+	.ebcname = "DIAG",
+	.max_blocks = DIAG_MAX_BLOCKS,
+	.check_device = dasd_diag_check_device,
+	.verify_path = dasd_generic_verify_path,
+	.fill_geometry = dasd_diag_fill_geometry,
+	.start_IO = dasd_start_diag,
+	.term_IO = dasd_diag_term_IO,
+	.handle_terminated_request = dasd_diag_handle_terminated_request,
+	.erp_action = dasd_diag_erp_action,
+	.erp_postaction = dasd_diag_erp_postaction,
+	.build_cp = dasd_diag_build_cp,
+	.free_cp = dasd_diag_free_cp,
+	.dump_sense = dasd_diag_dump_sense,
+	.fill_info = dasd_diag_fill_info,
+};
+
+static int __init
+dasd_diag_init(void)
+{
+	if (!MACHINE_IS_VM) {
+		pr_info("Discipline %s cannot be used without z/VM\n",
+			dasd_diag_discipline.name);
+		return -ENODEV;
+	}
+	ASCEBC(dasd_diag_discipline.ebcname, 4);
+
+	service_subclass_irq_register();
+	register_external_interrupt(0x2603, dasd_ext_handler);
+	dasd_diag_discipline_pointer = &dasd_diag_discipline;
+	return 0;
+}
+
+static void __exit
+dasd_diag_cleanup(void)
+{
+	unregister_external_interrupt(0x2603, dasd_ext_handler);
+	service_subclass_irq_unregister();
+	dasd_diag_discipline_pointer = NULL;
+}
+
+module_init(dasd_diag_init);
+module_exit(dasd_diag_cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_diag.h b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_diag.h
new file mode 100644
index 0000000..4f71fbe
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_diag.h
@@ -0,0 +1,123 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_diag.h
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Based on.......: linux/drivers/s390/block/mdisk.h
+ * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ *
+ */
+
+#define MDSK_WRITE_REQ 0x01
+#define MDSK_READ_REQ  0x02
+
+#define INIT_BIO	0x00
+#define RW_BIO		0x01
+#define TERM_BIO	0x02
+
+#define DEV_CLASS_FBA	0x01
+#define DEV_CLASS_ECKD	0x04
+
+#define DASD_DIAG_CODE_31BIT		0x03
+#define DASD_DIAG_CODE_64BIT		0x07
+
+#define DASD_DIAG_RWFLAG_ASYNC		0x02
+#define DASD_DIAG_RWFLAG_NOCACHE	0x01
+
+#define DASD_DIAG_FLAGA_FORMAT_64BIT	0x80
+
+struct dasd_diag_characteristics {
+	u16 dev_nr;
+	u16 rdc_len;
+	u8 vdev_class;
+	u8 vdev_type;
+	u8 vdev_status;
+	u8 vdev_flags;
+	u8 rdev_class;
+	u8 rdev_type;
+	u8 rdev_model;
+	u8 rdev_features;
+} __attribute__ ((packed, aligned(4)));
+
+
+#ifdef CONFIG_64BIT
+#define DASD_DIAG_FLAGA_DEFAULT		DASD_DIAG_FLAGA_FORMAT_64BIT
+
+typedef u64 blocknum_t;
+typedef s64 sblocknum_t;
+
+struct dasd_diag_bio {
+	u8 type;
+	u8 status;
+	u8 spare1[2];
+	u32 alet;
+	blocknum_t block_number;
+	void *buffer;
+} __attribute__ ((packed, aligned(8)));
+
+struct dasd_diag_init_io {
+	u16 dev_nr;
+	u8 flaga;
+	u8 spare1[21];
+	u32 block_size;
+	u8 spare2[4];
+	blocknum_t offset;
+	sblocknum_t start_block;
+	blocknum_t end_block;
+	u8  spare3[8];
+} __attribute__ ((packed, aligned(8)));
+
+struct dasd_diag_rw_io {
+	u16 dev_nr;
+	u8  flaga;
+	u8  spare1[21];
+	u8  key;
+	u8  flags;
+	u8  spare2[2];
+	u32 block_count;
+	u32 alet;
+	u8  spare3[4];
+	u64 interrupt_params;
+	struct dasd_diag_bio *bio_list;
+	u8  spare4[8];
+} __attribute__ ((packed, aligned(8)));
+#else /* CONFIG_64BIT */
+#define DASD_DIAG_FLAGA_DEFAULT		0x0
+
+typedef u32 blocknum_t;
+typedef s32 sblocknum_t;
+
+struct dasd_diag_bio {
+	u8 type;
+	u8 status;
+	u16 spare1;
+	blocknum_t block_number;
+	u32 alet;
+	void *buffer;
+} __attribute__ ((packed, aligned(8)));
+
+struct dasd_diag_init_io {
+	u16 dev_nr;
+	u8 flaga;
+	u8 spare1[21];
+	u32 block_size;
+	blocknum_t offset;
+	sblocknum_t start_block;
+	blocknum_t end_block;
+	u8 spare2[24];
+} __attribute__ ((packed, aligned(8)));
+
+struct dasd_diag_rw_io {
+	u16 dev_nr;
+	u8 flaga;
+	u8 spare1[21];
+	u8 key;
+	u8 flags;
+	u8 spare2[2];
+	u32 block_count;
+	u32 alet;
+	struct dasd_diag_bio *bio_list;
+	u32 interrupt_params;
+	u8 spare3[20];
+} __attribute__ ((packed, aligned(8)));
+#endif /* CONFIG_64BIT */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_eckd.c b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_eckd.c
new file mode 100644
index 0000000..bc2e8a7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_eckd.c
@@ -0,0 +1,4337 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_eckd.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Carsten Otte <Cotte@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
+ * Author.........: Nigel Hislop <hislop_nigel@emc.com>
+ */
+
+#define KMSG_COMPONENT "dasd-eckd"
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h>	/* HDIO_GETGEO			    */
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/compat.h>
+#include <linux/init.h>
+
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+#include <asm/itcw.h>
+
+#include "dasd_int.h"
+#include "dasd_eckd.h"
+#include "../cio/chsc.h"
+
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif				/* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(eckd):"
+
+#define ECKD_C0(i) (i->home_bytes)
+#define ECKD_F(i) (i->formula)
+#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
+		    (i->factors.f_0x02.f1))
+#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
+		    (i->factors.f_0x02.f2))
+#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
+		    (i->factors.f_0x02.f3))
+#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
+#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
+#define ECKD_F6(i) (i->factor6)
+#define ECKD_F7(i) (i->factor7)
+#define ECKD_F8(i) (i->factor8)
+
+/*
+ * Raw track access always maps a track to 64k in memory,
+ * i.e. 16 blocks of 4k per track.
+ */
+#define DASD_RAW_BLOCK_PER_TRACK 16
+#define DASD_RAW_BLOCKSIZE 4096
+/* 64k is 128 x 512 byte sectors */
+#define DASD_RAW_SECTORS_PER_TRACK 128
+
+MODULE_LICENSE("GPL");
+
+static struct dasd_discipline dasd_eckd_discipline;
+
+/* The ccw bus type uses this table to find devices that it sends to
+ * dasd_eckd_probe */
+static struct ccw_device_id dasd_eckd_ids[] = {
+	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
+	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
+	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
+	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
+	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
+	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
+	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
+	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
+	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
+	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
+
+static struct ccw_driver dasd_eckd_driver; /* see below */
+
+#define INIT_CQR_OK 0
+#define INIT_CQR_UNFORMATTED 1
+#define INIT_CQR_ERROR 2
+
+/* emergency request for reserve/release */
+static struct {
+	struct dasd_ccw_req cqr;
+	struct ccw1 ccw;
+	char data[32];
+} *dasd_reserve_req;
+static DEFINE_MUTEX(dasd_reserve_mutex);
+
+/* definitions for the path verification worker */
+struct path_verification_work_data {
+	struct work_struct worker;
+	struct dasd_device *device;
+	struct dasd_ccw_req cqr;
+	struct ccw1 ccw;
+	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
+	int isglobal;
+	__u8 tbvpm;
+};
+static struct path_verification_work_data *path_verification_worker;
+static DEFINE_MUTEX(dasd_path_verification_mutex);
+
+/* Initial attempt at a probe function. This can be simplified once
+ * the other detection code is gone. */
+static int
+dasd_eckd_probe (struct ccw_device *cdev)
+{
+	int ret;
+
+	/* set ECKD specific ccw-device options */
+	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
+				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
+	if (ret) {
+		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
+				"dasd_eckd_probe: could not set "
+				"ccw-device options");
+		return ret;
+	}
+	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
+	return ret;
+}
+
+static int
+dasd_eckd_set_online(struct ccw_device *cdev)
+{
+	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
+}
+
+static const int sizes_trk0[] = { 28, 148, 84 };
+#define LABEL_SIZE 140
+
+static inline unsigned int
+round_up_multiple(unsigned int no, unsigned int mult)
+{
+	int rem = no % mult;
+	return (rem ? no - rem + mult : no);
+}
+
+static inline unsigned int
+ceil_quot(unsigned int d1, unsigned int d2)
+{
+	return (d1 + (d2 - 1)) / d2;
+}
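+/*
+ * For illustration: round_up_multiple(10, 4) == 12 and
+ * ceil_quot(10, 4) == 3.
+ */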
+
+static unsigned int
+recs_per_track(struct dasd_eckd_characteristics * rdc,
+	       unsigned int kl, unsigned int dl)
+{
+	int dn, kn;
+
+	switch (rdc->dev_type) {
+	case 0x3380:
+		if (kl)
+			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
+				       ceil_quot(dl + 12, 32));
+		else
+			return 1499 / (15 + ceil_quot(dl + 12, 32));
+	case 0x3390:
+		dn = ceil_quot(dl + 6, 232) + 1;
+		if (kl) {
+			kn = ceil_quot(kl + 6, 232) + 1;
+			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
+				       9 + ceil_quot(dl + 6 * dn, 34));
+		} else
+			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
+	case 0x9345:
+		dn = ceil_quot(dl + 6, 232) + 1;
+		if (kl) {
+			kn = ceil_quot(kl + 6, 232) + 1;
+			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
+				       ceil_quot(dl + 6 * dn, 34));
+		} else
+			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
+	}
+	return 0;
+}
+
+static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
+{
+	geo->cyl = (__u16) cyl;
+	geo->head = cyl >> 16;
+	geo->head <<= 4;
+	geo->head |= head;
+}
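+/*
+ * Example of the encoding above (illustrative values): cyl 0x12345 and
+ * head 3 are stored as geo->cyl = 0x2345 and geo->head = 0x13, i.e. the
+ * cylinder bits above bit 15 are kept in the head value, shifted left
+ * by four and combined with the real head number.
+ */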
+
+static int
+check_XRC (struct ccw1         *de_ccw,
+           struct DE_eckd_data *data,
+           struct dasd_device  *device)
+{
+	struct dasd_eckd_private *private;
+	int rc;
+
+	private = (struct dasd_eckd_private *) device->private;
+	if (!private->rdc_data.facilities.XRC_supported)
+		return 0;
+
+	/* switch on System Time Stamp - needed for XRC Support */
+	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
+	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
+
+	rc = get_sync_clock(&data->ep_sys_time);
+	/* Ignore return code if sync clock is switched off. */
+	if (rc == -ENOSYS || rc == -EACCES)
+		rc = 0;
+
+	de_ccw->count = sizeof(struct DE_eckd_data);
+	de_ccw->flags |= CCW_FLAG_SLI;
+	return rc;
+}
+
+static int
+define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
+	      unsigned int totrk, int cmd, struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	u32 begcyl, endcyl;
+	u16 heads, beghead, endhead;
+	int rc = 0;
+
+	private = (struct dasd_eckd_private *) device->private;
+
+	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
+	ccw->flags = 0;
+	ccw->count = 16;
+	ccw->cda = (__u32) __pa(data);
+
+	memset(data, 0, sizeof(struct DE_eckd_data));
+	switch (cmd) {
+	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+	case DASD_ECKD_CCW_READ_RECORD_ZERO:
+	case DASD_ECKD_CCW_READ:
+	case DASD_ECKD_CCW_READ_MT:
+	case DASD_ECKD_CCW_READ_CKD:
+	case DASD_ECKD_CCW_READ_CKD_MT:
+	case DASD_ECKD_CCW_READ_KD:
+	case DASD_ECKD_CCW_READ_KD_MT:
+	case DASD_ECKD_CCW_READ_COUNT:
+		data->mask.perm = 0x1;
+		data->attributes.operation = private->attrib.operation;
+		break;
+	case DASD_ECKD_CCW_WRITE:
+	case DASD_ECKD_CCW_WRITE_MT:
+	case DASD_ECKD_CCW_WRITE_KD:
+	case DASD_ECKD_CCW_WRITE_KD_MT:
+		data->mask.perm = 0x02;
+		data->attributes.operation = private->attrib.operation;
+		rc = check_XRC (ccw, data, device);
+		break;
+	case DASD_ECKD_CCW_WRITE_CKD:
+	case DASD_ECKD_CCW_WRITE_CKD_MT:
+		data->attributes.operation = DASD_BYPASS_CACHE;
+		rc = check_XRC (ccw, data, device);
+		break;
+	case DASD_ECKD_CCW_ERASE:
+	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
+	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
+		data->mask.perm = 0x3;
+		data->mask.auth = 0x1;
+		data->attributes.operation = DASD_BYPASS_CACHE;
+		rc = check_XRC (ccw, data, device);
+		break;
+	default:
+		dev_err(&device->cdev->dev,
+			"0x%x is not a known command\n", cmd);
+		break;
+	}
+
+	data->attributes.mode = 0x3;	/* ECKD */
+
+	if ((private->rdc_data.cu_type == 0x2105 ||
+	     private->rdc_data.cu_type == 0x2107 ||
+	     private->rdc_data.cu_type == 0x1750)
+	    && !(private->uses_cdl && trk < 2))
+		data->ga_extended |= 0x40; /* Regular Data Format Mode */
+
+	heads = private->rdc_data.trk_per_cyl;
+	begcyl = trk / heads;
+	beghead = trk % heads;
+	endcyl = totrk / heads;
+	endhead = totrk % heads;
+
+	/* check for sequential prestage - enhance cylinder range */
+	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
+	    data->attributes.operation == DASD_SEQ_ACCESS) {
+
+		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
+			endcyl += private->attrib.nr_cyl;
+		else
+			endcyl = (private->real_cyl - 1);
+	}
+
+	set_ch_t(&data->beg_ext, begcyl, beghead);
+	set_ch_t(&data->end_ext, endcyl, endhead);
+	return rc;
+}
+
+static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
+			       struct dasd_device  *device)
+{
+	struct dasd_eckd_private *private;
+	int rc;
+
+	private = (struct dasd_eckd_private *) device->private;
+	if (!private->rdc_data.facilities.XRC_supported)
+		return 0;
+
+	/* switch on System Time Stamp - needed for XRC Support */
+	pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
+	pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
+	pfxdata->validity.time_stamp = 1;	    /* 'Time Stamp Valid'   */
+
+	rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
+	/* Ignore return code if sync clock is switched off. */
+	if (rc == -ENOSYS || rc == -EACCES)
+		rc = 0;
+	return rc;
+}
+
+static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
+			  unsigned int rec_on_trk, int count, int cmd,
+			  struct dasd_device *device, unsigned int reclen,
+			  unsigned int tlf)
+{
+	struct dasd_eckd_private *private;
+	int sector;
+	int dn, d;
+
+	private = (struct dasd_eckd_private *) device->private;
+
+	memset(data, 0, sizeof(*data));
+	sector = 0;
+	if (rec_on_trk) {
+		switch (private->rdc_data.dev_type) {
+		case 0x3390:
+			dn = ceil_quot(reclen + 6, 232);
+			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
+			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
+			break;
+		case 0x3380:
+			d = 7 + ceil_quot(reclen + 12, 32);
+			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
+			break;
+		}
+	}
+	data->sector = sector;
+	/* note: the meaning of count depends on the operation:
+	 *	 for record-based I/O it is the number of records, but for
+	 *	 track-based I/O it is the number of tracks
+	 */
+	data->count = count;
+	switch (cmd) {
+	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
+		data->operation.orientation = 0x3;
+		data->operation.operation = 0x03;
+		break;
+	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+		data->operation.orientation = 0x3;
+		data->operation.operation = 0x16;
+		break;
+	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
+		data->operation.orientation = 0x1;
+		data->operation.operation = 0x03;
+		data->count++;
+		break;
+	case DASD_ECKD_CCW_READ_RECORD_ZERO:
+		data->operation.orientation = 0x3;
+		data->operation.operation = 0x16;
+		data->count++;
+		break;
+	case DASD_ECKD_CCW_WRITE:
+	case DASD_ECKD_CCW_WRITE_MT:
+	case DASD_ECKD_CCW_WRITE_KD:
+	case DASD_ECKD_CCW_WRITE_KD_MT:
+		data->auxiliary.length_valid = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x01;
+		break;
+	case DASD_ECKD_CCW_WRITE_CKD:
+	case DASD_ECKD_CCW_WRITE_CKD_MT:
+		data->auxiliary.length_valid = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x03;
+		break;
+	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
+		data->operation.orientation = 0x0;
+		data->operation.operation = 0x3F;
+		data->extended_operation = 0x11;
+		data->length = 0;
+		data->extended_parameter_length = 0x02;
+		if (data->count > 8) {
+			data->extended_parameter[0] = 0xFF;
+			data->extended_parameter[1] = 0xFF;
+			data->extended_parameter[1] <<= (16 - count);
+		} else {
+			data->extended_parameter[0] = 0xFF;
+			data->extended_parameter[0] <<= (8 - count);
+			data->extended_parameter[1] = 0x00;
+		}
+		data->sector = 0xFF;
+		break;
+	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
+		data->auxiliary.length_valid = 0x1;
+		data->length = reclen;	/* not tlf, as one might think */
+		data->operation.operation = 0x3F;
+		data->extended_operation = 0x23;
+		break;
+	case DASD_ECKD_CCW_READ:
+	case DASD_ECKD_CCW_READ_MT:
+	case DASD_ECKD_CCW_READ_KD:
+	case DASD_ECKD_CCW_READ_KD_MT:
+		data->auxiliary.length_valid = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x06;
+		break;
+	case DASD_ECKD_CCW_READ_CKD:
+	case DASD_ECKD_CCW_READ_CKD_MT:
+		data->auxiliary.length_valid = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x16;
+		break;
+	case DASD_ECKD_CCW_READ_COUNT:
+		data->operation.operation = 0x06;
+		break;
+	case DASD_ECKD_CCW_READ_TRACK:
+		data->operation.orientation = 0x1;
+		data->operation.operation = 0x0C;
+		data->extended_parameter_length = 0;
+		data->sector = 0xFF;
+		break;
+	case DASD_ECKD_CCW_READ_TRACK_DATA:
+		data->auxiliary.length_valid = 0x1;
+		data->length = tlf;
+		data->operation.operation = 0x0C;
+		break;
+	case DASD_ECKD_CCW_ERASE:
+		data->length = reclen;
+		data->auxiliary.length_valid = 0x1;
+		data->operation.operation = 0x0b;
+		break;
+	default:
+		DBF_DEV_EVENT(DBF_ERR, device,
+			    "fill LRE unknown opcode 0x%x", cmd);
+		BUG();
+	}
+	set_ch_t(&data->seek_addr,
+		 trk / private->rdc_data.trk_per_cyl,
+		 trk % private->rdc_data.trk_per_cyl);
+	data->search_arg.cyl = data->seek_addr.cyl;
+	data->search_arg.head = data->seek_addr.head;
+	data->search_arg.record = rec_on_trk;
+}
+
+static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
+		      unsigned int trk, unsigned int totrk, int cmd,
+		      struct dasd_device *basedev, struct dasd_device *startdev,
+		      unsigned char format, unsigned int rec_on_trk, int count,
+		      unsigned int blksize, unsigned int tlf)
+{
+	struct dasd_eckd_private *basepriv, *startpriv;
+	struct DE_eckd_data *dedata;
+	struct LRE_eckd_data *lredata;
+	u32 begcyl, endcyl;
+	u16 heads, beghead, endhead;
+	int rc = 0;
+
+	basepriv = (struct dasd_eckd_private *) basedev->private;
+	startpriv = (struct dasd_eckd_private *) startdev->private;
+	dedata = &pfxdata->define_extent;
+	lredata = &pfxdata->locate_record;
+
+	ccw->cmd_code = DASD_ECKD_CCW_PFX;
+	ccw->flags = 0;
+	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
+		ccw->count = sizeof(*pfxdata) + 2;
+		ccw->cda = (__u32) __pa(pfxdata);
+		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
+	} else {
+		ccw->count = sizeof(*pfxdata);
+		ccw->cda = (__u32) __pa(pfxdata);
+		memset(pfxdata, 0, sizeof(*pfxdata));
+	}
+
+	/* prefix data */
+	if (format > 1) {
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			      "PFX LRE unknown format 0x%x", format);
+		BUG();
+		return -EINVAL;
+	}
+	pfxdata->format = format;
+	pfxdata->base_address = basepriv->ned->unit_addr;
+	pfxdata->base_lss = basepriv->ned->ID;
+	pfxdata->validity.define_extent = 1;
+
+	/* private uid is kept up to date, conf_data may be outdated */
+	if (startpriv->uid.type != UA_BASE_DEVICE) {
+		pfxdata->validity.verify_base = 1;
+		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
+			pfxdata->validity.hyper_pav = 1;
+	}
+
+	/* define extent data (mostly) */
+	switch (cmd) {
+	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+	case DASD_ECKD_CCW_READ_RECORD_ZERO:
+	case DASD_ECKD_CCW_READ:
+	case DASD_ECKD_CCW_READ_MT:
+	case DASD_ECKD_CCW_READ_CKD:
+	case DASD_ECKD_CCW_READ_CKD_MT:
+	case DASD_ECKD_CCW_READ_KD:
+	case DASD_ECKD_CCW_READ_KD_MT:
+	case DASD_ECKD_CCW_READ_COUNT:
+		dedata->mask.perm = 0x1;
+		dedata->attributes.operation = basepriv->attrib.operation;
+		break;
+	case DASD_ECKD_CCW_READ_TRACK:
+	case DASD_ECKD_CCW_READ_TRACK_DATA:
+		dedata->mask.perm = 0x1;
+		dedata->attributes.operation = basepriv->attrib.operation;
+		dedata->blk_size = 0;
+		break;
+	case DASD_ECKD_CCW_WRITE:
+	case DASD_ECKD_CCW_WRITE_MT:
+	case DASD_ECKD_CCW_WRITE_KD:
+	case DASD_ECKD_CCW_WRITE_KD_MT:
+		dedata->mask.perm = 0x02;
+		dedata->attributes.operation = basepriv->attrib.operation;
+		rc = check_XRC_on_prefix(pfxdata, basedev);
+		break;
+	case DASD_ECKD_CCW_WRITE_CKD:
+	case DASD_ECKD_CCW_WRITE_CKD_MT:
+		dedata->attributes.operation = DASD_BYPASS_CACHE;
+		rc = check_XRC_on_prefix(pfxdata, basedev);
+		break;
+	case DASD_ECKD_CCW_ERASE:
+	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
+	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
+		dedata->mask.perm = 0x3;
+		dedata->mask.auth = 0x1;
+		dedata->attributes.operation = DASD_BYPASS_CACHE;
+		rc = check_XRC_on_prefix(pfxdata, basedev);
+		break;
+	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
+		dedata->mask.perm = 0x03;
+		dedata->attributes.operation = basepriv->attrib.operation;
+		dedata->blk_size = 0;
+		break;
+	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
+		dedata->mask.perm = 0x02;
+		dedata->attributes.operation = basepriv->attrib.operation;
+		dedata->blk_size = blksize;
+		rc = check_XRC_on_prefix(pfxdata, basedev);
+		break;
+	default:
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			    "PFX LRE unknown opcode 0x%x", cmd);
+		BUG();
+		return -EINVAL;
+	}
+
+	dedata->attributes.mode = 0x3;	/* ECKD */
+
+	if ((basepriv->rdc_data.cu_type == 0x2105 ||
+	     basepriv->rdc_data.cu_type == 0x2107 ||
+	     basepriv->rdc_data.cu_type == 0x1750)
+	    && !(basepriv->uses_cdl && trk < 2))
+		dedata->ga_extended |= 0x40; /* Regular Data Format Mode */
+
+	heads = basepriv->rdc_data.trk_per_cyl;
+	begcyl = trk / heads;
+	beghead = trk % heads;
+	endcyl = totrk / heads;
+	endhead = totrk % heads;
+
+	/* check for sequential prestage - enhance cylinder range */
+	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
+	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
+
+		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
+			endcyl += basepriv->attrib.nr_cyl;
+		else
+			endcyl = (basepriv->real_cyl - 1);
+	}
+
+	set_ch_t(&dedata->beg_ext, begcyl, beghead);
+	set_ch_t(&dedata->end_ext, endcyl, endhead);
+
+	if (format == 1) {
+		fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
+			      basedev, blksize, tlf);
+	}
+
+	return rc;
+}
+
+static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
+		  unsigned int trk, unsigned int totrk, int cmd,
+		  struct dasd_device *basedev, struct dasd_device *startdev)
+{
+	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
+			  0, 0, 0, 0, 0);
+}
+
+static void
+locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
+	      unsigned int rec_on_trk, int no_rec, int cmd,
+	      struct dasd_device * device, int reclen)
+{
+	struct dasd_eckd_private *private;
+	int sector;
+	int dn, d;
+
+	private = (struct dasd_eckd_private *) device->private;
+
+	DBF_DEV_EVENT(DBF_INFO, device,
+		  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
+		  trk, rec_on_trk, no_rec, cmd, reclen);
+
+	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
+	ccw->flags = 0;
+	ccw->count = 16;
+	ccw->cda = (__u32) __pa(data);
+
+	memset(data, 0, sizeof(struct LO_eckd_data));
+	sector = 0;
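+	/* estimate the set sector value from the device-type specific
+	 * track layout; the constants reflect the ECKD gap and field sizes */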
+	if (rec_on_trk) {
+		switch (private->rdc_data.dev_type) {
+		case 0x3390:
+			dn = ceil_quot(reclen + 6, 232);
+			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
+			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
+			break;
+		case 0x3380:
+			d = 7 + ceil_quot(reclen + 12, 32);
+			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
+			break;
+		}
+	}
+	data->sector = sector;
+	data->count = no_rec;
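+	/* orientation, operation code and record count depend on the
+	 * channel command that this locate record prepares */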
+	switch (cmd) {
+	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
+		data->operation.orientation = 0x3;
+		data->operation.operation = 0x03;
+		break;
+	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+		data->operation.orientation = 0x3;
+		data->operation.operation = 0x16;
+		break;
+	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
+		data->operation.orientation = 0x1;
+		data->operation.operation = 0x03;
+		data->count++;
+		break;
+	case DASD_ECKD_CCW_READ_RECORD_ZERO:
+		data->operation.orientation = 0x3;
+		data->operation.operation = 0x16;
+		data->count++;
+		break;
+	case DASD_ECKD_CCW_WRITE:
+	case DASD_ECKD_CCW_WRITE_MT:
+	case DASD_ECKD_CCW_WRITE_KD:
+	case DASD_ECKD_CCW_WRITE_KD_MT:
+		data->auxiliary.last_bytes_used = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x01;
+		break;
+	case DASD_ECKD_CCW_WRITE_CKD:
+	case DASD_ECKD_CCW_WRITE_CKD_MT:
+		data->auxiliary.last_bytes_used = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x03;
+		break;
+	case DASD_ECKD_CCW_READ:
+	case DASD_ECKD_CCW_READ_MT:
+	case DASD_ECKD_CCW_READ_KD:
+	case DASD_ECKD_CCW_READ_KD_MT:
+		data->auxiliary.last_bytes_used = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x06;
+		break;
+	case DASD_ECKD_CCW_READ_CKD:
+	case DASD_ECKD_CCW_READ_CKD_MT:
+		data->auxiliary.last_bytes_used = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x16;
+		break;
+	case DASD_ECKD_CCW_READ_COUNT:
+		data->operation.operation = 0x06;
+		break;
+	case DASD_ECKD_CCW_ERASE:
+		data->length = reclen;
+		data->auxiliary.last_bytes_used = 0x1;
+		data->operation.operation = 0x0b;
+		break;
+	default:
+		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
+			      "opcode 0x%x", cmd);
+	}
+	set_ch_t(&data->seek_addr,
+		 trk / private->rdc_data.trk_per_cyl,
+		 trk % private->rdc_data.trk_per_cyl);
+	data->search_arg.cyl = data->seek_addr.cyl;
+	data->search_arg.head = data->seek_addr.head;
+	data->search_arg.record = rec_on_trk;
+}
+
+/*
+ * Returns 1 if the block is one of the special blocks that needs
+ * to get read/written with the KD variant of the command.
+ * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
+ * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
+ * Luckily the KD variants differ only by one bit (0x08) from the
+ * normal variant. So don't wonder about code like:
+ * if (dasd_eckd_cdl_special(blk_per_trk, recid))
+ *         ccw->cmd_code |= 0x8;
+ */
+static inline int
+dasd_eckd_cdl_special(int blk_per_trk, int recid)
+{
+	if (recid < 3)
+		return 1;
+	if (recid < blk_per_trk)
+		return 0;
+	if (recid < 2 * blk_per_trk)
+		return 1;
+	return 0;
+}
+
+/*
+ * Returns the record size for the special blocks of the cdl format.
+ * Only returns something useful if dasd_eckd_cdl_special is true
+ * for the recid.
+ */
+static inline int
+dasd_eckd_cdl_reclen(int recid)
+{
+	if (recid < 3)
+		return sizes_trk0[recid];
+	return LABEL_SIZE;
+}
+/* create unique id from private structure. */
+static void create_uid(struct dasd_eckd_private *private)
+{
+	int count;
+	struct dasd_uid *uid;
+
+	uid = &private->uid;
+	memset(uid, 0, sizeof(struct dasd_uid));
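+	/* vendor and serial number are delivered in EBCDIC by the NED
+	 * and are converted to ASCII here */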
+	memcpy(uid->vendor, private->ned->HDA_manufacturer,
+	       sizeof(uid->vendor) - 1);
+	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
+	memcpy(uid->serial, private->ned->HDA_location,
+	       sizeof(uid->serial) - 1);
+	EBCASC(uid->serial, sizeof(uid->serial) - 1);
+	uid->ssid = private->gneq->subsystemID;
+	uid->real_unit_addr = private->ned->unit_addr;
+	if (private->sneq) {
+		uid->type = private->sneq->sua_flags;
+		if (uid->type == UA_BASE_PAV_ALIAS)
+			uid->base_unit_addr = private->sneq->base_unit_addr;
+	} else {
+		uid->type = UA_BASE_DEVICE;
+	}
+	if (private->vdsneq) {
+		for (count = 0; count < 16; count++) {
+			sprintf(uid->vduit+2*count, "%02x",
+				private->vdsneq->uit[count]);
+		}
+	}
+}
+
+/*
+ * Generate device unique id that specifies the physical device.
+ */
+static int dasd_eckd_generate_uid(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	unsigned long flags;
+
+	private = (struct dasd_eckd_private *) device->private;
+	if (!private)
+		return -ENODEV;
+	if (!private->ned || !private->gneq)
+		return -ENODEV;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	create_uid(private);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	return 0;
+}
+
+static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
+{
+	struct dasd_eckd_private *private;
+	unsigned long flags;
+
+	if (device->private) {
+		private = (struct dasd_eckd_private *)device->private;
+		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+		*uid = private->uid;
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+/*
+ * compare device UID with data of a given dasd_eckd_private structure
+ * return 0 for match
+ */
+static int dasd_eckd_compare_path_uid(struct dasd_device *device,
+				      struct dasd_eckd_private *private)
+{
+	struct dasd_uid device_uid;
+
+	create_uid(private);
+	dasd_eckd_get_uid(device, &device_uid);
+
+	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
+}
+
+static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
+				   struct dasd_ccw_req *cqr,
+				   __u8 *rcd_buffer,
+				   __u8 lpm)
+{
+	struct ccw1 *ccw;
+	/*
+	 * buffer has to start with EBCDIC "V1.0" to show
+	 * support for virtual device SNEQ
+	 */
+	rcd_buffer[0] = 0xE5;
+	rcd_buffer[1] = 0xF1;
+	rcd_buffer[2] = 0x4B;
+	rcd_buffer[3] = 0xF0;
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_RCD;
+	ccw->flags = 0;
+	ccw->cda = (__u32)(addr_t)rcd_buffer;
+	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
+	cqr->magic = DASD_ECKD_MAGIC;
+
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->block = NULL;
+	cqr->expires = 10*HZ;
+	cqr->lpm = lpm;
+	cqr->retries = 256;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
+}
+
+/*
+ * Wakeup helper for read_conf.
+ * If the cqr is not done and needs some error recovery, the buffer
+ * has to be re-initialized with the EBCDIC "V1.0" string to show
+ * support for virtual device SNEQ.
+ */
+static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
+{
+	struct ccw1 *ccw;
+	__u8 *rcd_buffer;
+
+	if (cqr->status !=  DASD_CQR_DONE) {
+		ccw = cqr->cpaddr;
+		rcd_buffer = (__u8 *)((addr_t) ccw->cda);
+		memset(rcd_buffer, 0, sizeof(*rcd_buffer));
+
+		rcd_buffer[0] = 0xE5;
+		rcd_buffer[1] = 0xF1;
+		rcd_buffer[2] = 0x4B;
+		rcd_buffer[3] = 0xF0;
+	}
+	dasd_wakeup_cb(cqr, data);
+}
+
+static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
+					   struct dasd_ccw_req *cqr,
+					   __u8 *rcd_buffer,
+					   __u8 lpm)
+{
+	struct ciw *ciw;
+	int rc;
+	/*
+	 * sanity check: scan for RCD command in extended SenseID data
+	 * some devices do not support RCD
+	 */
+	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
+	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
+		return -EOPNOTSUPP;
+
+	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
+	cqr->retries = 5;
+	cqr->callback = read_conf_cb;
+	rc = dasd_sleep_on_immediatly(cqr);
+	return rc;
+}
+
+static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
+				   void **rcd_buffer,
+				   int *rcd_buffer_size, __u8 lpm)
+{
+	struct ciw *ciw;
+	char *rcd_buf = NULL;
+	int ret;
+	struct dasd_ccw_req *cqr;
+
+	/*
+	 * sanity check: scan for RCD command in extended SenseID data
+	 * some devices do not support RCD
+	 */
+	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
+	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
+		ret = -EOPNOTSUPP;
+		goto out_error;
+	}
+	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
+	if (!rcd_buf) {
+		ret = -ENOMEM;
+		goto out_error;
+	}
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
+				   0, /* use rcd_buf as data area */
+				   device);
+	if (IS_ERR(cqr)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "Could not allocate RCD request");
+		ret = -ENOMEM;
+		goto out_error;
+	}
+	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
+	cqr->callback = read_conf_cb;
+	ret = dasd_sleep_on(cqr);
+	/*
+	 * on success we update the user input parms
+	 */
+	dasd_sfree_request(cqr, cqr->memdev);
+	if (ret)
+		goto out_error;
+
+	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
+	*rcd_buffer = rcd_buf;
+	return 0;
+out_error:
+	kfree(rcd_buf);
+	*rcd_buffer = NULL;
+	*rcd_buffer_size = 0;
+	return ret;
+}
+
+static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
+{
+
+	struct dasd_sneq *sneq;
+	int i, count;
+
+	private->ned = NULL;
+	private->sneq = NULL;
+	private->vdsneq = NULL;
+	private->gneq = NULL;
+	count = private->conf_len / sizeof(struct dasd_sneq);
+	sneq = (struct dasd_sneq *)private->conf_data;
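+	/* walk the configuration data records and remember the NED, SNEQ,
+	 * vital product SNEQ and GNEQ entries */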
+	for (i = 0; i < count; ++i) {
+		if (sneq->flags.identifier == 1 && sneq->format == 1)
+			private->sneq = sneq;
+		else if (sneq->flags.identifier == 1 && sneq->format == 4)
+			private->vdsneq = (struct vd_sneq *)sneq;
+		else if (sneq->flags.identifier == 2)
+			private->gneq = (struct dasd_gneq *)sneq;
+		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
+			private->ned = (struct dasd_ned *)sneq;
+		sneq++;
+	}
+	if (!private->ned || !private->gneq) {
+		private->ned = NULL;
+		private->sneq = NULL;
+		private->vdsneq = NULL;
+		private->gneq = NULL;
+		return -EINVAL;
+	}
+	return 0;
+
+};
+
+static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
+{
+	struct dasd_gneq *gneq;
+	int i, count, found;
+
+	count = conf_len / sizeof(*gneq);
+	gneq = (struct dasd_gneq *)conf_data;
+	found = 0;
+	for (i = 0; i < count; ++i) {
+		if (gneq->flags.identifier == 2) {
+			found = 1;
+			break;
+		}
+		gneq++;
+	}
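+	/* the path access mode is encoded in the low-order three bits of
+	 * byte 18 of the generic NEQ */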
+	if (found)
+		return ((char *)gneq)[18] & 0x07;
+	else
+		return 0;
+}
+
+static int dasd_eckd_read_conf(struct dasd_device *device)
+{
+	void *conf_data;
+	int conf_len, conf_data_saved;
+	int rc;
+	__u8 lpm, opm;
+	struct dasd_eckd_private *private, path_private;
+	struct dasd_path *path_data;
+	struct dasd_uid *uid;
+	char print_path_uid[60], print_device_uid[60];
+
+	private = (struct dasd_eckd_private *) device->private;
+	path_data = &device->path_data;
+	opm = ccw_device_get_path_mask(device->cdev);
+	conf_data_saved = 0;
+	/* get configuration data per operational path */
+	for (lpm = 0x80; lpm; lpm>>= 1) {
+		if (!(lpm & opm))
+			continue;
+		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
+					     &conf_len, lpm);
+		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+					"Read configuration data returned "
+					"error %d", rc);
+			return rc;
+		}
+		if (conf_data == NULL) {
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+					"No configuration data "
+					"retrieved");
+			/* no further analysis possible */
+			path_data->opm |= lpm;
+			continue;	/* no error */
+		}
+		/* save first valid configuration data */
+		if (!conf_data_saved) {
+			kfree(private->conf_data);
+			private->conf_data = conf_data;
+			private->conf_len = conf_len;
+			if (dasd_eckd_identify_conf_parts(private)) {
+				private->conf_data = NULL;
+				private->conf_len = 0;
+				kfree(conf_data);
+				continue;
+			}
+			/*
+			 * build the device UID so that the data of
+			 * other paths can be compared to it
+			 */
+			dasd_eckd_generate_uid(device);
+			conf_data_saved++;
+		} else {
+			path_private.conf_data = conf_data;
+			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
+			if (dasd_eckd_identify_conf_parts(
+				    &path_private)) {
+				path_private.conf_data = NULL;
+				path_private.conf_len = 0;
+				kfree(conf_data);
+				continue;
+			}
+
+			if (dasd_eckd_compare_path_uid(
+				    device, &path_private)) {
+				uid = &path_private.uid;
+				if (strlen(uid->vduit) > 0)
+					snprintf(print_path_uid,
+						 sizeof(print_path_uid),
+						 "%s.%s.%04x.%02x.%s",
+						 uid->vendor, uid->serial,
+						 uid->ssid, uid->real_unit_addr,
+						 uid->vduit);
+				else
+					snprintf(print_path_uid,
+						 sizeof(print_path_uid),
+						 "%s.%s.%04x.%02x",
+						 uid->vendor, uid->serial,
+						 uid->ssid,
+						 uid->real_unit_addr);
+				uid = &private->uid;
+				if (strlen(uid->vduit) > 0)
+					snprintf(print_device_uid,
+						 sizeof(print_device_uid),
+						 "%s.%s.%04x.%02x.%s",
+						 uid->vendor, uid->serial,
+						 uid->ssid, uid->real_unit_addr,
+						 uid->vduit);
+				else
+					snprintf(print_device_uid,
+						 sizeof(print_device_uid),
+						 "%s.%s.%04x.%02x",
+						 uid->vendor, uid->serial,
+						 uid->ssid,
+						 uid->real_unit_addr);
+				dev_err(&device->cdev->dev,
+					"Not all channel paths lead to "
+					"the same device, path %02X leads to "
+					"device %s instead of %s\n", lpm,
+					print_path_uid, print_device_uid);
+				return -EINVAL;
+			}
+
+			path_private.conf_data = NULL;
+			path_private.conf_len = 0;
+		}
+		switch (dasd_eckd_path_access(conf_data, conf_len)) {
+		case 0x02:
+			path_data->npm |= lpm;
+			break;
+		case 0x03:
+			path_data->ppm |= lpm;
+			break;
+		}
+		path_data->opm |= lpm;
+
+		if (conf_data != private->conf_data)
+			kfree(conf_data);
+	}
+
+	return 0;
+}
+
+static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
+{
+	struct dasd_eckd_private *private;
+	int mdc;
+	u32 fcx_max_data;
+
+	private = (struct dasd_eckd_private *) device->private;
+	if (private->fcx_max_data) {
+		mdc = ccw_device_get_mdc(device->cdev, lpm);
+		if (mdc < 0) {
+			dev_warn(&device->cdev->dev,
+				 "Detecting the maximum data size for zHPF "
+				 "requests failed (rc=%d) for a new path %x\n",
+				 mdc, lpm);
+			return mdc;
+		}
+		fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;
+		if (fcx_max_data < private->fcx_max_data) {
+			dev_warn(&device->cdev->dev,
+				 "The maximum data size for zHPF requests %u "
+				 "on a new path %x is below the active maximum "
+				 "%u\n", fcx_max_data, lpm,
+				 private->fcx_max_data);
+			return -EACCES;
+		}
+	}
+	return 0;
+}
+
+static int rebuild_device_uid(struct dasd_device *device,
+			      struct path_verification_work_data *data)
+{
+	struct dasd_eckd_private *private;
+	struct dasd_path *path_data;
+	__u8 lpm, opm;
+	int rc;
+
+	rc = -ENODEV;
+	private = (struct dasd_eckd_private *) device->private;
+	path_data = &device->path_data;
+	opm = device->path_data.opm;
+
+	for (lpm = 0x80; lpm; lpm >>= 1) {
+		if (!(lpm & opm))
+			continue;
+		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+		memset(&data->cqr, 0, sizeof(data->cqr));
+		data->cqr.cpaddr = &data->ccw;
+		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+						     data->rcd_buffer,
+						     lpm);
+
+		if (rc) {
+			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
+				continue;
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+					"Read configuration data "
+					"returned error %d", rc);
+			break;
+		}
+		memcpy(private->conf_data, data->rcd_buffer,
+		       DASD_ECKD_RCD_DATA_SIZE);
+		if (dasd_eckd_identify_conf_parts(private)) {
+			rc = -ENODEV;
+		} else /* first valid path is enough */
+			break;
+	}
+
+	if (!rc)
+		rc = dasd_eckd_generate_uid(device);
+
+	return rc;
+}
+
+static void do_path_verification_work(struct work_struct *work)
+{
+	struct path_verification_work_data *data;
+	struct dasd_device *device;
+	struct dasd_eckd_private path_private;
+	struct dasd_uid *uid;
+	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
+	__u8 lpm, opm, npm, ppm, epm;
+	unsigned long flags;
+	char print_uid[60];
+	int rc;
+
+	data = container_of(work, struct path_verification_work_data, worker);
+	device = data->device;
+
+	/* delay path verification until device was resumed */
+	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
+		schedule_work(work);
+		return;
+	}
+
+	opm = 0;
+	npm = 0;
+	ppm = 0;
+	epm = 0;
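+	/* accumulate the path masks (operational, npm, ppm and paths to
+	 * be verified again later) for the paths handled in this run */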
+	for (lpm = 0x80; lpm; lpm >>= 1) {
+		if (!(lpm & data->tbvpm))
+			continue;
+		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+		memset(&data->cqr, 0, sizeof(data->cqr));
+		data->cqr.cpaddr = &data->ccw;
+		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+						     data->rcd_buffer,
+						     lpm);
+		if (!rc) {
+			switch (dasd_eckd_path_access(data->rcd_buffer,
+						      DASD_ECKD_RCD_DATA_SIZE)
+				) {
+			case 0x02:
+				npm |= lpm;
+				break;
+			case 0x03:
+				ppm |= lpm;
+				break;
+			}
+			opm |= lpm;
+		} else if (rc == -EOPNOTSUPP) {
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+					"path verification: No configuration "
+					"data retrieved");
+			opm |= lpm;
+		} else if (rc == -EAGAIN) {
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+					"path verification: device is stopped,"
+					" try again later");
+			epm |= lpm;
+		} else {
+			dev_warn(&device->cdev->dev,
+				 "Reading device feature codes failed "
+				 "(rc=%d) for new path %x\n", rc, lpm);
+			continue;
+		}
+		if (verify_fcx_max_data(device, lpm)) {
+			opm &= ~lpm;
+			npm &= ~lpm;
+			ppm &= ~lpm;
+			continue;
+		}
+
+		/*
+		 * save conf_data for later comparison, because
+		 * rebuild_device_uid may change the original data
+		 */
+		memcpy(&path_rcd_buf, data->rcd_buffer,
+		       DASD_ECKD_RCD_DATA_SIZE);
+		path_private.conf_data = (void *) &path_rcd_buf;
+		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
+		if (dasd_eckd_identify_conf_parts(&path_private)) {
+			path_private.conf_data = NULL;
+			path_private.conf_len = 0;
+			continue;
+		}
+
+		/*
+		 * compare the path UID with the device UID only if at
+		 * least one valid path is left;
+		 * otherwise the device UID may have changed and the first
+		 * working path UID will be used as the device UID
+		 */
+		if (device->path_data.opm &&
+		    dasd_eckd_compare_path_uid(device, &path_private)) {
+			/*
+			 * the comparison was not successful;
+			 * rebuild the device UID with at least one known
+			 * path in case a z/VM hyperswap command has
+			 * changed the device
+			 *
+			 * after this, compare again
+			 *
+			 * if either the rebuild or the recompare fails,
+			 * the path cannot be used
+			 */
+			if (rebuild_device_uid(device, data) ||
+			    dasd_eckd_compare_path_uid(
+				    device, &path_private)) {
+				uid = &path_private.uid;
+				if (strlen(uid->vduit) > 0)
+					snprintf(print_uid, sizeof(print_uid),
+						 "%s.%s.%04x.%02x.%s",
+						 uid->vendor, uid->serial,
+						 uid->ssid, uid->real_unit_addr,
+						 uid->vduit);
+				else
+					snprintf(print_uid, sizeof(print_uid),
+						 "%s.%s.%04x.%02x",
+						 uid->vendor, uid->serial,
+						 uid->ssid,
+						 uid->real_unit_addr);
+				dev_err(&device->cdev->dev,
+					"The newly added channel path %02X "
+					"will not be used because it leads "
+					"to a different device %s\n",
+					lpm, print_uid);
+				opm &= ~lpm;
+				npm &= ~lpm;
+				ppm &= ~lpm;
+				continue;
+			}
+		}
+
+		/*
+		 * There is a small chance that a path is lost again between
+		 * above path verification and the following modification of
+		 * the device opm mask. We could avoid that race here by using
+		 * yet another path mask, but we rather deal with this unlikely
+		 * situation in dasd_start_IO.
+		 */
+		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+		if (!device->path_data.opm && opm) {
+			device->path_data.opm = opm;
+			dasd_generic_path_operational(device);
+		} else
+			device->path_data.opm |= opm;
+		device->path_data.npm |= npm;
+		device->path_data.ppm |= ppm;
+		device->path_data.tbvpm |= epm;
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	}
+
+	dasd_put_device(device);
+	if (data->isglobal)
+		mutex_unlock(&dasd_path_verification_mutex);
+	else
+		kfree(data);
+}
+
+static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
+{
+	struct path_verification_work_data *data;
+
+	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
+	if (!data) {
+		if (mutex_trylock(&dasd_path_verification_mutex)) {
+			data = path_verification_worker;
+			data->isglobal = 1;
+		} else
+			return -ENOMEM;
+	} else {
+		memset(data, 0, sizeof(*data));
+		data->isglobal = 0;
+	}
+	INIT_WORK(&data->worker, do_path_verification_work);
+	dasd_get_device(device);
+	data->device = device;
+	data->tbvpm = lpm;
+	schedule_work(&data->worker);
+	return 0;
+}
+
+static int dasd_eckd_read_features(struct dasd_device *device)
+{
+	struct dasd_psf_prssd_data *prssdp;
+	struct dasd_rssd_features *features;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	int rc;
+	struct dasd_eckd_private *private;
+
+	private = (struct dasd_eckd_private *) device->private;
+	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
+				   (sizeof(struct dasd_psf_prssd_data) +
+				    sizeof(struct dasd_rssd_features)),
+				   device);
+	if (IS_ERR(cqr)) {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
+				"allocate initialization request");
+		return PTR_ERR(cqr);
+	}
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->block = NULL;
+	cqr->retries = 256;
+	cqr->expires = 10 * HZ;
+
+	/* Prepare for Read Subsystem Data */
+	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+	prssdp->order = PSF_ORDER_PRSSD;
+	prssdp->suborder = 0x41;	/* Read Feature Codes */
+	/* all other bytes of prssdp must be zero */
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->count = sizeof(struct dasd_psf_prssd_data);
+	ccw->flags |= CCW_FLAG_CC;
+	ccw->cda = (__u32)(addr_t) prssdp;
+
+	/* Read Subsystem Data - feature codes */
+	features = (struct dasd_rssd_features *) (prssdp + 1);
+	memset(features, 0, sizeof(struct dasd_rssd_features));
+
+	ccw++;
+	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+	ccw->count = sizeof(struct dasd_rssd_features);
+	ccw->cda = (__u32)(addr_t) features;
+
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	rc = dasd_sleep_on(cqr);
+	if (rc == 0) {
+		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+		features = (struct dasd_rssd_features *) (prssdp + 1);
+		memcpy(&private->features, features,
+		       sizeof(struct dasd_rssd_features));
+	} else
+		dev_warn(&device->cdev->dev, "Reading device feature codes"
+			 " failed with rc=%d\n", rc);
+	dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+
+/*
+ * Build CP for Perform Subsystem Function - SSC.
+ */
+static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
+						    int enable_pav)
+{
+	struct dasd_ccw_req *cqr;
+	struct dasd_psf_ssc_data *psf_ssc_data;
+	struct ccw1 *ccw;
+
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
+				  sizeof(struct dasd_psf_ssc_data),
+				  device);
+
+	if (IS_ERR(cqr)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			   "Could not allocate PSF-SSC request");
+		return cqr;
+	}
+	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
+	psf_ssc_data->order = PSF_ORDER_SSC;
+	psf_ssc_data->suborder = 0xc0;
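+	/* setting the additional suborder and reserved bits below requests
+	 * PAV support from the storage server */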
+	if (enable_pav) {
+		psf_ssc_data->suborder |= 0x08;
+		psf_ssc_data->reserved[0] = 0x88;
+	}
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->cda = (__u32)(addr_t)psf_ssc_data;
+	ccw->count = 66;
+
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->block = NULL;
+	cqr->retries = 256;
+	cqr->expires = 10*HZ;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+/*
+ * Perform Subsystem Function.
+ * It is necessary to trigger CIO for channel revalidation since this
+ * call might change behaviour of DASD devices.
+ */
+static int
+dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+
+	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
+	if (IS_ERR(cqr))
+		return PTR_ERR(cqr);
+
+	rc = dasd_sleep_on(cqr);
+	if (!rc)
+		/* trigger CIO to reprobe devices */
+		css_schedule_reprobe();
+	dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+/*
+ * Validate the storage server of the current device.
+ */
+static void dasd_eckd_validate_server(struct dasd_device *device)
+{
+	int rc;
+	struct dasd_eckd_private *private;
+	int enable_pav;
+
+	private = (struct dasd_eckd_private *) device->private;
+	if (private->uid.type == UA_BASE_PAV_ALIAS ||
+	    private->uid.type == UA_HYPER_PAV_ALIAS)
+		return;
+	if (dasd_nopav || MACHINE_IS_VM)
+		enable_pav = 0;
+	else
+		enable_pav = 1;
+	rc = dasd_eckd_psf_ssc(device, enable_pav);
+
+	/* maybe the requested feature is not available on the server,
+	 * therefore just report the error and go ahead */
+	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
+			"returned rc=%d", private->uid.ssid, rc);
+}
+
+/*
+ * worker to do a validate server in case of a lost pathgroup
+ */
+static void dasd_eckd_do_validate_server(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  kick_validate);
+	dasd_eckd_validate_server(device);
+	dasd_put_device(device);
+}
+
+static void dasd_eckd_kick_validate_server(struct dasd_device *device)
+{
+	dasd_get_device(device);
+	/* exit if device not online or in offline processing */
+	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
+	   device->state < DASD_STATE_ONLINE) {
+		dasd_put_device(device);
+		return;
+	}
+	/* queue call to do_validate_server to the kernel event daemon. */
+	schedule_work(&device->kick_validate);
+}
+
+static u32 get_fcx_max_data(struct dasd_device *device)
+{
+#if defined(CONFIG_64BIT)
+	int tpm, mdc;
+	int fcx_in_css, fcx_in_gneq, fcx_in_features;
+	struct dasd_eckd_private *private;
+
+	if (dasd_nofcx)
+		return 0;
+	/* is transport mode supported? */
+	private = (struct dasd_eckd_private *) device->private;
+	fcx_in_css = css_general_characteristics.fcx;
+	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
+	fcx_in_features = private->features.feature[40] & 0x80;
+	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
+
+	if (!tpm)
+		return 0;
+
+	mdc = ccw_device_get_mdc(device->cdev, 0);
+	if (mdc < 0) {
+		dev_warn(&device->cdev->dev, "Detecting the maximum supported"
+			 " data size for zHPF requests failed\n");
+		return 0;
+	} else
+		return mdc * FCX_MAX_DATA_FACTOR;
+#else
+	return 0;
+#endif
+}
+
+/*
+ * Check device characteristics.
+ * If the device is accessible using ECKD discipline, the device is enabled.
+ */
+static int
+dasd_eckd_check_characteristics(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	struct dasd_block *block;
+	struct dasd_uid temp_uid;
+	int rc, i;
+	int readonly;
+	unsigned long value;
+
+	/* setup work queue for validate server*/
+	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
+
+	if (!ccw_device_is_pathgroup(device->cdev)) {
+		dev_warn(&device->cdev->dev,
+			 "A channel path group could not be established\n");
+		return -EIO;
+	}
+	if (!ccw_device_is_multipath(device->cdev)) {
+		dev_info(&device->cdev->dev,
+			 "The DASD is not operating in multipath mode\n");
+	}
+	private = (struct dasd_eckd_private *) device->private;
+	if (!private) {
+		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+		if (!private) {
+			dev_warn(&device->cdev->dev,
+				 "Allocating memory for private DASD data "
+				 "failed\n");
+			return -ENOMEM;
+		}
+		device->private = (void *) private;
+	} else {
+		memset(private, 0, sizeof(*private));
+	}
+	/* Invalidate status of initial analysis. */
+	private->init_cqr_status = -1;
+	/* Set default cache operations. */
+	private->attrib.operation = DASD_NORMAL_CACHE;
+	private->attrib.nr_cyl = 0;
+
+	/* Read Configuration Data */
+	rc = dasd_eckd_read_conf(device);
+	if (rc)
+		goto out_err1;
+
+	/* set default timeout */
+	device->default_expires = DASD_EXPIRES;
+	if (private->gneq) {
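+		/* the gneq encodes the timeout as number * 10^value */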
+		value = 1;
+		for (i = 0; i < private->gneq->timeout.value; i++)
+			value = 10 * value;
+		value = value * private->gneq->timeout.number;
+		/* do not accept useless values */
+		if (value != 0 && value <= DASD_EXPIRES_MAX)
+			device->default_expires = value;
+	}
+
+	dasd_eckd_get_uid(device, &temp_uid);
+	if (temp_uid.type == UA_BASE_DEVICE) {
+		block = dasd_alloc_block();
+		if (IS_ERR(block)) {
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+					"could not allocate dasd "
+					"block structure");
+			rc = PTR_ERR(block);
+			goto out_err1;
+		}
+		device->block = block;
+		block->base = device;
+	}
+
+	/* register lcu with alias handling, enable PAV */
+	rc = dasd_alias_make_device_known_to_lcu(device);
+	if (rc)
+		goto out_err2;
+
+	dasd_eckd_validate_server(device);
+
+	/* device may report different configuration data after LCU setup */
+	rc = dasd_eckd_read_conf(device);
+	if (rc)
+		goto out_err3;
+
+	/* Read Feature Codes */
+	dasd_eckd_read_features(device);
+
+	/* Read Device Characteristics */
+	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
+					 &private->rdc_data, 64);
+	if (rc) {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+				"Read device characteristic failed, rc=%d", rc);
+		goto out_err3;
+	}
+
+	if ((device->features & DASD_FEATURE_USERAW) &&
+	    !(private->rdc_data.facilities.RT_in_LR)) {
+		dev_err(&device->cdev->dev, "The storage server does not "
+			"support raw-track access\n");
+		rc = -EINVAL;
+		goto out_err3;
+	}
+
+	/* find the valid cylinder size */
+	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
+	    private->rdc_data.long_no_cyl)
+		private->real_cyl = private->rdc_data.long_no_cyl;
+	else
+		private->real_cyl = private->rdc_data.no_cyl;
+
+	private->fcx_max_data = get_fcx_max_data(device);
+
+	readonly = dasd_device_is_ro(device);
+	if (readonly)
+		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+
+	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
+		 "with %d cylinders, %d heads, %d sectors%s\n",
+		 private->rdc_data.dev_type,
+		 private->rdc_data.dev_model,
+		 private->rdc_data.cu_type,
+		 private->rdc_data.cu_model.model,
+		 private->real_cyl,
+		 private->rdc_data.trk_per_cyl,
+		 private->rdc_data.sec_per_trk,
+		 readonly ? ", read-only device" : "");
+	return 0;
+
+out_err3:
+	dasd_alias_disconnect_device_from_lcu(device);
+out_err2:
+	dasd_free_block(device->block);
+	device->block = NULL;
+out_err1:
+	kfree(private->conf_data);
+	kfree(device->private);
+	device->private = NULL;
+	return rc;
+}
+
+static void dasd_eckd_uncheck_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+
+	private = (struct dasd_eckd_private *) device->private;
+	dasd_alias_disconnect_device_from_lcu(device);
+	private->ned = NULL;
+	private->sneq = NULL;
+	private->vdsneq = NULL;
+	private->gneq = NULL;
+	private->conf_len = 0;
+	kfree(private->conf_data);
+	private->conf_data = NULL;
+}
+
+static struct dasd_ccw_req *
+dasd_eckd_analysis_ccw(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	struct eckd_count *count_data;
+	struct LO_eckd_data *LO_data;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	int cplength, datasize;
+	int i;
+
+	private = (struct dasd_eckd_private *) device->private;
+
+	cplength = 8;
+	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
+	if (IS_ERR(cqr))
+		return cqr;
+	ccw = cqr->cpaddr;
+	/* Define extent for the first 3 tracks. */
+	define_extent(ccw++, cqr->data, 0, 2,
+		      DASD_ECKD_CCW_READ_COUNT, device);
+	LO_data = cqr->data + sizeof(struct DE_eckd_data);
+	/* Locate record for the first 4 records on track 0. */
+	ccw[-1].flags |= CCW_FLAG_CC;
+	locate_record(ccw++, LO_data++, 0, 0, 4,
+		      DASD_ECKD_CCW_READ_COUNT, device, 0);
+
+	count_data = private->count_area;
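+	/* chain four read count ccws, one for each of these records */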
+	for (i = 0; i < 4; i++) {
+		ccw[-1].flags |= CCW_FLAG_CC;
+		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
+		ccw->flags = 0;
+		ccw->count = 8;
+		ccw->cda = (__u32)(addr_t) count_data;
+		ccw++;
+		count_data++;
+	}
+
+	/* Locate record for the first record on track 2. */
+	ccw[-1].flags |= CCW_FLAG_CC;
+	locate_record(ccw++, LO_data++, 2, 0, 1,
+		      DASD_ECKD_CCW_READ_COUNT, device, 0);
+	/* Read count ccw. */
+	ccw[-1].flags |= CCW_FLAG_CC;
+	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
+	ccw->flags = 0;
+	ccw->count = 8;
+	ccw->cda = (__u32)(addr_t) count_data;
+
+	cqr->block = NULL;
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->retries = 255;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+/* differentiate between 'no record found' and any other error */
+static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
+{
+	char *sense;
+	if (init_cqr->status == DASD_CQR_DONE)
+		return INIT_CQR_OK;
+	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
+		 init_cqr->status == DASD_CQR_FAILED) {
+		sense = dasd_get_sense(&init_cqr->irb);
+		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
+			return INIT_CQR_UNFORMATTED;
+		else
+			return INIT_CQR_ERROR;
+	} else
+		return INIT_CQR_ERROR;
+}
+
+/*
+ * This is the callback function for the init_analysis cqr. It saves
+ * the status of the initial analysis ccw before it frees it and kicks
+ * the device to continue the startup sequence. This will call
+ * dasd_eckd_do_analysis again (if the device has not been marked
+ * for deletion in the meantime).
+ */
+static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
+					void *data)
+{
+	struct dasd_eckd_private *private;
+	struct dasd_device *device;
+
+	device = init_cqr->startdev;
+	private = (struct dasd_eckd_private *) device->private;
+	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
+	dasd_sfree_request(init_cqr, device);
+	dasd_kick_device(device);
+}
+
+static int dasd_eckd_start_analysis(struct dasd_block *block)
+{
+	struct dasd_ccw_req *init_cqr;
+
+	init_cqr = dasd_eckd_analysis_ccw(block->base);
+	if (IS_ERR(init_cqr))
+		return PTR_ERR(init_cqr);
+	init_cqr->callback = dasd_eckd_analysis_callback;
+	init_cqr->callback_data = NULL;
+	init_cqr->expires = 5*HZ;
+	/* first try without ERP, so we can later handle unformatted
+	 * devices as a special case
+	 */
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
+	init_cqr->retries = 0;
+	dasd_add_request_head(init_cqr);
+	return -EAGAIN;
+}
+
+static int dasd_eckd_end_analysis(struct dasd_block *block)
+{
+	struct dasd_device *device;
+	struct dasd_eckd_private *private;
+	struct eckd_count *count_area;
+	unsigned int sb, blk_per_trk;
+	int status, i;
+	struct dasd_ccw_req *init_cqr;
+
+	device = block->base;
+	private = (struct dasd_eckd_private *) device->private;
+	status = private->init_cqr_status;
+	private->init_cqr_status = -1;
+	if (status == INIT_CQR_ERROR) {
+		/* try again, this time with full ERP */
+		init_cqr = dasd_eckd_analysis_ccw(device);
+		dasd_sleep_on(init_cqr);
+		status = dasd_eckd_analysis_evaluation(init_cqr);
+		dasd_sfree_request(init_cqr, device);
+	}
+
+	if (device->features & DASD_FEATURE_USERAW) {
+		block->bp_block = DASD_RAW_BLOCKSIZE;
+		blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
+		block->s2b_shift = 3;
+		goto raw;
+	}
+
+	if (status == INIT_CQR_UNFORMATTED) {
+		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
+		return -EMEDIUMTYPE;
+	} else if (status == INIT_CQR_ERROR) {
+		dev_err(&device->cdev->dev,
+			"Detecting the DASD disk layout failed because "
+			"of an I/O error\n");
+		return -EIO;
+	}
+
+	private->uses_cdl = 1;
+	/* Check Track 0 for Compatible Disk Layout */
+	count_area = NULL;
+	for (i = 0; i < 3; i++) {
+		if (private->count_area[i].kl != 4 ||
+		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
+			private->uses_cdl = 0;
+			break;
+		}
+	}
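+	/* if the first three records match the CDL layout, use the count
+	 * data of the record read from track 2 as the regular record */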
+	if (i == 3)
+		count_area = &private->count_area[4];
+
+	if (private->uses_cdl == 0) {
+		for (i = 0; i < 5; i++) {
+			if ((private->count_area[i].kl != 0) ||
+			    (private->count_area[i].dl !=
+			     private->count_area[0].dl))
+				break;
+		}
+		if (i == 5)
+			count_area = &private->count_area[0];
+	} else {
+		if (private->count_area[3].record == 1)
+			dev_warn(&device->cdev->dev,
+				 "Track 0 has no records following the VTOC\n");
+	}
+
+	if (count_area != NULL && count_area->kl == 0) {
+		/* we found nothing violating our disk layout */
+		if (dasd_check_blocksize(count_area->dl) == 0)
+			block->bp_block = count_area->dl;
+	}
+	if (block->bp_block == 0) {
+		dev_warn(&device->cdev->dev,
+			 "The disk layout of the DASD is not supported\n");
+		return -EMEDIUMTYPE;
+	}
+	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
+	for (sb = 512; sb < block->bp_block; sb = sb << 1)
+		block->s2b_shift++;
+
+	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
+
+raw:
+	block->blocks = (private->real_cyl *
+			  private->rdc_data.trk_per_cyl *
+			  blk_per_trk);
+
+	dev_info(&device->cdev->dev,
+		 "DASD with %d KB/block, %d KB total size, %d KB/track, "
+		 "%s\n", (block->bp_block >> 10),
+		 ((private->real_cyl *
+		   private->rdc_data.trk_per_cyl *
+		   blk_per_trk * (block->bp_block >> 9)) >> 1),
+		 ((blk_per_trk * block->bp_block) >> 10),
+		 private->uses_cdl ?
+		 "compatible disk layout" : "linux disk layout");
+
+	return 0;
+}
+
+static int dasd_eckd_do_analysis(struct dasd_block *block)
+{
+	struct dasd_eckd_private *private;
+
+	private = (struct dasd_eckd_private *) block->base->private;
+	if (private->init_cqr_status < 0)
+		return dasd_eckd_start_analysis(block);
+	else
+		return dasd_eckd_end_analysis(block);
+}
+
+static int dasd_eckd_ready_to_online(struct dasd_device *device)
+{
+	return dasd_alias_add_device(device);
+};
+
+static int dasd_eckd_online_to_ready(struct dasd_device *device)
+{
+	cancel_work_sync(&device->reload_device);
+	cancel_work_sync(&device->kick_validate);
+	return dasd_alias_remove_device(device);
+};
+
+static int
+dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
+{
+	struct dasd_eckd_private *private;
+
+	private = (struct dasd_eckd_private *) block->base->private;
+	if (dasd_check_blocksize(block->bp_block) == 0) {
+		geo->sectors = recs_per_track(&private->rdc_data,
+					      0, block->bp_block);
+	}
+	geo->cylinders = private->rdc_data.no_cyl;
+	geo->heads = private->rdc_data.trk_per_cyl;
+	return 0;
+}
+
+static struct dasd_ccw_req *
+dasd_eckd_format_device(struct dasd_device * device,
+			struct format_data_t * fdata)
+{
+	struct dasd_eckd_private *private;
+	struct dasd_ccw_req *fcp;
+	struct eckd_count *ect;
+	struct ccw1 *ccw;
+	void *data;
+	int rpt;
+	struct ch_t address;
+	int cplength, datasize;
+	int i;
+	int intensity = 0;
+	int r0_perm;
+
+	private = (struct dasd_eckd_private *) device->private;
+	rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
+	set_ch_t(&address,
+		 fdata->start_unit / private->rdc_data.trk_per_cyl,
+		 fdata->start_unit % private->rdc_data.trk_per_cyl);
+
+	/* Sanity checks. */
+	if (fdata->start_unit >=
+	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
+		dev_warn(&device->cdev->dev, "Start track number %d used in "
+			 "formatting is too big\n", fdata->start_unit);
+		return ERR_PTR(-EINVAL);
+	}
+	if (fdata->start_unit > fdata->stop_unit) {
+		dev_warn(&device->cdev->dev, "Start track %d used in "
+			 "formatting exceeds end track\n", fdata->start_unit);
+		return ERR_PTR(-EINVAL);
+	}
+	if (dasd_check_blocksize(fdata->blksize) != 0) {
+		dev_warn(&device->cdev->dev,
+			 "The DASD cannot be formatted with block size %d\n",
+			 fdata->blksize);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * fdata->intensity is a bit string that tells us what to do:
+	 *   Bit 0: write record zero
+	 *   Bit 1: write home address, currently not supported
+	 *   Bit 2: invalidate tracks
+	 *   Bit 3: use OS/390 compatible disk layout (cdl)
+	 *   Bit 4: do not allow storage subsystem to modify record zero
+	 * Only some bit combinations make sense.
+	 */
+	if (fdata->intensity & 0x10) {
+		r0_perm = 0;
+		intensity = fdata->intensity & ~0x10;
+	} else {
+		r0_perm = 1;
+		intensity = fdata->intensity;
+	}
+	switch (intensity) {
+	case 0x00:	/* Normal format */
+	case 0x08:	/* Normal format, use cdl. */
+		cplength = 2 + rpt;
+		datasize = sizeof(struct DE_eckd_data) +
+			sizeof(struct LO_eckd_data) +
+			rpt * sizeof(struct eckd_count);
+		break;
+	case 0x01:	/* Write record zero and format track. */
+	case 0x09:	/* Write record zero and format track, use cdl. */
+		cplength = 3 + rpt;
+		datasize = sizeof(struct DE_eckd_data) +
+			sizeof(struct LO_eckd_data) +
+			sizeof(struct eckd_count) +
+			rpt * sizeof(struct eckd_count);
+		break;
+	case 0x04:	/* Invalidate track. */
+	case 0x0c:	/* Invalidate track, use cdl. */
+		cplength = 3;
+		datasize = sizeof(struct DE_eckd_data) +
+			sizeof(struct LO_eckd_data) +
+			sizeof(struct eckd_count);
+		break;
+	default:
+		dev_warn(&device->cdev->dev, "An I/O control call used "
+			 "incorrect flags 0x%x\n", fdata->intensity);
+		return ERR_PTR(-EINVAL);
+	}
+	/* Allocate the format ccw request. */
+	fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
+	if (IS_ERR(fcp))
+		return fcp;
+
+	data = fcp->data;
+	ccw = fcp->cpaddr;
+
+	switch (intensity & ~0x08) {
+	case 0x00: /* Normal format. */
+		define_extent(ccw++, (struct DE_eckd_data *) data,
+			      fdata->start_unit, fdata->start_unit,
+			      DASD_ECKD_CCW_WRITE_CKD, device);
+		/* grant subsystem permission to format R0 */
+		if (r0_perm)
+			((struct DE_eckd_data *)data)->ga_extended |= 0x04;
+		data += sizeof(struct DE_eckd_data);
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, (struct LO_eckd_data *) data,
+			      fdata->start_unit, 0, rpt,
+			      DASD_ECKD_CCW_WRITE_CKD, device,
+			      fdata->blksize);
+		data += sizeof(struct LO_eckd_data);
+		break;
+	case 0x01: /* Write record zero + format track. */
+		define_extent(ccw++, (struct DE_eckd_data *) data,
+			      fdata->start_unit, fdata->start_unit,
+			      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
+			      device);
+		data += sizeof(struct DE_eckd_data);
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, (struct LO_eckd_data *) data,
+			      fdata->start_unit, 0, rpt + 1,
+			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
+			      device->block->bp_block);
+		data += sizeof(struct LO_eckd_data);
+		break;
+	case 0x04: /* Invalidate track. */
+		define_extent(ccw++, (struct DE_eckd_data *) data,
+			      fdata->start_unit, fdata->start_unit,
+			      DASD_ECKD_CCW_WRITE_CKD, device);
+		data += sizeof(struct DE_eckd_data);
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, (struct LO_eckd_data *) data,
+			      fdata->start_unit, 0, 1,
+			      DASD_ECKD_CCW_WRITE_CKD, device, 8);
+		data += sizeof(struct LO_eckd_data);
+		break;
+	}
+	if (intensity & 0x01) {	/* write record zero */
+		ect = (struct eckd_count *) data;
+		data += sizeof(struct eckd_count);
+		ect->cyl = address.cyl;
+		ect->head = address.head;
+		ect->record = 0;
+		ect->kl = 0;
+		ect->dl = 8;
+		ccw[-1].flags |= CCW_FLAG_CC;
+		ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
+		ccw->flags = CCW_FLAG_SLI;
+		ccw->count = 8;
+		ccw->cda = (__u32)(addr_t) ect;
+		ccw++;
+	}
+	if ((intensity & ~0x08) & 0x04) {	/* erase track */
+		ect = (struct eckd_count *) data;
+		data += sizeof(struct eckd_count);
+		ect->cyl = address.cyl;
+		ect->head = address.head;
+		ect->record = 1;
+		ect->kl = 0;
+		ect->dl = 0;
+		ccw[-1].flags |= CCW_FLAG_CC;
+		ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
+		ccw->flags = CCW_FLAG_SLI;
+		ccw->count = 8;
+		ccw->cda = (__u32)(addr_t) ect;
+	} else {		/* write remaining records */
+		for (i = 0; i < rpt; i++) {
+			ect = (struct eckd_count *) data;
+			data += sizeof(struct eckd_count);
+			ect->cyl = address.cyl;
+			ect->head = address.head;
+			ect->record = i + 1;
+			ect->kl = 0;
+			ect->dl = fdata->blksize;
+			/* Check for special tracks 0-1 when formatting CDL */
+			if ((intensity & 0x08) &&
+			    fdata->start_unit == 0) {
+				if (i < 3) {
+					ect->kl = 4;
+					ect->dl = sizes_trk0[i] - 4;
+				}
+			}
+			if ((intensity & 0x08) &&
+			    fdata->start_unit == 1) {
+				ect->kl = 44;
+				ect->dl = LABEL_SIZE - 44;
+			}
+			ccw[-1].flags |= CCW_FLAG_CC;
+			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
+			ccw->flags = CCW_FLAG_SLI;
+			ccw->count = 8;
+			ccw->cda = (__u32)(addr_t) ect;
+			ccw++;
+		}
+	}
+	fcp->startdev = device;
+	fcp->memdev = device;
+	fcp->retries = 256;
+	fcp->buildclk = get_clock();
+	fcp->status = DASD_CQR_FILLED;
+	return fcp;
+}
+
+static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
+{
+	cqr->status = DASD_CQR_FILLED;
+	if (cqr->block && (cqr->startdev != cqr->block->base)) {
+		dasd_eckd_reset_ccw_to_base_io(cqr);
+		cqr->startdev = cqr->block->base;
+		cqr->lpm = cqr->block->base->path_data.opm;
+	}
+};
+
+static dasd_erp_fn_t
+dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
+{
+	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
+	struct ccw_device *cdev = device->cdev;
+
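+	/* 2105, 2107 and 1750 control units use the 3990 ERP as well;
+	 * older and unknown control units fall back to the default ERP */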
+	switch (cdev->id.cu_type) {
+	case 0x3990:
+	case 0x2105:
+	case 0x2107:
+	case 0x1750:
+		return dasd_3990_erp_action;
+	case 0x9343:
+	case 0x3880:
+	default:
+		return dasd_default_erp_action;
+	}
+}
+
+static dasd_erp_fn_t
+dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
+{
+	return dasd_default_erp_postaction;
+}
+
+static void dasd_eckd_check_for_device_change(struct dasd_device *device,
+					      struct dasd_ccw_req *cqr,
+					      struct irb *irb)
+{
+	char mask;
+	char *sense = NULL;
+	struct dasd_eckd_private *private;
+
+	private = (struct dasd_eckd_private *) device->private;
+	/* first of all check for state change pending interrupt */
+	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
+	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
+		/*
+		 * for alias only, not in offline processing
+		 * and only if not suspended
+		 */
+		if (!device->block && private->lcu &&
+		    device->state == DASD_STATE_ONLINE &&
+		    !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+		    !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
+			/*
+			 * the state change could be caused by an alias
+			 * reassignment; remove the device from alias
+			 * handling to prevent new requests from being
+			 * scheduled on the wrong alias device
+			 */
+			dasd_alias_remove_device(device);
+
+			/* schedule worker to reload device */
+			dasd_reload_device(device);
+		}
+		dasd_generic_handle_state_change(device);
+		return;
+	}
+
+	sense = dasd_get_sense(irb);
+	if (!sense)
+		return;
+
+	/* summary unit check */
+	if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
+	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
+		dasd_alias_handle_summary_unit_check(device, irb);
+		return;
+	}
+
+	/* service information message SIM */
+	if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
+	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
+		dasd_3990_erp_handle_sim(device, sense);
+		return;
+	}
+
+	/* loss of device reservation is handled via base devices only
+	 * as alias devices may be used with several bases
+	 */
+	if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
+	    (sense[7] == 0x3F) &&
+	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
+	    test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
+		if (device->features & DASD_FEATURE_FAILONSLCK)
+			set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
+		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
+		dev_err(&device->cdev->dev,
+			"The device reservation was lost\n");
+	}
+}
+
+static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
+					       struct dasd_device *startdev,
+					       struct dasd_block *block,
+					       struct request *req,
+					       sector_t first_rec,
+					       sector_t last_rec,
+					       sector_t first_trk,
+					       sector_t last_trk,
+					       unsigned int first_offs,
+					       unsigned int last_offs,
+					       unsigned int blk_per_trk,
+					       unsigned int blksize)
+{
+	struct dasd_eckd_private *private;
+	unsigned long *idaws;
+	struct LO_eckd_data *LO_data;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	struct req_iterator iter;
+	struct bio_vec *bv;
+	char *dst;
+	unsigned int off;
+	int count, cidaw, cplength, datasize;
+	sector_t recid;
+	unsigned char cmd, rcmd;
+	int use_prefix;
+	struct dasd_device *basedev;
+
+	basedev = block->base;
+	private = (struct dasd_eckd_private *) basedev->private;
+	if (rq_data_dir(req) == READ)
+		cmd = DASD_ECKD_CCW_READ_MT;
+	else if (rq_data_dir(req) == WRITE)
+		cmd = DASD_ECKD_CCW_WRITE_MT;
+	else
+		return ERR_PTR(-EINVAL);
+
+	/* Check struct bio and count the number of blocks for the request. */
+	count = 0;
+	cidaw = 0;
+	rq_for_each_segment(bv, req, iter) {
+		if (bv->bv_len & (blksize - 1))
+			/* Eckd can only do full blocks. */
+			return ERR_PTR(-EINVAL);
+		count += bv->bv_len >> (block->s2b_shift + 9);
+#if defined(CONFIG_64BIT)
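+		/* data above the 31-bit addressable area needs indirect
+		 * data address words (IDAWs) */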
+		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
+			cidaw += bv->bv_len >> (block->s2b_shift + 9);
+#endif
+	}
+	/* Paranoia. */
+	if (count != last_rec - first_rec + 1)
+		return ERR_PTR(-EINVAL);
+
+	/* use the prefix command if available */
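+	/* feature byte 8, bit 0 signals that the storage server supports
+	 * the PFX channel command */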
+	use_prefix = private->features.feature[8] & 0x01;
+	if (use_prefix) {
+		/* 1x prefix + number of blocks */
+		cplength = 2 + count;
+		/* 1x prefix + cidaws*sizeof(long) */
+		datasize = sizeof(struct PFX_eckd_data) +
+			sizeof(struct LO_eckd_data) +
+			cidaw * sizeof(unsigned long);
+	} else {
+		/* 1x define extent + 1x locate record + number of blocks */
+		cplength = 2 + count;
+		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
+		datasize = sizeof(struct DE_eckd_data) +
+			sizeof(struct LO_eckd_data) +
+			cidaw * sizeof(unsigned long);
+	}
+	/* Find out the number of additional locate record ccws for cdl. */
+	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
+		if (last_rec >= 2*blk_per_trk)
+			count = 2*blk_per_trk - first_rec;
+		cplength += count;
+		datasize += count*sizeof(struct LO_eckd_data);
+	}
+	/* Allocate the ccw request. */
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
+				   startdev);
+	if (IS_ERR(cqr))
+		return cqr;
+	ccw = cqr->cpaddr;
+	/* First ccw is define extent or prefix. */
+	if (use_prefix) {
+		if (prefix(ccw++, cqr->data, first_trk,
+			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
+			/* Clock not in sync and XRC is enabled.
+			 * Try again later.
+			 */
+			dasd_sfree_request(cqr, startdev);
+			return ERR_PTR(-EAGAIN);
+		}
+		idaws = (unsigned long *) (cqr->data +
+					   sizeof(struct PFX_eckd_data));
+	} else {
+		if (define_extent(ccw++, cqr->data, first_trk,
+				  last_trk, cmd, basedev) == -EAGAIN) {
+			/* Clock not in sync and XRC is enabled.
+			 * Try again later.
+			 */
+			dasd_sfree_request(cqr, startdev);
+			return ERR_PTR(-EAGAIN);
+		}
+		idaws = (unsigned long *) (cqr->data +
+					   sizeof(struct DE_eckd_data));
+	}
+	/* Build locate_record+read/write/ccws. */
+	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
+	recid = first_rec;
+	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
+		/* Only standard blocks so there is just one locate record. */
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
+			      last_rec - recid + 1, cmd, basedev, blksize);
+	}
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		if (dasd_page_cache) {
+			char *copy = kmem_cache_alloc(dasd_page_cache,
+						      GFP_DMA | __GFP_NOWARN);
+			if (copy && rq_data_dir(req) == WRITE)
+				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+			if (copy)
+				dst = copy + bv->bv_offset;
+		}
+		for (off = 0; off < bv->bv_len; off += blksize) {
+			sector_t trkid = recid;
+			unsigned int recoffs = sector_div(trkid, blk_per_trk);
+			rcmd = cmd;
+			count = blksize;
+			/* Locate record for cdl special block ? */
+			if (private->uses_cdl && recid < 2*blk_per_trk) {
+				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
+					rcmd |= 0x8;
+					count = dasd_eckd_cdl_reclen(recid);
+					if (count < blksize &&
+					    rq_data_dir(req) == READ)
+						memset(dst + count, 0xe5,
+						       blksize - count);
+				}
+				ccw[-1].flags |= CCW_FLAG_CC;
+				locate_record(ccw++, LO_data++,
+					      trkid, recoffs + 1,
+					      1, rcmd, basedev, count);
+			}
+			/* Locate record for standard blocks ? */
+			if (private->uses_cdl && recid == 2*blk_per_trk) {
+				ccw[-1].flags |= CCW_FLAG_CC;
+				locate_record(ccw++, LO_data++,
+					      trkid, recoffs + 1,
+					      last_rec - recid + 1,
+					      cmd, basedev, count);
+			}
+			/* Read/write ccw. */
+			ccw[-1].flags |= CCW_FLAG_CC;
+			ccw->cmd_code = rcmd;
+			ccw->count = count;
+			if (idal_is_needed(dst, blksize)) {
+				ccw->cda = (__u32)(addr_t) idaws;
+				ccw->flags = CCW_FLAG_IDA;
+				idaws = idal_create_words(idaws, dst, blksize);
+			} else {
+				ccw->cda = (__u32)(addr_t) dst;
+				ccw->flags = 0;
+			}
+			ccw++;
+			dst += blksize;
+			recid++;
+		}
+	}
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->startdev = startdev;
+	cqr->memdev = startdev;
+	cqr->block = block;
+	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
+	cqr->lpm = startdev->path_data.ppm;
+	cqr->retries = 256;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
+					       struct dasd_device *startdev,
+					       struct dasd_block *block,
+					       struct request *req,
+					       sector_t first_rec,
+					       sector_t last_rec,
+					       sector_t first_trk,
+					       sector_t last_trk,
+					       unsigned int first_offs,
+					       unsigned int last_offs,
+					       unsigned int blk_per_trk,
+					       unsigned int blksize)
+{
+	unsigned long *idaws;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	struct req_iterator iter;
+	struct bio_vec *bv;
+	char *dst, *idaw_dst;
+	unsigned int cidaw, cplength, datasize;
+	unsigned int tlf;
+	sector_t recid;
+	unsigned char cmd;
+	struct dasd_device *basedev;
+	unsigned int trkcount, count, count_to_trk_end;
+	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
+	unsigned char new_track, end_idaw;
+	sector_t trkid;
+	unsigned int recoffs;
+
+	basedev = block->base;
+	if (rq_data_dir(req) == READ)
+		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
+	else if (rq_data_dir(req) == WRITE)
+		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
+	else
+		return ERR_PTR(-EINVAL);
+
+	/* Track based I/O needs IDAWs for each page, and not just for
+	 * 64 bit addresses. We need additional idals for pages
+	 * that get filled from two tracks, so we use the number
+	 * of records as upper limit.
+	 */
+	cidaw = last_rec - first_rec + 1;
+	trkcount = last_trk - first_trk + 1;
+
+	/* 1x prefix + one read/write ccw per track */
+	cplength = 1 + trkcount;
+
+	/* on 31-bit we need space for two 32 bit addresses per page,
+	 * on 64-bit for one 64 bit address
+	 */
+	datasize = sizeof(struct PFX_eckd_data) +
+		cidaw * sizeof(unsigned long long);
+
+	/* Allocate the ccw request. */
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
+				   startdev);
+	if (IS_ERR(cqr))
+		return cqr;
+	ccw = cqr->cpaddr;
+	/* transfer length factor: how many bytes to read from the last track */
+	if (first_trk == last_trk)
+		tlf = last_offs - first_offs + 1;
+	else
+		tlf = last_offs + 1;
+	tlf *= blksize;
+
+	if (prefix_LRE(ccw++, cqr->data, first_trk,
+		       last_trk, cmd, basedev, startdev,
+		       1 /* format */, first_offs + 1,
+		       trkcount, blksize,
+		       tlf) == -EAGAIN) {
+		/* Clock not in sync and XRC is enabled.
+		 * Try again later.
+		 */
+		dasd_sfree_request(cqr, startdev);
+		return ERR_PTR(-EAGAIN);
+	}
+
+	/*
+	 * The translation of request into ccw programs must meet the
+	 * following conditions:
+	 * - all idaws but the first and the last must address full pages
+	 *   (or 2K blocks on 31-bit)
+	 * - the scope of a ccw and its idal ends at the track boundaries
+	 */
+	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
+	recid = first_rec;
+	new_track = 1;
+	end_idaw = 0;
+	len_to_track_end = 0;
+	idaw_dst = NULL;
+	idaw_len = 0;
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		seg_len = bv->bv_len;
+		while (seg_len) {
+			if (new_track) {
+				trkid = recid;
+				recoffs = sector_div(trkid, blk_per_trk);
+				count_to_trk_end = blk_per_trk - recoffs;
+				count = min((last_rec - recid + 1),
+					    (sector_t)count_to_trk_end);
+				len_to_track_end = count * blksize;
+				ccw[-1].flags |= CCW_FLAG_CC;
+				ccw->cmd_code = cmd;
+				ccw->count = len_to_track_end;
+				ccw->cda = (__u32)(addr_t)idaws;
+				ccw->flags = CCW_FLAG_IDA;
+				ccw++;
+				recid += count;
+				new_track = 0;
+				/* first idaw for a ccw may start anywhere */
+				if (!idaw_dst)
+					idaw_dst = dst;
+			}
+			/* If we start a new idaw, we must make sure that it
+			 * starts on an IDA_BLOCK_SIZE boundary.
+			 * If we continue an idaw, we must make sure that the
+			 * current segment begins where the so far accumulated
+			 * idaw ends
+			 */
+			if (!idaw_dst) {
+				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
+					dasd_sfree_request(cqr, startdev);
+					return ERR_PTR(-ERANGE);
+				} else
+					idaw_dst = dst;
+			}
+			if ((idaw_dst + idaw_len) != dst) {
+				dasd_sfree_request(cqr, startdev);
+				return ERR_PTR(-ERANGE);
+			}
+			part_len = min(seg_len, len_to_track_end);
+			seg_len -= part_len;
+			dst += part_len;
+			idaw_len += part_len;
+			len_to_track_end -= part_len;
+			/* collected memory area ends on an IDA_BLOCK border,
+			 * -> create an idaw
+			 * idal_create_words will handle cases where idaw_len
+	 * is larger than IDA_BLOCK_SIZE
+			 */
+			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
+				end_idaw = 1;
+			/* We also need to end the idaw at track end */
+			if (!len_to_track_end) {
+				new_track = 1;
+				end_idaw = 1;
+			}
+			if (end_idaw) {
+				idaws = idal_create_words(idaws, idaw_dst,
+							  idaw_len);
+				idaw_dst = NULL;
+				idaw_len = 0;
+				end_idaw = 0;
+			}
+		}
+	}
+
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->startdev = startdev;
+	cqr->memdev = startdev;
+	cqr->block = block;
+	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
+	cqr->lpm = startdev->path_data.ppm;
+	cqr->retries = 256;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+static int prepare_itcw(struct itcw *itcw,
+			unsigned int trk, unsigned int totrk, int cmd,
+			struct dasd_device *basedev,
+			struct dasd_device *startdev,
+			unsigned int rec_on_trk, int count,
+			unsigned int blksize,
+			unsigned int total_data_size,
+			unsigned int tlf,
+			unsigned int blk_per_trk)
+{
+	struct PFX_eckd_data pfxdata;
+	struct dasd_eckd_private *basepriv, *startpriv;
+	struct DE_eckd_data *dedata;
+	struct LRE_eckd_data *lredata;
+	struct dcw *dcw;
+
+	u32 begcyl, endcyl;
+	u16 heads, beghead, endhead;
+	u8 pfx_cmd;
+
+	int rc = 0;
+	int sector = 0;
+	int dn, d;
+
+
+	/* setup prefix data */
+	basepriv = (struct dasd_eckd_private *) basedev->private;
+	startpriv = (struct dasd_eckd_private *) startdev->private;
+	dedata = &pfxdata.define_extent;
+	lredata = &pfxdata.locate_record;
+
+	memset(&pfxdata, 0, sizeof(pfxdata));
+	pfxdata.format = 1; /* PFX with LRE */
+	pfxdata.base_address = basepriv->ned->unit_addr;
+	pfxdata.base_lss = basepriv->ned->ID;
+	pfxdata.validity.define_extent = 1;
+
+	/* private uid is kept up to date, conf_data may be outdated */
+	if (startpriv->uid.type != UA_BASE_DEVICE) {
+		pfxdata.validity.verify_base = 1;
+		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
+			pfxdata.validity.hyper_pav = 1;
+	}
+
+	switch (cmd) {
+	case DASD_ECKD_CCW_READ_TRACK_DATA:
+		dedata->mask.perm = 0x1;
+		dedata->attributes.operation = basepriv->attrib.operation;
+		dedata->blk_size = blksize;
+		dedata->ga_extended |= 0x42;
+		lredata->operation.orientation = 0x0;
+		lredata->operation.operation = 0x0C;
+		lredata->auxiliary.check_bytes = 0x01;
+		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
+		break;
+	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
+		dedata->mask.perm = 0x02;
+		dedata->attributes.operation = basepriv->attrib.operation;
+		dedata->blk_size = blksize;
+		rc = check_XRC_on_prefix(&pfxdata, basedev);
+		dedata->ga_extended |= 0x42;
+		lredata->operation.orientation = 0x0;
+		lredata->operation.operation = 0x3F;
+		lredata->extended_operation = 0x23;
+		lredata->auxiliary.check_bytes = 0x2;
+		pfx_cmd = DASD_ECKD_CCW_PFX;
+		break;
+	default:
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			      "prepare itcw, unknown opcode 0x%x", cmd);
+		BUG();
+		break;
+	}
+	if (rc)
+		return rc;
+
+	dedata->attributes.mode = 0x3;	/* ECKD */
+
+	heads = basepriv->rdc_data.trk_per_cyl;
+	begcyl = trk / heads;
+	beghead = trk % heads;
+	endcyl = totrk / heads;
+	endhead = totrk % heads;
+
+	/* check for sequential prestage - enhance cylinder range */
+	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
+	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
+
+		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
+			endcyl += basepriv->attrib.nr_cyl;
+		else
+			endcyl = (basepriv->real_cyl - 1);
+	}
+
+	set_ch_t(&dedata->beg_ext, begcyl, beghead);
+	set_ch_t(&dedata->end_ext, endcyl, endhead);
+
+	dedata->ep_format = 0x20; /* records per track is valid */
+	dedata->ep_rec_per_track = blk_per_trk;
+
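+	/*
+	 * If the starting record on the track is known, estimate its
+	 * rotational (sector) position so the control unit can orient the
+	 * access. The device type specific formulas below are the usual
+	 * ECKD geometry calculations; dn and d are intermediate per-record
+	 * size factors derived from the block size.
+	 */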
+	if (rec_on_trk) {
+		switch (basepriv->rdc_data.dev_type) {
+		case 0x3390:
+			dn = ceil_quot(blksize + 6, 232);
+			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
+			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
+			break;
+		case 0x3380:
+			d = 7 + ceil_quot(blksize + 12, 32);
+			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
+			break;
+		}
+	}
+
+	lredata->auxiliary.length_valid = 1;
+	lredata->auxiliary.length_scope = 1;
+	lredata->auxiliary.imbedded_ccw_valid = 1;
+	lredata->length = tlf;
+	lredata->imbedded_ccw = cmd;
+	lredata->count = count;
+	lredata->sector = sector;
+	set_ch_t(&lredata->seek_addr, begcyl, beghead);
+	lredata->search_arg.cyl = lredata->seek_addr.cyl;
+	lredata->search_arg.head = lredata->seek_addr.head;
+	lredata->search_arg.record = rec_on_trk;
+
+	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
+		     &pfxdata, sizeof(pfxdata), total_data_size);
+	return IS_ERR(dcw) ? PTR_ERR(dcw) : 0;
+}
+
+static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
+					       struct dasd_device *startdev,
+					       struct dasd_block *block,
+					       struct request *req,
+					       sector_t first_rec,
+					       sector_t last_rec,
+					       sector_t first_trk,
+					       sector_t last_trk,
+					       unsigned int first_offs,
+					       unsigned int last_offs,
+					       unsigned int blk_per_trk,
+					       unsigned int blksize)
+{
+	struct dasd_ccw_req *cqr;
+	struct req_iterator iter;
+	struct bio_vec *bv;
+	char *dst;
+	unsigned int trkcount, ctidaw;
+	unsigned char cmd;
+	struct dasd_device *basedev;
+	unsigned int tlf;
+	struct itcw *itcw;
+	struct tidaw *last_tidaw = NULL;
+	int itcw_op;
+	size_t itcw_size;
+	u8 tidaw_flags;
+	unsigned int seg_len, part_len, len_to_track_end;
+	unsigned char new_track;
+	sector_t recid, trkid;
+	unsigned int offs;
+	unsigned int count, count_to_trk_end;
+	int ret;
+
+	basedev = block->base;
+	if (rq_data_dir(req) == READ) {
+		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
+		itcw_op = ITCW_OP_READ;
+	} else if (rq_data_dir(req) == WRITE) {
+		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
+		itcw_op = ITCW_OP_WRITE;
+	} else
+		return ERR_PTR(-EINVAL);
+
+	/* track based I/O needs to address all memory via TIDAWs,
+	 * not just for 64 bit addresses. This allows us to map
+	 * each segment directly to one tidaw.
+	 * In the case of write requests, additional tidaws may
+	 * be needed when a segment crosses a track boundary.
+	 */
+	trkcount = last_trk - first_trk + 1;
+	ctidaw = 0;
+	rq_for_each_segment(bv, req, iter) {
+		++ctidaw;
+	}
+	if (rq_data_dir(req) == WRITE)
+		ctidaw += (last_trk - first_trk);
+
+	/* Allocate the ccw request. */
+	itcw_size = itcw_calc_size(0, ctidaw, 0);
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+	if (IS_ERR(cqr))
+		return cqr;
+
+	/* transfer length factor: how many bytes to read from the last track */
+	if (first_trk == last_trk)
+		tlf = last_offs - first_offs + 1;
+	else
+		tlf = last_offs + 1;
+	tlf *= blksize;
+
+	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
+	if (IS_ERR(itcw)) {
+		ret = -EINVAL;
+		goto out_error;
+	}
+	cqr->cpaddr = itcw_get_tcw(itcw);
+	if (prepare_itcw(itcw, first_trk, last_trk,
+			 cmd, basedev, startdev,
+			 first_offs + 1,
+			 trkcount, blksize,
+			 (last_rec - first_rec + 1) * blksize,
+			 tlf, blk_per_trk) == -EAGAIN) {
+		/* Clock not in sync and XRC is enabled.
+		 * Try again later.
+		 */
+		ret = -EAGAIN;
+		goto out_error;
+	}
+	len_to_track_end = 0;
+	/*
+	 * A tidaw can address 4k of memory, but must not cross page boundaries.
+	 * We can let the block layer handle this by setting
+	 * blk_queue_segment_boundary to page boundaries and
+	 * blk_max_segment_size to page size when setting up the request queue.
+	 * For write requests, a TIDAW must not cross track boundaries, because
+	 * we have to set the CBC flag on the last tidaw for each track.
+	 */
+	if (rq_data_dir(req) == WRITE) {
+		new_track = 1;
+		recid = first_rec;
+		rq_for_each_segment(bv, req, iter) {
+			dst = page_address(bv->bv_page) + bv->bv_offset;
+			seg_len = bv->bv_len;
+			while (seg_len) {
+				if (new_track) {
+					trkid = recid;
+					offs = sector_div(trkid, blk_per_trk);
+					count_to_trk_end = blk_per_trk - offs;
+					count = min((last_rec - recid + 1),
+						    (sector_t)count_to_trk_end);
+					len_to_track_end = count * blksize;
+					recid += count;
+					new_track = 0;
+				}
+				part_len = min(seg_len, len_to_track_end);
+				seg_len -= part_len;
+				len_to_track_end -= part_len;
+				/* We need to end the tidaw at track end */
+				if (!len_to_track_end) {
+					new_track = 1;
+					tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
+				} else
+					tidaw_flags = 0;
+				last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
+							    dst, part_len);
+				if (IS_ERR(last_tidaw)) {
+					ret = -EINVAL;
+					goto out_error;
+				}
+				dst += part_len;
+			}
+		}
+	} else {
+		rq_for_each_segment(bv, req, iter) {
+			dst = page_address(bv->bv_page) + bv->bv_offset;
+			last_tidaw = itcw_add_tidaw(itcw, 0x00,
+						    dst, bv->bv_len);
+			if (IS_ERR(last_tidaw)) {
+				ret = -EINVAL;
+				goto out_error;
+			}
+		}
+	}
+	last_tidaw->flags |= TIDAW_FLAGS_LAST;
+	last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
+	itcw_finalize(itcw);
+
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->cpmode = 1;
+	cqr->startdev = startdev;
+	cqr->memdev = startdev;
+	cqr->block = block;
+	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
+	cqr->lpm = startdev->path_data.ppm;
+	cqr->retries = 256;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+out_error:
+	dasd_sfree_request(cqr, startdev);
+	return ERR_PTR(ret);
+}
+
+static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
+					       struct dasd_block *block,
+					       struct request *req)
+{
+	int cmdrtd, cmdwtd;
+	int use_prefix;
+	int fcx_multitrack;
+	struct dasd_eckd_private *private;
+	struct dasd_device *basedev;
+	sector_t first_rec, last_rec;
+	sector_t first_trk, last_trk;
+	unsigned int first_offs, last_offs;
+	unsigned int blk_per_trk, blksize;
+	int cdlspecial;
+	unsigned int data_size;
+	struct dasd_ccw_req *cqr;
+
+	basedev = block->base;
+	private = (struct dasd_eckd_private *) basedev->private;
+
+	/* Calculate number of blocks/records per track. */
+	blksize = block->bp_block;
+	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+	if (blk_per_trk == 0)
+		return ERR_PTR(-EINVAL);
+	/* Calculate record id of first and last block. */
+	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
+	first_offs = sector_div(first_trk, blk_per_trk);
+	last_rec = last_trk =
+		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+	last_offs = sector_div(last_trk, blk_per_trk);
+	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
+
+	fcx_multitrack = private->features.feature[40] & 0x20;
+	data_size = blk_rq_bytes(req);
+	/* tpm write requests add CBC data on each track boundary */
+	if (rq_data_dir(req) == WRITE)
+		data_size += (last_trk - first_trk) * 4;
+
+	/* is read track data and write track data in command mode supported? */
+	cmdrtd = private->features.feature[9] & 0x20;
+	cmdwtd = private->features.feature[12] & 0x40;
+	use_prefix = private->features.feature[8] & 0x01;
+
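+	/*
+	 * Select the channel program type: prefer transport mode (tpm)
+	 * track I/O if the request fits into fcx_max_data and either
+	 * multitrack is supported or only one track is involved, fall back
+	 * to command mode track I/O if the prefix and track data commands
+	 * are supported, and use single block command mode otherwise.
+	 */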
+	cqr = NULL;
+	if (cdlspecial || dasd_page_cache) {
+		/* do nothing, just fall through to the cmd mode single case */
+	} else if ((data_size <= private->fcx_max_data)
+		   && (fcx_multitrack || (first_trk == last_trk))) {
+		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
+						    first_rec, last_rec,
+						    first_trk, last_trk,
+						    first_offs, last_offs,
+						    blk_per_trk, blksize);
+		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
+		    (PTR_ERR(cqr) != -ENOMEM))
+			cqr = NULL;
+	} else if (use_prefix &&
+		   (((rq_data_dir(req) == READ) && cmdrtd) ||
+		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
+		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
+						   first_rec, last_rec,
+						   first_trk, last_trk,
+						   first_offs, last_offs,
+						   blk_per_trk, blksize);
+		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
+		    (PTR_ERR(cqr) != -ENOMEM))
+			cqr = NULL;
+	}
+	if (!cqr)
+		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
+						    first_rec, last_rec,
+						    first_trk, last_trk,
+						    first_offs, last_offs,
+						    blk_per_trk, blksize);
+	return cqr;
+}
+
+static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
+					       struct dasd_block *block,
+					       struct request *req)
+{
+	unsigned long *idaws;
+	struct dasd_device *basedev;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	struct req_iterator iter;
+	struct bio_vec *bv;
+	char *dst;
+	unsigned char cmd;
+	unsigned int trkcount;
+	unsigned int seg_len, len_to_track_end;
+	unsigned int first_offs;
+	unsigned int cidaw, cplength, datasize;
+	sector_t first_trk, last_trk;
+	unsigned int pfx_datasize;
+
+	/*
+	 * raw track access needs to be a multiple of 64k and on a 64k boundary
+	 */
+	if ((blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK) != 0) {
+		cqr = ERR_PTR(-EINVAL);
+		goto out;
+	}
+	if (((blk_rq_pos(req) + blk_rq_sectors(req)) %
+	     DASD_RAW_SECTORS_PER_TRACK) != 0) {
+		cqr = ERR_PTR(-EINVAL);
+		goto out;
+	}
+
+	first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
+	last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
+		DASD_RAW_SECTORS_PER_TRACK;
+	trkcount = last_trk - first_trk + 1;
+	first_offs = 0;
+	basedev = block->base;
+
+	if (rq_data_dir(req) == READ)
+		cmd = DASD_ECKD_CCW_READ_TRACK;
+	else if (rq_data_dir(req) == WRITE)
+		cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
+	else {
+		cqr = ERR_PTR(-EINVAL);
+		goto out;
+	}
+
+	/*
+	 * Raw track based I/O needs IDAWs for each page,
+	 * and not just for 64 bit addresses.
+	 */
+	cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
+
+	/* 1x prefix + one read/write ccw per track */
+	cplength = 1 + trkcount;
+
+	/*
+	 * struct PFX_eckd_data has up to 2 bytes of extended parameter data;
+	 * this is needed for write full track and has to be accounted for
+	 * separately.
+	 * Add 8 instead of 2 to keep the 8 byte boundary.
+	 */
+	pfx_datasize = sizeof(struct PFX_eckd_data) + 8;
+
+	datasize = pfx_datasize + cidaw * sizeof(unsigned long long);
+
+	/* Allocate the ccw request. */
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
+				   datasize, startdev);
+	if (IS_ERR(cqr))
+		goto out;
+	ccw = cqr->cpaddr;
+
+	if (prefix_LRE(ccw++, cqr->data, first_trk, last_trk, cmd,
+		       basedev, startdev, 1 /* format */, first_offs + 1,
+		       trkcount, 0, 0) == -EAGAIN) {
+		/* Clock not in sync and XRC is enabled.
+		 * Try again later.
+		 */
+		dasd_sfree_request(cqr, startdev);
+		cqr = ERR_PTR(-EAGAIN);
+		goto out;
+	}
+
+	idaws = (unsigned long *)(cqr->data + pfx_datasize);
+
+	len_to_track_end = 0;
+
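+	/*
+	 * Each track is mapped to 64k of block device data, so a new
+	 * read/write full track ccw is started whenever 64k of segment
+	 * data have been consumed. ccw->count uses the maximum 3390 track
+	 * size and CCW_FLAG_SLI suppresses the resulting incorrect length
+	 * indication.
+	 */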
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		seg_len = bv->bv_len;
+		if (!len_to_track_end) {
+			ccw[-1].flags |= CCW_FLAG_CC;
+			ccw->cmd_code = cmd;
+			/* maximum 3390 track size */
+			ccw->count = 57326;
+			/* 64k map to one track */
+			len_to_track_end = 65536;
+			ccw->cda = (__u32)(addr_t)idaws;
+			ccw->flags |= CCW_FLAG_IDA;
+			ccw->flags |= CCW_FLAG_SLI;
+			ccw++;
+		}
+		len_to_track_end -= seg_len;
+		idaws = idal_create_words(idaws, dst, seg_len);
+	}
+
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->startdev = startdev;
+	cqr->memdev = startdev;
+	cqr->block = block;
+	cqr->expires = startdev->default_expires * HZ;
+	cqr->lpm = startdev->path_data.ppm;
+	cqr->retries = 256;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
+		cqr = NULL;
+out:
+	return cqr;
+}
+
+
+static int
+dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+{
+	struct dasd_eckd_private *private;
+	struct ccw1 *ccw;
+	struct req_iterator iter;
+	struct bio_vec *bv;
+	char *dst, *cda;
+	unsigned int blksize, blk_per_trk, off;
+	sector_t recid;
+	int status;
+
+	if (!dasd_page_cache)
+		goto out;
+	private = (struct dasd_eckd_private *) cqr->block->base->private;
+	blksize = cqr->block->bp_block;
+	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
+	ccw = cqr->cpaddr;
+	/* Skip over define extent & locate record. */
+	ccw++;
+	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
+		ccw++;
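+	/*
+	 * Walk the request segments in the order in which the channel
+	 * program was built. If a block was staged in a dasd_page_cache
+	 * page (cda differs from the request buffer), copy read data back
+	 * and free the cache page.
+	 */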
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		for (off = 0; off < bv->bv_len; off += blksize) {
+			/* Skip locate record. */
+			if (private->uses_cdl && recid <= 2*blk_per_trk)
+				ccw++;
+			if (dst) {
+				if (ccw->flags & CCW_FLAG_IDA)
+					cda = *((char **)((addr_t) ccw->cda));
+				else
+					cda = (char *)((addr_t) ccw->cda);
+				if (dst != cda) {
+					if (rq_data_dir(req) == READ)
+						memcpy(dst, cda, bv->bv_len);
+					kmem_cache_free(dasd_page_cache,
+					    (void *)((addr_t)cda & PAGE_MASK));
+				}
+				dst = NULL;
+			}
+			ccw++;
+			recid++;
+		}
+	}
+out:
+	status = cqr->status == DASD_CQR_DONE;
+	dasd_sfree_request(cqr, cqr->memdev);
+	return status;
+}
+
+/*
+ * Modify ccw/tcw in cqr so it can be started on a base device.
+ *
+ * Note that this is not enough to restart the cqr!
+ * Either reset cqr->startdev as well (summary unit check handling)
+ * or restart via separate cqr (as in ERP handling).
+ */
+void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
+{
+	struct ccw1 *ccw;
+	struct PFX_eckd_data *pfxdata;
+	struct tcw *tcw;
+	struct tccb *tccb;
+	struct dcw *dcw;
+
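+	/*
+	 * cpmode == 1 means a transport mode (tcw/tccb) channel program;
+	 * otherwise it is a command mode program whose validity flags are
+	 * only cleared if it starts with a PFX ccw.
+	 */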
+	if (cqr->cpmode == 1) {
+		tcw = cqr->cpaddr;
+		tccb = tcw_get_tccb(tcw);
+		dcw = (struct dcw *)&tccb->tca[0];
+		pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
+		pfxdata->validity.verify_base = 0;
+		pfxdata->validity.hyper_pav = 0;
+	} else {
+		ccw = cqr->cpaddr;
+		pfxdata = cqr->data;
+		if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
+			pfxdata->validity.verify_base = 0;
+			pfxdata->validity.hyper_pav = 0;
+		}
+	}
+}
+
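+/*
+ * Maximum number of requests built but not yet freed per (alias) start
+ * device; dasd_eckd_build_alias_cp returns -EBUSY above this limit.
+ */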
+#define DASD_ECKD_CHANQ_MAX_SIZE 4
+
+static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
+						     struct dasd_block *block,
+						     struct request *req)
+{
+	struct dasd_eckd_private *private;
+	struct dasd_device *startdev;
+	unsigned long flags;
+	struct dasd_ccw_req *cqr;
+
+	startdev = dasd_alias_get_start_dev(base);
+	if (!startdev)
+		startdev = base;
+	private = (struct dasd_eckd_private *) startdev->private;
+	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
+		return ERR_PTR(-EBUSY);
+
+	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
+	private->count++;
+	if ((base->features & DASD_FEATURE_USERAW))
+		cqr = dasd_raw_build_cp(startdev, block, req);
+	else
+		cqr = dasd_eckd_build_cp(startdev, block, req);
+	if (IS_ERR(cqr))
+		private->count--;
+	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
+	return cqr;
+}
+
+static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
+				   struct request *req)
+{
+	struct dasd_eckd_private *private;
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
+	private = (struct dasd_eckd_private *) cqr->memdev->private;
+	private->count--;
+	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
+	return dasd_eckd_free_cp(cqr, req);
+}
+
+static int
+dasd_eckd_fill_info(struct dasd_device * device,
+		    struct dasd_information2_t * info)
+{
+	struct dasd_eckd_private *private;
+
+	private = (struct dasd_eckd_private *) device->private;
+	info->label_block = 2;
+	info->FBA_layout = private->uses_cdl ? 0 : 1;
+	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
+	info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
+	memcpy(info->characteristics, &private->rdc_data,
+	       sizeof(struct dasd_eckd_characteristics));
+	info->confdata_size = min((unsigned long)private->conf_len,
+				  sizeof(info->configuration_data));
+	memcpy(info->configuration_data, private->conf_data,
+	       info->confdata_size);
+	return 0;
+}
+
+/*
+ * SECTION: ioctl functions for eckd devices.
+ */
+
+/*
+ * Release device ioctl.
+ * Builds a channel program to release a previously reserved
+ * (see dasd_eckd_reserve) device.
+ */
+static int
+dasd_eckd_release(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+	struct ccw1 *ccw;
+	int useglobal;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
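+	/*
+	 * A release may be needed while I/O is stalled by the reservation,
+	 * so if the normal request allocation fails, fall back to the
+	 * preallocated dasd_reserve_req protected by dasd_reserve_mutex
+	 * (same scheme as in dasd_eckd_snid below).
+	 */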
+	useglobal = 0;
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+	if (IS_ERR(cqr)) {
+		mutex_lock(&dasd_reserve_mutex);
+		useglobal = 1;
+		cqr = &dasd_reserve_req->cqr;
+		memset(cqr, 0, sizeof(*cqr));
+		memset(&dasd_reserve_req->ccw, 0,
+		       sizeof(dasd_reserve_req->ccw));
+		cqr->cpaddr = &dasd_reserve_req->ccw;
+		cqr->data = &dasd_reserve_req->data;
+		cqr->magic = DASD_ECKD_MAGIC;
+	}
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->count = 32;
+	ccw->cda = (__u32)(addr_t) cqr->data;
+	cqr->startdev = device;
+	cqr->memdev = device;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->retries = 2;	/* set retry counter to enable basic ERP */
+	cqr->expires = 2 * HZ;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	rc = dasd_sleep_on_immediatly(cqr);
+	if (!rc)
+		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
+
+	if (useglobal)
+		mutex_unlock(&dasd_reserve_mutex);
+	else
+		dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+/*
+ * Reserve device ioctl.
+ * Options are set to 'synchronous wait for interrupt' and
+ * 'timeout the request'. This leads to a terminate IO if
+ * the interrupt is outstanding for a certain time.
+ */
+static int
+dasd_eckd_reserve(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+	struct ccw1 *ccw;
+	int useglobal;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	useglobal = 0;
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+	if (IS_ERR(cqr)) {
+		mutex_lock(&dasd_reserve_mutex);
+		useglobal = 1;
+		cqr = &dasd_reserve_req->cqr;
+		memset(cqr, 0, sizeof(*cqr));
+		memset(&dasd_reserve_req->ccw, 0,
+		       sizeof(dasd_reserve_req->ccw));
+		cqr->cpaddr = &dasd_reserve_req->ccw;
+		cqr->data = &dasd_reserve_req->data;
+		cqr->magic = DASD_ECKD_MAGIC;
+	}
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->count = 32;
+	ccw->cda = (__u32)(addr_t) cqr->data;
+	cqr->startdev = device;
+	cqr->memdev = device;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->retries = 2;	/* set retry counter to enable basic ERP */
+	cqr->expires = 2 * HZ;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	rc = dasd_sleep_on_immediatly(cqr);
+	if (!rc)
+		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
+
+	if (useglobal)
+		mutex_unlock(&dasd_reserve_mutex);
+	else
+		dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+/*
+ * Steal lock ioctl - unconditional reserve device.
+ * Builds a channel program to break a device's reservation.
+ * (unconditional reserve)
+ */
+static int
+dasd_eckd_steal_lock(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+	struct ccw1 *ccw;
+	int useglobal;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	useglobal = 0;
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+	if (IS_ERR(cqr)) {
+		mutex_lock(&dasd_reserve_mutex);
+		useglobal = 1;
+		cqr = &dasd_reserve_req->cqr;
+		memset(cqr, 0, sizeof(*cqr));
+		memset(&dasd_reserve_req->ccw, 0,
+		       sizeof(dasd_reserve_req->ccw));
+		cqr->cpaddr = &dasd_reserve_req->ccw;
+		cqr->data = &dasd_reserve_req->data;
+		cqr->magic = DASD_ECKD_MAGIC;
+	}
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->count = 32;
+	ccw->cda = (__u32)(addr_t) cqr->data;
+	cqr->startdev = device;
+	cqr->memdev = device;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->retries = 2;	/* set retry counter to enable basic ERP */
+	cqr->expires = 2 * HZ;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	rc = dasd_sleep_on_immediatly(cqr);
+	if (!rc)
+		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
+
+	if (useglobal)
+		mutex_unlock(&dasd_reserve_mutex);
+	else
+		dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+/*
+ * SNID - Sense Path Group ID
+ * This ioctl may be used in situations where I/O is stalled due to
+ * a reserve, so if the normal dasd_smalloc_request fails, we use the
+ * preallocated dasd_reserve_req.
+ */
+static int dasd_eckd_snid(struct dasd_device *device,
+			  void __user *argp)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+	struct ccw1 *ccw;
+	int useglobal;
+	struct dasd_snid_ioctl_data usrparm;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
+		return -EFAULT;
+
+	useglobal = 0;
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
+				   sizeof(struct dasd_snid_data), device);
+	if (IS_ERR(cqr)) {
+		mutex_lock(&dasd_reserve_mutex);
+		useglobal = 1;
+		cqr = &dasd_reserve_req->cqr;
+		memset(cqr, 0, sizeof(*cqr));
+		memset(&dasd_reserve_req->ccw, 0,
+		       sizeof(dasd_reserve_req->ccw));
+		cqr->cpaddr = &dasd_reserve_req->ccw;
+		cqr->data = &dasd_reserve_req->data;
+		cqr->magic = DASD_ECKD_MAGIC;
+	}
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_SNID;
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->count = 12;
+	ccw->cda = (__u32)(addr_t) cqr->data;
+	cqr->startdev = device;
+	cqr->memdev = device;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
+	cqr->retries = 5;
+	cqr->expires = 10 * HZ;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	cqr->lpm = usrparm.path_mask;
+
+	rc = dasd_sleep_on_immediatly(cqr);
+	/* verify that I/O processing didn't modify the path mask */
+	if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
+		rc = -EIO;
+	if (!rc) {
+		usrparm.data = *((struct dasd_snid_data *)cqr->data);
+		if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
+			rc = -EFAULT;
+	}
+
+	if (useglobal)
+		mutex_unlock(&dasd_reserve_mutex);
+	else
+		dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+/*
+ * Read performance statistics
+ */
+static int
+dasd_eckd_performance(struct dasd_device *device, void __user *argp)
+{
+	struct dasd_psf_prssd_data *prssdp;
+	struct dasd_rssd_perf_stats_t *stats;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	int rc;
+
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
+				   (sizeof(struct dasd_psf_prssd_data) +
+				    sizeof(struct dasd_rssd_perf_stats_t)),
+				   device);
+	if (IS_ERR(cqr)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Could not allocate initialization request");
+		return PTR_ERR(cqr);
+	}
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->retries = 0;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	cqr->expires = 10 * HZ;
+
+	/* Prepare for Read Subsystem Data */
+	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+	prssdp->order = PSF_ORDER_PRSSD;
+	prssdp->suborder = 0x01;	/* Performance Statistics */
+	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->count = sizeof(struct dasd_psf_prssd_data);
+	ccw->flags |= CCW_FLAG_CC;
+	ccw->cda = (__u32)(addr_t) prssdp;
+
+	/* Read Subsystem Data - Performance Statistics */
+	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
+	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
+
+	ccw++;
+	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
+	ccw->cda = (__u32)(addr_t) stats;
+
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	rc = dasd_sleep_on(cqr);
+	if (rc == 0) {
+		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
+		if (copy_to_user(argp, stats,
+				 sizeof(struct dasd_rssd_perf_stats_t)))
+			rc = -EFAULT;
+	}
+	dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+/*
+ * Get attributes (cache operations)
+ * Returns the cache attributes used in Define Extent (DE).
+ */
+static int
+dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
+{
+	struct dasd_eckd_private *private =
+		(struct dasd_eckd_private *)device->private;
+	struct attrib_data_t attrib = private->attrib;
+	int rc;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	if (!argp)
+		return -EINVAL;
+
+	rc = 0;
+	if (copy_to_user(argp, (long *) &attrib,
+			 sizeof(struct attrib_data_t)))
+		rc = -EFAULT;
+
+	return rc;
+}
+
+/*
+ * Set attributes (cache operations)
+ * Stores the attributes for cache operations to be used in Define Extent (DE).
+ */
+static int
+dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
+{
+	struct dasd_eckd_private *private =
+		(struct dasd_eckd_private *)device->private;
+	struct attrib_data_t attrib;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	if (!argp)
+		return -EINVAL;
+
+	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
+		return -EFAULT;
+	private->attrib = attrib;
+
+	dev_info(&device->cdev->dev,
+		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
+		 private->attrib.operation, private->attrib.nr_cyl);
+	return 0;
+}
+
+/*
+ * Issue syscall I/O to EMC Symmetrix array.
+ * CCWs are PSF and RSSD
+ */
+static int dasd_symm_io(struct dasd_device *device, void __user *argp)
+{
+	struct dasd_symmio_parms usrparm;
+	char *psf_data, *rssd_result;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	char psf0, psf1;
+	int rc;
+
+	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
+		return -EACCES;
+	psf0 = psf1 = 0;
+
+	/* Copy parms from caller */
+	rc = -EFAULT;
+	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
+		goto out;
+	if (is_compat_task() || sizeof(long) == 4) {
+		/* Make sure pointers are sane even on 31 bit. */
+		rc = -EINVAL;
+		if ((usrparm.psf_data >> 32) != 0)
+			goto out;
+		if ((usrparm.rssd_result >> 32) != 0)
+			goto out;
+		usrparm.psf_data &= 0x7fffffffULL;
+		usrparm.rssd_result &= 0x7fffffffULL;
+	}
+	/* alloc I/O data area */
+	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
+	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
+	if (!psf_data || !rssd_result) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
+
+	/* get syscall header from user space */
+	rc = -EFAULT;
+	if (copy_from_user(psf_data,
+			   (void __user *)(unsigned long) usrparm.psf_data,
+			   usrparm.psf_data_len))
+		goto out_free;
+	psf0 = psf_data[0];
+	psf1 = psf_data[1];
+
+	/* setup CCWs for PSF + RSSD */
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device);
+	if (IS_ERR(cqr)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			"Could not allocate initialization request");
+		rc = PTR_ERR(cqr);
+		goto out_free;
+	}
+
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->retries = 3;
+	cqr->expires = 10 * HZ;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	/* Build the ccws */
+	ccw = cqr->cpaddr;
+
+	/* PSF ccw */
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->count = usrparm.psf_data_len;
+	ccw->flags |= CCW_FLAG_CC;
+	ccw->cda = (__u32)(addr_t) psf_data;
+
+	ccw++;
+
+	/* RSSD ccw  */
+	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+	ccw->count = usrparm.rssd_result_len;
+	ccw->flags = CCW_FLAG_SLI;
+	ccw->cda = (__u32)(addr_t) rssd_result;
+
+	rc = dasd_sleep_on(cqr);
+	if (rc)
+		goto out_sfree;
+
+	rc = -EFAULT;
+	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
+			   rssd_result, usrparm.rssd_result_len))
+		goto out_sfree;
+	rc = 0;
+
+out_sfree:
+	dasd_sfree_request(cqr, cqr->memdev);
+out_free:
+	kfree(rssd_result);
+	kfree(psf_data);
+out:
+	DBF_DEV_EVENT(DBF_WARNING, device,
+		      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
+		      (int) psf0, (int) psf1, rc);
+	return rc;
+}
+
+static int
+dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
+{
+	struct dasd_device *device = block->base;
+
+	switch (cmd) {
+	case BIODASDGATTR:
+		return dasd_eckd_get_attrib(device, argp);
+	case BIODASDSATTR:
+		return dasd_eckd_set_attrib(device, argp);
+	case BIODASDPSRD:
+		return dasd_eckd_performance(device, argp);
+	case BIODASDRLSE:
+		return dasd_eckd_release(device);
+	case BIODASDRSRV:
+		return dasd_eckd_reserve(device);
+	case BIODASDSLCK:
+		return dasd_eckd_steal_lock(device);
+	case BIODASDSNID:
+		return dasd_eckd_snid(device, argp);
+	case BIODASDSYMMIO:
+		return dasd_symm_io(device, argp);
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+/*
+ * Dump the range of CCWs into 'page' buffer
+ * and return number of printed chars.
+ */
+static int
+dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
+{
+	int len, count;
+	char *datap;
+
+	len = 0;
+	while (from <= to) {
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " CCW %p: %08X %08X DAT:",
+			       from, ((int *) from)[0], ((int *) from)[1]);
+
+		/* get pointer to data (consider IDALs) */
+		if (from->flags & CCW_FLAG_IDA)
+			datap = (char *) *((addr_t *) (addr_t) from->cda);
+		else
+			datap = (char *) ((addr_t) from->cda);
+
+		/* dump data (max 32 bytes) */
+		for (count = 0; count < from->count && count < 32; count++) {
+			if (count % 8 == 0) len += sprintf(page + len, " ");
+			if (count % 4 == 0) len += sprintf(page + len, " ");
+			len += sprintf(page + len, "%02x", datap[count]);
+		}
+		len += sprintf(page + len, "\n");
+		from++;
+	}
+	return len;
+}
+
+static void
+dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
+			 char *reason)
+{
+	u64 *sense;
+	u64 *stat;
+
+	sense = (u64 *) dasd_get_sense(irb);
+	stat = (u64 *) &irb->scsw;
+	if (sense) {
+		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
+			      "%016llx %016llx %016llx %016llx",
+			      reason, *stat, *((u32 *) (stat + 1)),
+			      sense[0], sense[1], sense[2], sense[3]);
+	} else {
+		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
+			      reason, *stat, *((u32 *) (stat + 1)),
+			      "NO VALID SENSE");
+	}
+}
+
+/*
+ * Print sense data and the related channel program.
+ * Output is split into parts because the printk buffer is only 1024 bytes.
+ */
+static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
+				 struct dasd_ccw_req *req, struct irb *irb)
+{
+	char *page;
+	struct ccw1 *first, *last, *fail, *from, *to;
+	int len, sl, sct;
+
+	page = (char *) get_zeroed_page(GFP_ATOMIC);
+	if (page == NULL) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "No memory to dump sense data\n");
+		return;
+	}
+	/* dump the sense data */
+	len = sprintf(page,  KERN_ERR PRINTK_HEADER
+		      " I/O status report for device %s:\n",
+		      dev_name(&device->cdev->dev));
+	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
+		       "CS:%02X RC:%d\n",
+		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
+		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
+		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
+		       req ? req->intrc : 0);
+	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+		       " device %s: Failing CCW: %p\n",
+		       dev_name(&device->cdev->dev),
+		       (void *) (addr_t) irb->scsw.cmd.cpa);
+	if (irb->esw.esw0.erw.cons) {
+		for (sl = 0; sl < 4; sl++) {
+			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+				       " Sense(hex) %2d-%2d:",
+				       (8 * sl), ((8 * sl) + 7));
+
+			for (sct = 0; sct < 8; sct++) {
+				len += sprintf(page + len, " %02x",
+					       irb->ecw[8 * sl + sct]);
+			}
+			len += sprintf(page + len, "\n");
+		}
+
+		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
+			/* 24 Byte Sense Data */
+			sprintf(page + len, KERN_ERR PRINTK_HEADER
+				" 24 Byte: %x MSG %x, "
+				"%s MSGb to SYSOP\n",
+				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
+				irb->ecw[1] & 0x10 ? "" : "no");
+		} else {
+			/* 32 Byte Sense Data */
+			sprintf(page + len, KERN_ERR PRINTK_HEADER
+				" 32 Byte: Format: %x "
+				"Exception class %x\n",
+				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
+		}
+	} else {
+		sprintf(page + len, KERN_ERR PRINTK_HEADER
+			" SORRY - NO VALID SENSE AVAILABLE\n");
+	}
+	printk("%s", page);
+
+	if (req) {
+		/* req == NULL for unsolicited interrupts */
+		/* dump the Channel Program (max 140 Bytes per line) */
+		/* Count CCWs and print the first CCWs (maximum 1024 / 140 = 7) */
+		first = req->cpaddr;
+		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
+		to = min(first + 6, last);
+		len = sprintf(page,  KERN_ERR PRINTK_HEADER
+			      " Related CP in req: %p\n", req);
+		dasd_eckd_dump_ccw_range(first, to, page + len);
+		printk("%s", page);
+
+		/* print failing CCW area (maximum 4) */
+		/* scsw->cda is either valid or zero  */
+		len = 0;
+		from = ++to;
+		fail = (struct ccw1 *)(addr_t)
+				irb->scsw.cmd.cpa; /* failing CCW */
+		if (from <  fail - 2) {
+			from = fail - 2;     /* there is a gap - print header */
+			len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
+		}
+		to = min(fail + 1, last);
+		len += dasd_eckd_dump_ccw_range(from, to, page + len);
+
+		/* print last CCWs (maximum 2) */
+		from = max(from, ++to);
+		if (from < last - 1) {
+			from = last - 1;     /* there is a gap - print header */
+			len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
+		}
+		len += dasd_eckd_dump_ccw_range(from, last, page + len);
+		if (len > 0)
+			printk("%s", page);
+	}
+	free_page((unsigned long) page);
+}
+
+
+/*
+ * Print sense data from a tcw.
+ */
+static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
+				 struct dasd_ccw_req *req, struct irb *irb)
+{
+	char *page;
+	int len, sl, sct, residual;
+	struct tsb *tsb;
+	u8 *sense, *rcq;
+
+	page = (char *) get_zeroed_page(GFP_ATOMIC);
+	if (page == NULL) {
+		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
+			    "No memory to dump sense data");
+		return;
+	}
+	/* dump the sense data */
+	len = sprintf(page,  KERN_ERR PRINTK_HEADER
+		      " I/O status report for device %s:\n",
+		      dev_name(&device->cdev->dev));
+	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
+		       "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
+		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
+		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
+		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
+		       irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
+		       req ? req->intrc : 0);
+	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+		       " device %s: Failing TCW: %p\n",
+		       dev_name(&device->cdev->dev),
+		       (void *) (addr_t) irb->scsw.tm.tcw);
+
+	tsb = NULL;
+	sense = NULL;
+	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
+		tsb = tcw_get_tsb(
+			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);
+
+	if (tsb) {
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " tsb->length %d\n", tsb->length);
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " tsb->flags %x\n", tsb->flags);
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " tsb->count %d\n", tsb->count);
+		residual = tsb->count - 28;
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " residual %d\n", residual);
+
+		switch (tsb->flags & 0x07) {
+		case 1:	/* tsa_iostat */
+			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " tsb->tsa.iostat.dev_time %d\n",
+				       tsb->tsa.iostat.dev_time);
+			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " tsb->tsa.iostat.def_time %d\n",
+				       tsb->tsa.iostat.def_time);
+			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " tsb->tsa.iostat.queue_time %d\n",
+				       tsb->tsa.iostat.queue_time);
+			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " tsb->tsa.iostat.dev_busy_time %d\n",
+				       tsb->tsa.iostat.dev_busy_time);
+			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " tsb->tsa.iostat.dev_act_time %d\n",
+				       tsb->tsa.iostat.dev_act_time);
+			sense = tsb->tsa.iostat.sense;
+			break;
+		case 2: /* ts_ddpc */
+			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
+			for (sl = 0; sl < 2; sl++) {
+				len += sprintf(page + len,
+					       KERN_ERR PRINTK_HEADER
+					       " tsb->tsa.ddpc.rcq %2d-%2d: ",
+					       (8 * sl), ((8 * sl) + 7));
+				rcq = tsb->tsa.ddpc.rcq;
+				for (sct = 0; sct < 8; sct++) {
+					len += sprintf(page + len, " %02x",
+						       rcq[8 * sl + sct]);
+				}
+				len += sprintf(page + len, "\n");
+			}
+			sense = tsb->tsa.ddpc.sense;
+			break;
+		case 3: /* tsa_intrg */
+			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+				      " tsb->tsa.intrg.: not supportet yet \n");
+			break;
+		}
+
+		if (sense) {
+			for (sl = 0; sl < 4; sl++) {
+				len += sprintf(page + len,
+					       KERN_ERR PRINTK_HEADER
+					       " Sense(hex) %2d-%2d:",
+					       (8 * sl), ((8 * sl) + 7));
+				for (sct = 0; sct < 8; sct++) {
+					len += sprintf(page + len, " %02x",
+						       sense[8 * sl + sct]);
+				}
+				len += sprintf(page + len, "\n");
+			}
+
+			if (sense[27] & DASD_SENSE_BIT_0) {
+				/* 24 Byte Sense Data */
+				sprintf(page + len, KERN_ERR PRINTK_HEADER
+					" 24 Byte: %x MSG %x, "
+					"%s MSGb to SYSOP\n",
+					sense[7] >> 4, sense[7] & 0x0f,
+					sense[1] & 0x10 ? "" : "no");
+			} else {
+				/* 32 Byte Sense Data */
+				sprintf(page + len, KERN_ERR PRINTK_HEADER
+					" 32 Byte: Format: %x "
+					"Exception class %x\n",
+					sense[6] & 0x0f, sense[22] >> 4);
+			}
+		} else {
+			sprintf(page + len, KERN_ERR PRINTK_HEADER
+				" SORRY - NO VALID SENSE AVAILABLE\n");
+		}
+	} else {
+		sprintf(page + len, KERN_ERR PRINTK_HEADER
+			" SORRY - NO TSB DATA AVAILABLE\n");
+	}
+	printk("%s", page);
+	free_page((unsigned long) page);
+}
+
+static void dasd_eckd_dump_sense(struct dasd_device *device,
+				 struct dasd_ccw_req *req, struct irb *irb)
+{
+	if (scsw_is_tm(&irb->scsw))
+		dasd_eckd_dump_sense_tcw(device, req, irb);
+	else
+		dasd_eckd_dump_sense_ccw(device, req, irb);
+}
+
+static int dasd_eckd_pm_freeze(struct dasd_device *device)
+{
+	/*
+	 * the device should be disconnected from our LCU structure;
+	 * on restore we will reconnect it and reread LCU-specific
+	 * information like PAV support that might have changed
+	 */
+	dasd_alias_remove_device(device);
+	dasd_alias_disconnect_device_from_lcu(device);
+
+	return 0;
+}
+
+static int dasd_eckd_restore_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	struct dasd_eckd_characteristics temp_rdc_data;
+	int rc;
+	struct dasd_uid temp_uid;
+	unsigned long flags;
+
+	private = (struct dasd_eckd_private *) device->private;
+
+	/* Read Configuration Data */
+	rc = dasd_eckd_read_conf(device);
+	if (rc)
+		goto out_err;
+
+	dasd_eckd_get_uid(device, &temp_uid);
+	/* Generate device unique id */
+	rc = dasd_eckd_generate_uid(device);
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
+		dev_err(&device->cdev->dev, "The UID of the DASD has "
+			"changed\n");
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	if (rc)
+		goto out_err;
+
+	/* register lcu with alias handling, enable PAV if this is a new lcu */
+	rc = dasd_alias_make_device_known_to_lcu(device);
+	if (rc)
+		return rc;
+	dasd_eckd_validate_server(device);
+
+	/* RE-Read Configuration Data */
+	rc = dasd_eckd_read_conf(device);
+	if (rc)
+		goto out_err;
+
+	/* Read Feature Codes */
+	dasd_eckd_read_features(device);
+
+	/* Read Device Characteristics */
+	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
+					 &temp_rdc_data, 64);
+	if (rc) {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+				"Read device characteristic failed, rc=%d", rc);
+		goto out_err;
+	}
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+	/* add device to alias management */
+	dasd_alias_add_device(device);
+
+	return 0;
+
+out_err:
+	return -1;
+}
+
+static int dasd_eckd_reload_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	int rc, old_base;
+	char print_uid[60];
+	struct dasd_uid uid;
+	unsigned long flags;
+
+	private = (struct dasd_eckd_private *) device->private;
+
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	old_base = private->uid.base_unit_addr;
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+	/* Read Configuration Data */
+	rc = dasd_eckd_read_conf(device);
+	if (rc)
+		goto out_err;
+
+	rc = dasd_eckd_generate_uid(device);
+	if (rc)
+		goto out_err;
+	/*
+	 * update unit address configuration and
+	 * add device to alias management
+	 */
+	dasd_alias_update_add_device(device);
+
+	dasd_eckd_get_uid(device, &uid);
+
+	if (old_base != uid.base_unit_addr) {
+		if (strlen(uid.vduit) > 0)
+			snprintf(print_uid, sizeof(print_uid),
+				 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
+				 uid.ssid, uid.base_unit_addr, uid.vduit);
+		else
+			snprintf(print_uid, sizeof(print_uid),
+				 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
+				 uid.ssid, uid.base_unit_addr);
+
+		dev_info(&device->cdev->dev,
+			 "An Alias device was reassigned to a new base device "
+			 "with UID: %s\n", print_uid);
+	}
+	return 0;
+
+out_err:
+	return -1;
+}
+
+static struct ccw_driver dasd_eckd_driver = {
+	.driver = {
+		.name	= "dasd-eckd",
+		.owner	= THIS_MODULE,
+	},
+	.ids	     = dasd_eckd_ids,
+	.probe	     = dasd_eckd_probe,
+	.remove      = dasd_generic_remove,
+	.set_offline = dasd_generic_set_offline,
+	.set_online  = dasd_eckd_set_online,
+	.notify      = dasd_generic_notify,
+	.path_event  = dasd_generic_path_event,
+	.freeze      = dasd_generic_pm_freeze,
+	.thaw	     = dasd_generic_restore_device,
+	.restore     = dasd_generic_restore_device,
+	.uc_handler  = dasd_generic_uc_handler,
+	.int_class   = IOINT_DAS,
+};
+
+/*
+ * max_blocks is dependent on the amount of storage that is available
+ * in the static io buffer for each device. Currently each device has
+ * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
+ * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
+ * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
+ * addition we have one define extent ccw + 16 bytes of data and one
+ * locate record ccw + 16 bytes of data. That makes:
+ * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
+ * We want to fit two into the available memory so that we can immediately
+ * start the next request if one finishes off. That makes 249.5 blocks
+ * for one request. Give a little safety and the result is 240.
+ */
+static struct dasd_discipline dasd_eckd_discipline = {
+	.owner = THIS_MODULE,
+	.name = "ECKD",
+	.ebcname = "ECKD",
+	.max_blocks = 190,
+	.check_device = dasd_eckd_check_characteristics,
+	.uncheck_device = dasd_eckd_uncheck_device,
+	.do_analysis = dasd_eckd_do_analysis,
+	.verify_path = dasd_eckd_verify_path,
+	.ready_to_online = dasd_eckd_ready_to_online,
+	.online_to_ready = dasd_eckd_online_to_ready,
+	.fill_geometry = dasd_eckd_fill_geometry,
+	.start_IO = dasd_start_IO,
+	.term_IO = dasd_term_IO,
+	.handle_terminated_request = dasd_eckd_handle_terminated_request,
+	.format_device = dasd_eckd_format_device,
+	.erp_action = dasd_eckd_erp_action,
+	.erp_postaction = dasd_eckd_erp_postaction,
+	.check_for_device_change = dasd_eckd_check_for_device_change,
+	.build_cp = dasd_eckd_build_alias_cp,
+	.free_cp = dasd_eckd_free_alias_cp,
+	.dump_sense = dasd_eckd_dump_sense,
+	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
+	.fill_info = dasd_eckd_fill_info,
+	.ioctl = dasd_eckd_ioctl,
+	.freeze = dasd_eckd_pm_freeze,
+	.restore = dasd_eckd_restore_device,
+	.reload = dasd_eckd_reload_device,
+	.get_uid = dasd_eckd_get_uid,
+	.kick_validate = dasd_eckd_kick_validate_server,
+};
+
+static int __init
+dasd_eckd_init(void)
+{
+	int ret;
+
+	ASCEBC(dasd_eckd_discipline.ebcname, 4);
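+	/*
+	 * Preallocate the static fallback request used by the reserve,
+	 * release, steal lock and SNID ioctls as well as the path
+	 * verification worker data, so that these paths can make progress
+	 * even if runtime allocations fail.
+	 */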
+	dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
+				   GFP_KERNEL | GFP_DMA);
+	if (!dasd_reserve_req)
+		return -ENOMEM;
+	path_verification_worker = kmalloc(sizeof(*path_verification_worker),
+				   GFP_KERNEL | GFP_DMA);
+	if (!path_verification_worker) {
+		kfree(dasd_reserve_req);
+		return -ENOMEM;
+	}
+	ret = ccw_driver_register(&dasd_eckd_driver);
+	if (!ret)
+		wait_for_device_probe();
+	else {
+		kfree(path_verification_worker);
+		kfree(dasd_reserve_req);
+	}
+	return ret;
+}
+
+static void __exit
+dasd_eckd_cleanup(void)
+{
+	ccw_driver_unregister(&dasd_eckd_driver);
+	kfree(path_verification_worker);
+	kfree(dasd_reserve_req);
+}
+
+module_init(dasd_eckd_init);
+module_exit(dasd_eckd_cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_eckd.h b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_eckd.h
new file mode 100644
index 0000000..4a688a8
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_eckd.h
@@ -0,0 +1,475 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_eckd.h
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ *
+ */
+
+#ifndef DASD_ECKD_H
+#define DASD_ECKD_H
+
+/*****************************************************************************
+ * SECTION: CCW Definitions
+ ****************************************************************************/
+#define DASD_ECKD_CCW_WRITE		 0x05
+#define DASD_ECKD_CCW_READ		 0x06
+#define DASD_ECKD_CCW_WRITE_HOME_ADDRESS 0x09
+#define DASD_ECKD_CCW_READ_HOME_ADDRESS	 0x0a
+#define DASD_ECKD_CCW_WRITE_KD		 0x0d
+#define DASD_ECKD_CCW_READ_KD		 0x0e
+#define DASD_ECKD_CCW_ERASE		 0x11
+#define DASD_ECKD_CCW_READ_COUNT	 0x12
+#define DASD_ECKD_CCW_SLCK		 0x14
+#define DASD_ECKD_CCW_WRITE_RECORD_ZERO	 0x15
+#define DASD_ECKD_CCW_READ_RECORD_ZERO	 0x16
+#define DASD_ECKD_CCW_WRITE_CKD		 0x1d
+#define DASD_ECKD_CCW_READ_CKD		 0x1e
+#define DASD_ECKD_CCW_PSF		 0x27
+#define DASD_ECKD_CCW_SNID		 0x34
+#define DASD_ECKD_CCW_RSSD		 0x3e
+#define DASD_ECKD_CCW_LOCATE_RECORD	 0x47
+#define DASD_ECKD_CCW_SNSS		 0x54
+#define DASD_ECKD_CCW_DEFINE_EXTENT	 0x63
+#define DASD_ECKD_CCW_WRITE_MT		 0x85
+#define DASD_ECKD_CCW_READ_MT		 0x86
+#define DASD_ECKD_CCW_WRITE_KD_MT	 0x8d
+#define DASD_ECKD_CCW_READ_KD_MT	 0x8e
+#define DASD_ECKD_CCW_RELEASE		 0x94
+#define DASD_ECKD_CCW_WRITE_FULL_TRACK	 0x95
+#define DASD_ECKD_CCW_READ_CKD_MT	 0x9e
+#define DASD_ECKD_CCW_WRITE_CKD_MT	 0x9d
+#define DASD_ECKD_CCW_WRITE_TRACK_DATA	 0xA5
+#define DASD_ECKD_CCW_READ_TRACK_DATA	 0xA6
+#define DASD_ECKD_CCW_RESERVE		 0xB4
+#define DASD_ECKD_CCW_READ_TRACK	 0xDE
+#define DASD_ECKD_CCW_PFX		 0xE7
+#define DASD_ECKD_CCW_PFX_READ		 0xEA
+#define DASD_ECKD_CCW_RSCK		 0xF9
+#define DASD_ECKD_CCW_RCD		 0xFA
+
+/*
+ * Perform Subsystem Function / Sub-Orders
+ */
+#define PSF_ORDER_PRSSD 0x18
+#define PSF_ORDER_SSC	0x1D
+
+/*
+ * Size that is reported for large volumes in the old 16-bit no_cyl field
+ */
+#define LV_COMPAT_CYL 0xFFFE
+
+
+#define FCX_MAX_DATA_FACTOR 65536
+#define DASD_ECKD_RCD_DATA_SIZE 256
+
+
+/*****************************************************************************
+ * SECTION: Type Definitions
+ ****************************************************************************/
+
+struct eckd_count {
+	__u16 cyl;
+	__u16 head;
+	__u8 record;
+	__u8 kl;
+	__u16 dl;
+} __attribute__ ((packed));
+
+struct ch_t {
+	__u16 cyl;
+	__u16 head;
+} __attribute__ ((packed));
+
+struct chs_t {
+	__u16 cyl;
+	__u16 head;
+	__u32 sector;
+} __attribute__ ((packed));
+
+struct chr_t {
+	__u16 cyl;
+	__u16 head;
+	__u8 record;
+} __attribute__ ((packed));
+
+struct geom_t {
+	__u16 cyl;
+	__u16 head;
+	__u32 sector;
+} __attribute__ ((packed));
+
+struct eckd_home {
+	__u8 skip_control[14];
+	__u16 cell_number;
+	__u8 physical_addr[3];
+	__u8 flag;
+	struct ch_t track_addr;
+	__u8 reserved;
+	__u8 key_length;
+	__u8 reserved2[2];
+} __attribute__ ((packed));
+
+struct DE_eckd_data {
+	struct {
+		unsigned char perm:2;	/* Permissions on this extent */
+		unsigned char reserved:1;
+		unsigned char seek:2;	/* Seek control */
+		unsigned char auth:2;	/* Access authorization */
+		unsigned char pci:1;	/* PCI Fetch mode */
+	} __attribute__ ((packed)) mask;
+	struct {
+		unsigned char mode:2;	/* Architecture mode */
+		unsigned char ckd:1;	/* CKD Conversion */
+		unsigned char operation:3;	/* Operation mode */
+		unsigned char cfw:1;	/* Cache fast write */
+		unsigned char dfw:1;	/* DASD fast write */
+	} __attribute__ ((packed)) attributes;
+	__u16 blk_size;		/* Blocksize */
+	__u16 fast_write_id;
+	__u8 ga_additional;	/* Global Attributes Additional */
+	__u8 ga_extended;	/* Global Attributes Extended	*/
+	struct ch_t beg_ext;
+	struct ch_t end_ext;
+	unsigned long long ep_sys_time; /* Ext Parameter - System Time Stamp */
+	__u8 ep_format;        /* Extended Parameter format byte       */
+	__u8 ep_prio;          /* Extended Parameter priority I/O byte */
+	__u8 ep_reserved1;     /* Extended Parameter Reserved	       */
+	__u8 ep_rec_per_track; /* Number of records on a track	       */
+	__u8 ep_reserved[4];   /* Extended Parameter Reserved	       */
+} __attribute__ ((packed));
+
+struct LO_eckd_data {
+	struct {
+		unsigned char orientation:2;
+		unsigned char operation:6;
+	} __attribute__ ((packed)) operation;
+	struct {
+		unsigned char last_bytes_used:1;
+		unsigned char reserved:6;
+		unsigned char read_count_suffix:1;
+	} __attribute__ ((packed)) auxiliary;
+	__u8 unused;
+	__u8 count;
+	struct ch_t seek_addr;
+	struct chr_t search_arg;
+	__u8 sector;
+	__u16 length;
+} __attribute__ ((packed));
+
+struct LRE_eckd_data {
+	struct {
+		unsigned char orientation:2;
+		unsigned char operation:6;
+	} __attribute__ ((packed)) operation;
+	struct {
+		unsigned char length_valid:1;
+		unsigned char length_scope:1;
+		unsigned char imbedded_ccw_valid:1;
+		unsigned char check_bytes:2;
+		unsigned char imbedded_count_valid:1;
+		unsigned char reserved:1;
+		unsigned char read_count_suffix:1;
+	} __attribute__ ((packed)) auxiliary;
+	__u8 imbedded_ccw;
+	__u8 count;
+	struct ch_t seek_addr;
+	struct chr_t search_arg;
+	__u8 sector;
+	__u16 length;
+	__u8 imbedded_count;
+	__u8 extended_operation;
+	__u16 extended_parameter_length;
+	__u8 extended_parameter[0];
+} __attribute__ ((packed));
+
+/* Prefix data for format 0x00 and 0x01 */
+struct PFX_eckd_data {
+	unsigned char format;
+	struct {
+		unsigned char define_extent:1;
+		unsigned char time_stamp:1;
+		unsigned char verify_base:1;
+		unsigned char hyper_pav:1;
+		unsigned char reserved:4;
+	} __attribute__ ((packed)) validity;
+	__u8 base_address;
+	__u8 aux;
+	__u8 base_lss;
+	__u8 reserved[7];
+	struct DE_eckd_data define_extent;
+	struct LRE_eckd_data locate_record;
+} __attribute__ ((packed));
+
+struct dasd_eckd_characteristics {
+	__u16 cu_type;
+	struct {
+		unsigned char support:2;
+		unsigned char async:1;
+		unsigned char reserved:1;
+		unsigned char cache_info:1;
+		unsigned char model:3;
+	} __attribute__ ((packed)) cu_model;
+	__u16 dev_type;
+	__u8 dev_model;
+	struct {
+		unsigned char mult_burst:1;
+		unsigned char RT_in_LR:1;
+		unsigned char reserved1:1;
+		unsigned char RD_IN_LR:1;
+		unsigned char reserved2:4;
+		unsigned char reserved3:8;
+		unsigned char defect_wr:1;
+		unsigned char XRC_supported:1;
+		unsigned char reserved4:1;
+		unsigned char striping:1;
+		unsigned char reserved5:4;
+		unsigned char cfw:1;
+		unsigned char reserved6:2;
+		unsigned char cache:1;
+		unsigned char dual_copy:1;
+		unsigned char dfw:1;
+		unsigned char reset_alleg:1;
+		unsigned char sense_down:1;
+	} __attribute__ ((packed)) facilities;
+	__u8 dev_class;
+	__u8 unit_type;
+	__u16 no_cyl;
+	__u16 trk_per_cyl;
+	__u8 sec_per_trk;
+	__u8 byte_per_track[3];
+	__u16 home_bytes;
+	__u8 formula;
+	union {
+		struct {
+			__u8 f1;
+			__u16 f2;
+			__u16 f3;
+		} __attribute__ ((packed)) f_0x01;
+		struct {
+			__u8 f1;
+			__u8 f2;
+			__u8 f3;
+			__u8 f4;
+			__u8 f5;
+		} __attribute__ ((packed)) f_0x02;
+	} __attribute__ ((packed)) factors;
+	__u16 first_alt_trk;
+	__u16 no_alt_trk;
+	__u16 first_dia_trk;
+	__u16 no_dia_trk;
+	__u16 first_sup_trk;
+	__u16 no_sup_trk;
+	__u8 MDR_ID;
+	__u8 OBR_ID;
+	__u8 director;
+	__u8 rd_trk_set;
+	__u16 max_rec_zero;
+	__u8 reserved1;
+	__u8 RWANY_in_LR;
+	__u8 factor6;
+	__u8 factor7;
+	__u8 factor8;
+	__u8 reserved2[3];
+	__u8 reserved3[6];
+	__u32 long_no_cyl;
+} __attribute__ ((packed));
+
+/* elements of the configuration data */
+struct dasd_ned {
+	struct {
+		__u8 identifier:2;
+		__u8 token_id:1;
+		__u8 sno_valid:1;
+		__u8 subst_sno:1;
+		__u8 recNED:1;
+		__u8 emuNED:1;
+		__u8 reserved:1;
+	} __attribute__ ((packed)) flags;
+	__u8 descriptor;
+	__u8 dev_class;
+	__u8 reserved;
+	__u8 dev_type[6];
+	__u8 dev_model[3];
+	__u8 HDA_manufacturer[3];
+	__u8 HDA_location[2];
+	__u8 HDA_seqno[12];
+	__u8 ID;
+	__u8 unit_addr;
+} __attribute__ ((packed));
+
+struct dasd_sneq {
+	struct {
+		__u8 identifier:2;
+		__u8 reserved:6;
+	} __attribute__ ((packed)) flags;
+	__u8 res1;
+	__u16 format;
+	__u8 res2[4];		/* byte  4- 7 */
+	__u8 sua_flags;		/* byte  8    */
+	__u8 base_unit_addr;	/* byte  9    */
+	__u8 res3[22];		/* byte 10-31 */
+} __attribute__ ((packed));
+
+struct vd_sneq {
+	struct {
+		__u8 identifier:2;
+		__u8 reserved:6;
+	} __attribute__ ((packed)) flags;
+	__u8 res1;
+	__u16 format;
+	__u8 res2[4];	/* byte  4- 7 */
+	__u8 uit[16];	/* byte  8-23 */
+	__u8 res3[8];	/* byte 24-31 */
+} __attribute__ ((packed));
+
+struct dasd_gneq {
+	struct {
+		__u8 identifier:2;
+		__u8 reserved:6;
+	} __attribute__ ((packed)) flags;
+	__u8 reserved[5];
+	struct {
+		__u8 value:2;
+		__u8 number:6;
+	} __attribute__ ((packed)) timeout;
+	__u8 reserved3;
+	__u16 subsystemID;
+	__u8 reserved2[22];
+} __attribute__ ((packed));
+
+struct dasd_rssd_features {
+	char feature[256];
+} __attribute__((packed));
+
+
+/*
+ * Perform Subsystem Function - Prepare for Read Subsystem Data
+ */
+struct dasd_psf_prssd_data {
+	unsigned char order;
+	unsigned char flags;
+	unsigned char reserved[4];
+	unsigned char suborder;
+	unsigned char varies[5];
+} __attribute__ ((packed));
+
+/*
+ * Perform Subsystem Function - Set Subsystem Characteristics
+ */
+struct dasd_psf_ssc_data {
+	unsigned char order;
+	unsigned char flags;
+	unsigned char cu_type[4];
+	unsigned char suborder;
+	unsigned char reserved[59];
+} __attribute__((packed));
+
+
+/*
+ * some structures and definitions for alias handling
+ */
+struct dasd_unit_address_configuration {
+	struct {
+		char ua_type;
+		char base_ua;
+	} unit[256];
+} __attribute__((packed));
+
+
+#define MAX_DEVICES_PER_LCU 256
+
+/* flags on the LCU  */
+#define NEED_UAC_UPDATE  0x01
+#define UPDATE_PENDING	0x02
+
+enum pavtype {NO_PAV, BASE_PAV, HYPER_PAV};
+
+
+struct alias_root {
+	struct list_head serverlist;
+	spinlock_t lock;
+};
+
+struct alias_server {
+	struct list_head server;
+	struct dasd_uid uid;
+	struct list_head lculist;
+};
+
+struct summary_unit_check_work_data {
+	char reason;
+	struct dasd_device *device;
+	struct work_struct worker;
+};
+
+struct read_uac_work_data {
+	struct dasd_device *device;
+	struct delayed_work dwork;
+};
+
+struct alias_lcu {
+	struct list_head lcu;
+	struct dasd_uid uid;
+	enum pavtype pav;
+	char flags;
+	spinlock_t lock;
+	struct list_head grouplist;
+	struct list_head active_devices;
+	struct list_head inactive_devices;
+	struct dasd_unit_address_configuration *uac;
+	struct summary_unit_check_work_data suc_data;
+	struct read_uac_work_data ruac_data;
+	struct dasd_ccw_req *rsu_cqr;
+	struct completion lcu_setup;
+};
+
+struct alias_pav_group {
+	struct list_head group;
+	struct dasd_uid uid;
+	struct alias_lcu *lcu;
+	struct list_head baselist;
+	struct list_head aliaslist;
+	struct dasd_device *next;
+};
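+
+/*
+ * Rough object hierarchy used for alias management (illustrative sketch,
+ * derived from the list heads above):
+ *
+ *	alias_root.serverlist
+ *	    -> alias_server (per server uid), lculist
+ *	        -> alias_lcu (per logical control unit), grouplist
+ *	            -> alias_pav_group with its baselist/aliaslist of
+ *	               base and alias devices
+ */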
+
+struct dasd_eckd_private {
+	struct dasd_eckd_characteristics rdc_data;
+	u8 *conf_data;
+	int conf_len;
+	/* pointers to specific parts in the conf_data */
+	struct dasd_ned *ned;
+	struct dasd_sneq *sneq;
+	struct vd_sneq *vdsneq;
+	struct dasd_gneq *gneq;
+
+	struct eckd_count count_area[5];
+	int init_cqr_status;
+	int uses_cdl;
+	struct attrib_data_t attrib;	/* e.g. cache operations */
+	struct dasd_rssd_features features;
+	u32 real_cyl;
+
+	/* alias management */
+	struct dasd_uid uid;
+	struct alias_pav_group *pavgroup;
+	struct alias_lcu *lcu;
+	int count;
+
+	u32 fcx_max_data;
+};
+
+
+
+int dasd_alias_make_device_known_to_lcu(struct dasd_device *);
+void dasd_alias_disconnect_device_from_lcu(struct dasd_device *);
+int dasd_alias_add_device(struct dasd_device *);
+int dasd_alias_remove_device(struct dasd_device *);
+struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
+void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
+void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
+void dasd_alias_lcu_setup_complete(struct dasd_device *);
+void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
+int dasd_alias_update_add_device(struct dasd_device *);
+#endif				/* DASD_ECKD_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_eer.c b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_eer.c
new file mode 100644
index 0000000..16c5208
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_eer.c
@@ -0,0 +1,709 @@
+/*
+ *  Character device driver for extended error reporting.
+ *
+ *  Copyright (C) 2005 IBM Corporation
+ *  extended error reporting for DASD ECKD devices
+ *  Author(s): Stefan Weinhuber <wein@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "dasd-eckd"
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+#include <linux/atomic.h>
+#include <asm/ebcdic.h>
+
+#include "dasd_int.h"
+#include "dasd_eckd.h"
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif				/* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(eer):"
+
+/*
+ * SECTION: the internal buffer
+ */
+
+/*
+ * The internal buffer is meant to store opaque blobs of data, so it does
+ * not know of higher level concepts like triggers.
+ * It consists of a number of pages that are used as a ringbuffer. Each data
+ * blob is stored in a simple record that consists of an integer, which
+ * contains the size of the following data, and the data bytes themselves.
+ *
+ * To allow for multiple independent readers we create one internal buffer
+ * each time the device is opened and destroy the buffer when the file is
+ * closed again. The number of pages used for this buffer is determined by
+ * the module parameter eer_pages.
+ *
+ * One record can be written to a buffer by using the functions
+ * - dasd_eer_start_record (one time per record to write the size to the
+ *                          buffer and reserve the space for the data)
+ * - dasd_eer_write_buffer (one or more times per record to write the data)
+ * The data can be written in several steps but you will have to compute
+ * the total size up front for the invocation of dasd_eer_start_record.
+ * If the ringbuffer is full, dasd_eer_start_record will remove the required
+ * number of old records.
+ *
+ * A record is typically read in two steps, first read the integer that
+ * specifies the size of the following data, then read the data.
+ * Both can be done by
+ * - dasd_eer_read_buffer
+ *
+ * For all mentioned functions you need to get the bufferlock first and keep
+ * it until a complete record is written or read.
+ *
+ * All information necessary to keep track of an internal buffer is kept in
+ * a struct eerbuffer. The buffer specific to a file pointer is stored in
+ * the private_data field of that file. To be able to write data to all
+ * existing buffers, each buffer is also added to the bufferlist.
+ * If the user does not want to read a complete record in one go, we have to
+ * keep track of the rest of the record. residual stores the number of bytes
+ * that are still to be delivered. If the rest of the record is invalidated between
+ * two reads then residual will be set to -1 so that the next read will fail.
+ * All entries in the eerbuffer structure are protected with the bufferlock.
+ * To avoid races between writing to a buffer on the one side and creating
+ * and destroying buffers on the other side, the bufferlock must also be used
+ * to protect the bufferlist.
+ */
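+
+/*
+ * Writer side in a nutshell (illustrative sketch; hdr/data are placeholders
+ * and bufferlock must be held across the whole sequence):
+ *
+ *	dasd_eer_start_record(eerb, hdr_len + data_len);
+ *	dasd_eer_write_buffer(eerb, (char *) &hdr, hdr_len);
+ *	dasd_eer_write_buffer(eerb, data, data_len);
+ */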
+
+static int eer_pages = 5;
+module_param(eer_pages, int, S_IRUGO|S_IWUSR);
+
+struct eerbuffer {
+	struct list_head list;
+	char **buffer;
+	int buffersize;
+	int buffer_page_count;
+	int head;
+	int tail;
+	int residual;
+};
+
+static LIST_HEAD(bufferlist);
+static DEFINE_SPINLOCK(bufferlock);
+static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
+
+/*
+ * How many free bytes are available on the buffer.
+ * Needs to be called with bufferlock held.
+ */
+static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
+{
+	if (eerb->head < eerb->tail)
+		return eerb->tail - eerb->head - 1;
+	return eerb->buffersize - eerb->head + eerb->tail -1;
+}
+
+/*
+ * How many bytes of buffer space are used.
+ * Needs to be called with bufferlock held.
+ */
+static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
+{
+
+	if (eerb->head >= eerb->tail)
+		return eerb->head - eerb->tail;
+	return eerb->buffersize - eerb->tail + eerb->head;
+}
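+
+/*
+ * Example (illustrative): with head = 100 and tail = 200 the buffer holds
+ * tail - head - 1 = 99 free bytes and buffersize - 100 filled bytes, so
+ * free + filled == buffersize - 1. One byte is always left unused so that
+ * head == tail unambiguously means "buffer empty".
+ */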
+
+/*
+ * The dasd_eer_write_buffer function just copies count bytes of data
+ * to the buffer. Call dasd_eer_start_record first to make sure that
+ * enough free space is available.
+ * Needs to be called with bufferlock held.
+ */
+static void dasd_eer_write_buffer(struct eerbuffer *eerb,
+				  char *data, int count)
+{
+
+	unsigned long headindex, localhead;
+	unsigned long rest, len;
+	char *nextdata;
+
+	nextdata = data;
+	rest = count;
+	while (rest > 0) {
+		headindex = eerb->head / PAGE_SIZE;
+		localhead = eerb->head % PAGE_SIZE;
+		len = min(rest, PAGE_SIZE - localhead);
+		memcpy(eerb->buffer[headindex]+localhead, nextdata, len);
+		nextdata += len;
+		rest -= len;
+		eerb->head += len;
+		if (eerb->head == eerb->buffersize)
+			eerb->head = 0; /* wrap around */
+		BUG_ON(eerb->head > eerb->buffersize);
+	}
+}
+
+/*
+ * Needs to be called with bufferlock held.
+ */
+static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
+{
+
+	unsigned long tailindex, localtail;
+	unsigned long rest, len, finalcount;
+	char *nextdata;
+
+	finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
+	nextdata = data;
+	rest = finalcount;
+	while (rest > 0) {
+		tailindex = eerb->tail / PAGE_SIZE;
+		localtail = eerb->tail % PAGE_SIZE;
+		len = min(rest, PAGE_SIZE - localtail);
+		memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
+		nextdata += len;
+		rest -= len;
+		eerb->tail += len;
+		if (eerb->tail == eerb->buffersize)
+			eerb->tail = 0; /* wrap around */
+		BUG_ON(eerb->tail > eerb->buffersize);
+	}
+	return finalcount;
+}
+
+/*
+ * Whenever you want to write a blob of data to the internal buffer you
+ * have to start by using this function first. It will write the number
+ * of bytes that will be written to the buffer. If necessary it will remove
+ * old records to make room for the new one.
+ * Needs to be called with bufferlock held.
+ */
+static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
+{
+	int tailcount;
+
+	if (count + sizeof(count) > eerb->buffersize)
+		return -ENOMEM;
+	while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
+		if (eerb->residual > 0) {
+			eerb->tail += eerb->residual;
+			if (eerb->tail >= eerb->buffersize)
+				eerb->tail -= eerb->buffersize;
+			eerb->residual = -1;
+		}
+		dasd_eer_read_buffer(eerb, (char *) &tailcount,
+				     sizeof(tailcount));
+		eerb->tail += tailcount;
+		if (eerb->tail >= eerb->buffersize)
+			eerb->tail -= eerb->buffersize;
+	}
+	dasd_eer_write_buffer(eerb, (char*) &count, sizeof(count));
+
+	return 0;
+}
+
+/*
+ * Release pages that are not used anymore.
+ */
+static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
+{
+	int i;
+
+	for (i = 0; i < no_pages; i++)
+		free_page((unsigned long) buf[i]);
+}
+
+/*
+ * Allocate a new set of memory pages.
+ */
+static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
+{
+	int i;
+
+	for (i = 0; i < no_pages; i++) {
+		buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
+		if (!buf[i]) {
+			dasd_eer_free_buffer_pages(buf, i);
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+/*
+ * SECTION: The extended error reporting functionality
+ */
+
+/*
+ * When a DASD device driver wants to report an error, it calls the
+ * function dasd_eer_write and gives the respective trigger ID as
+ * parameter. Currently there are four kinds of triggers:
+ *
+ * DASD_EER_FATALERROR:  all kinds of unrecoverable I/O problems
+ * DASD_EER_PPRCSUSPEND: PPRC was suspended
+ * DASD_EER_NOPATH:      There is no path to the device left.
+ * DASD_EER_STATECHANGE: The state of the device has changed.
+ *
+ * For the first three triggers all required information can be supplied by
+ * the caller. For these triggers a record is written by the function
+ * dasd_eer_write_standard_trigger.
+ *
+ * The DASD_EER_STATECHANGE trigger is special since a sense subsystem
+ * status ccw needs to be executed to gather the necessary sense data first.
+ * The dasd_eer_snss function will queue the SNSS request and the request
+ * callback will then call dasd_eer_write with the DASD_EER_STATECHANGE
+ * trigger.
+ *
+ * To avoid memory allocations at runtime, the necessary memory is allocated
+ * when the extended error reporting is enabled for a device (by
+ * dasd_eer_probe). There is one sense subsystem status request for each
+ * eer enabled DASD device. The presence of the cqr in device->eer_cqr
+ * indicates that eer is enabled for the device. The use of the snss request
+ * is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates
+ * that the cqr is currently in use, dasd_eer_snss cannot start a second
+ * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
+ * the SNSS request will check the bit and call dasd_eer_snss again.
+ */
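+
+/*
+ * Caller side in a nutshell (illustrative sketch): a caller that has given
+ * up on a request would report it like this:
+ *
+ *	if (cqr->status == DASD_CQR_FAILED)
+ *		dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
+ */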
+
+#define SNSS_DATA_SIZE 44
+
+#define DASD_EER_BUSID_SIZE 10
+struct dasd_eer_header {
+	__u32 total_size;
+	__u32 trigger;
+	__u64 tv_sec;
+	__u64 tv_usec;
+	char busid[DASD_EER_BUSID_SIZE];
+} __attribute__ ((packed));
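+
+/*
+ * Layout of one record as it is stored in the internal buffer and handed
+ * out to the reader (illustrative sketch):
+ *
+ *	__u32 length prefix	(written by dasd_eer_start_record, consumed
+ *				 internally by dasd_eer_read, not delivered)
+ *	struct dasd_eer_header
+ *	n * 32 bytes of sense data, or SNSS_DATA_SIZE bytes for the
+ *	DASD_EER_STATECHANGE trigger
+ *	"EOR" including the terminating zero byte
+ */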
+
+/*
+ * The following function can be used for those triggers that have
+ * all necessary data available when the function is called.
+ * If the parameter cqr is not NULL, the chain of requests will be searched
+ * for valid sense data, and all valid sense data sets will be added to
+ * the triggers data.
+ */
+static void dasd_eer_write_standard_trigger(struct dasd_device *device,
+					    struct dasd_ccw_req *cqr,
+					    int trigger)
+{
+	struct dasd_ccw_req *temp_cqr;
+	int data_size;
+	struct timeval tv;
+	struct dasd_eer_header header;
+	unsigned long flags;
+	struct eerbuffer *eerb;
+	char *sense;
+
+	/* go through cqr chain and count the valid sense data sets */
+	data_size = 0;
+	for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
+		if (dasd_get_sense(&temp_cqr->irb))
+			data_size += 32;
+
+	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
+	header.trigger = trigger;
+	do_gettimeofday(&tv);
+	header.tv_sec = tv.tv_sec;
+	header.tv_usec = tv.tv_usec;
+	strncpy(header.busid, dev_name(&device->cdev->dev),
+		DASD_EER_BUSID_SIZE);
+
+	spin_lock_irqsave(&bufferlock, flags);
+	list_for_each_entry(eerb, &bufferlist, list) {
+		dasd_eer_start_record(eerb, header.total_size);
+		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
+		for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
+			sense = dasd_get_sense(&temp_cqr->irb);
+			if (sense)
+				dasd_eer_write_buffer(eerb, sense, 32);
+		}
+		dasd_eer_write_buffer(eerb, "EOR", 4);
+	}
+	spin_unlock_irqrestore(&bufferlock, flags);
+	wake_up_interruptible(&dasd_eer_read_wait_queue);
+}
+
+/*
+ * This function writes a DASD_EER_STATECHANGE trigger.
+ */
+static void dasd_eer_write_snss_trigger(struct dasd_device *device,
+					struct dasd_ccw_req *cqr,
+					int trigger)
+{
+	int data_size;
+	int snss_rc;
+	struct timeval tv;
+	struct dasd_eer_header header;
+	unsigned long flags;
+	struct eerbuffer *eerb;
+
+	snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
+	if (snss_rc)
+		data_size = 0;
+	else
+		data_size = SNSS_DATA_SIZE;
+
+	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
+	header.trigger = DASD_EER_STATECHANGE;
+	do_gettimeofday(&tv);
+	header.tv_sec = tv.tv_sec;
+	header.tv_usec = tv.tv_usec;
+	strncpy(header.busid, dev_name(&device->cdev->dev),
+		DASD_EER_BUSID_SIZE);
+
+	spin_lock_irqsave(&bufferlock, flags);
+	list_for_each_entry(eerb, &bufferlist, list) {
+		dasd_eer_start_record(eerb, header.total_size);
+		dasd_eer_write_buffer(eerb, (char *) &header , sizeof(header));
+		if (!snss_rc)
+			dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
+		dasd_eer_write_buffer(eerb, "EOR", 4);
+	}
+	spin_unlock_irqrestore(&bufferlock, flags);
+	wake_up_interruptible(&dasd_eer_read_wait_queue);
+}
+
+/*
+ * This function is called for all triggers. It calls the appropriate
+ * function that writes the actual trigger records.
+ */
+void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
+		    unsigned int id)
+{
+	if (!device->eer_cqr)
+		return;
+	switch (id) {
+	case DASD_EER_FATALERROR:
+	case DASD_EER_PPRCSUSPEND:
+		dasd_eer_write_standard_trigger(device, cqr, id);
+		break;
+	case DASD_EER_NOPATH:
+		dasd_eer_write_standard_trigger(device, NULL, id);
+		break;
+	case DASD_EER_STATECHANGE:
+		dasd_eer_write_snss_trigger(device, cqr, id);
+		break;
+	default: /* unknown trigger, so we write it without any sense data */
+		dasd_eer_write_standard_trigger(device, NULL, id);
+		break;
+	}
+}
+EXPORT_SYMBOL(dasd_eer_write);
+
+/*
+ * Start a sense subsystem status request.
+ * Needs to be called with the device held.
+ */
+void dasd_eer_snss(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+
+	cqr = device->eer_cqr;
+	if (!cqr)	/* Device not eer enabled. */
+		return;
+	if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
+		/* Sense subsystem status request in use. */
+		set_bit(DASD_FLAG_EER_SNSS, &device->flags);
+		return;
+	}
+	/* cdev is already locked, can't use dasd_add_request_head */
+	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
+	cqr->status = DASD_CQR_QUEUED;
+	list_add(&cqr->devlist, &device->ccw_queue);
+	dasd_schedule_device_bh(device);
+}
+
+/*
+ * Callback function for use with sense subsystem status request.
+ */
+static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
+{
+	struct dasd_device *device = cqr->startdev;
+	unsigned long flags;
+
+	dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	if (device->eer_cqr == cqr) {
+		clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
+		if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
+			/* Another SNSS has been requested in the meantime. */
+			dasd_eer_snss(device);
+		cqr = NULL;
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	if (cqr)
+		/*
+		 * Extended error recovery has been switched off while
+		 * the SNSS request was running. It could even have
+		 * been switched off and on again in which case there
+		 * is a new ccw in device->eer_cqr. Free the "old"
+		 * snss request now.
+		 */
+		dasd_kfree_request(cqr, device);
+}
+
+/*
+ * Enable error reporting on a given device.
+ */
+int dasd_eer_enable(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	unsigned long flags;
+	struct ccw1 *ccw;
+
+	if (device->eer_cqr)
+		return 0;
+
+	if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
+		return -EPERM;	/* FIXME: -EMEDIUMTYPE ? */
+
+	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
+				   SNSS_DATA_SIZE, device);
+	if (IS_ERR(cqr))
+		return -ENOMEM;
+
+	cqr->startdev = device;
+	cqr->retries = 255;
+	cqr->expires = 10 * HZ;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_SNSS;
+	ccw->count = SNSS_DATA_SIZE;
+	ccw->flags = 0;
+	ccw->cda = (__u32)(addr_t) cqr->data;
+
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	cqr->callback = dasd_eer_snss_cb;
+
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	if (!device->eer_cqr) {
+		device->eer_cqr = cqr;
+		cqr = NULL;
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	if (cqr)
+		dasd_kfree_request(cqr, device);
+	return 0;
+}
+
+/*
+ * Disable error reporting on a given device.
+ */
+void dasd_eer_disable(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	unsigned long flags;
+	int in_use;
+
+	if (!device->eer_cqr)
+		return;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	cqr = device->eer_cqr;
+	device->eer_cqr = NULL;
+	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
+	in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	if (cqr && !in_use)
+		dasd_kfree_request(cqr, device);
+}
+
+/*
+ * SECTION: the device operations
+ */
+
+/*
+ * On the one side we need a lock to access our internal buffer, on the
+ * other side a copy_to_user can sleep. So we need to copy the data we have
+ * to transfer in a readbuffer, which is protected by the readbuffer_mutex.
+ */
+static char readbuffer[PAGE_SIZE];
+static DEFINE_MUTEX(readbuffer_mutex);
+
+static int dasd_eer_open(struct inode *inp, struct file *filp)
+{
+	struct eerbuffer *eerb;
+	unsigned long flags;
+
+	eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
+	if (!eerb)
+		return -ENOMEM;
+	eerb->buffer_page_count = eer_pages;
+	if (eerb->buffer_page_count < 1 ||
+	    eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
+		kfree(eerb);
+		DBF_EVENT(DBF_WARNING, "can't open device since module "
+			"parameter eer_pages is smaller than 1 or"
+			" bigger than %d", (int)(INT_MAX / PAGE_SIZE));
+		return -EINVAL;
+	}
+	eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
+	eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *),
+			       GFP_KERNEL);
+	if (!eerb->buffer) {
+		kfree(eerb);
+		return -ENOMEM;
+	}
+	if (dasd_eer_allocate_buffer_pages(eerb->buffer,
+					   eerb->buffer_page_count)) {
+		kfree(eerb->buffer);
+		kfree(eerb);
+		return -ENOMEM;
+	}
+	filp->private_data = eerb;
+	spin_lock_irqsave(&bufferlock, flags);
+	list_add(&eerb->list, &bufferlist);
+	spin_unlock_irqrestore(&bufferlock, flags);
+
+	return nonseekable_open(inp, filp);
+}
+
+static int dasd_eer_close(struct inode *inp, struct file *filp)
+{
+	struct eerbuffer *eerb;
+	unsigned long flags;
+
+	eerb = (struct eerbuffer *) filp->private_data;
+	spin_lock_irqsave(&bufferlock, flags);
+	list_del(&eerb->list);
+	spin_unlock_irqrestore(&bufferlock, flags);
+	dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
+	kfree(eerb->buffer);
+	kfree(eerb);
+
+	return 0;
+}
+
+static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	int tc, rc;
+	int tailcount, effective_count;
+	unsigned long flags;
+	struct eerbuffer *eerb;
+
+	eerb = (struct eerbuffer *) filp->private_data;
+	if (mutex_lock_interruptible(&readbuffer_mutex))
+		return -ERESTARTSYS;
+
+	spin_lock_irqsave(&bufferlock, flags);
+
+	if (eerb->residual < 0) { /* the remainder of this record */
+		                  /* has been deleted             */
+		eerb->residual = 0;
+		spin_unlock_irqrestore(&bufferlock, flags);
+		mutex_unlock(&readbuffer_mutex);
+		return -EIO;
+	} else if (eerb->residual > 0) {
+		/* OK we still have a second half of a record to deliver */
+		effective_count = min(eerb->residual, (int) count);
+		eerb->residual -= effective_count;
+	} else {
+		tc = 0;
+		while (!tc) {
+			tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
+						  sizeof(tailcount));
+			if (!tc) {
+				/* no data available */
+				spin_unlock_irqrestore(&bufferlock, flags);
+				mutex_unlock(&readbuffer_mutex);
+				if (filp->f_flags & O_NONBLOCK)
+					return -EAGAIN;
+				rc = wait_event_interruptible(
+					dasd_eer_read_wait_queue,
+					eerb->head != eerb->tail);
+				if (rc)
+					return rc;
+				if (mutex_lock_interruptible(&readbuffer_mutex))
+					return -ERESTARTSYS;
+				spin_lock_irqsave(&bufferlock, flags);
+			}
+		}
+		WARN_ON(tc != sizeof(tailcount));
+		effective_count = min(tailcount, (int) count);
+		eerb->residual = tailcount - effective_count;
+	}
+
+	tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
+	WARN_ON(tc != effective_count);
+
+	spin_unlock_irqrestore(&bufferlock, flags);
+
+	if (copy_to_user(buf, readbuffer, effective_count)) {
+		mutex_unlock(&readbuffer_mutex);
+		return -EFAULT;
+	}
+
+	mutex_unlock(&readbuffer_mutex);
+	return effective_count;
+}
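+
+/*
+ * A minimal user-space sketch (assuming the misc device appears as
+ * /dev/dasd_eer): each read() returns data from exactly one record, which
+ * starts with a struct dasd_eer_header followed by the sense data and the
+ * "EOR" marker; hdr->total_size gives the full record size.
+ *
+ *	int fd = open("/dev/dasd_eer", O_RDONLY);
+ *	char buf[4096];
+ *	ssize_t n = read(fd, buf, sizeof(buf));
+ *	struct dasd_eer_header *hdr = (struct dasd_eer_header *) buf;
+ *	printf("trigger %u from %s (%u bytes)\n",
+ *	       hdr->trigger, hdr->busid, hdr->total_size);
+ */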
+
+static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable)
+{
+	unsigned int mask;
+	unsigned long flags;
+	struct eerbuffer *eerb;
+
+	eerb = (struct eerbuffer *) filp->private_data;
+	poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
+	spin_lock_irqsave(&bufferlock, flags);
+	if (eerb->head != eerb->tail)
+		mask = POLLIN | POLLRDNORM ;
+	else
+		mask = 0;
+	spin_unlock_irqrestore(&bufferlock, flags);
+	return mask;
+}
+
+static const struct file_operations dasd_eer_fops = {
+	.open		= &dasd_eer_open,
+	.release	= &dasd_eer_close,
+	.read		= &dasd_eer_read,
+	.poll		= &dasd_eer_poll,
+	.owner		= THIS_MODULE,
+	.llseek		= noop_llseek,
+};
+
+static struct miscdevice *dasd_eer_dev = NULL;
+
+int __init dasd_eer_init(void)
+{
+	int rc;
+
+	dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
+	if (!dasd_eer_dev)
+		return -ENOMEM;
+
+	dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
+	dasd_eer_dev->name  = "dasd_eer";
+	dasd_eer_dev->fops  = &dasd_eer_fops;
+
+	rc = misc_register(dasd_eer_dev);
+	if (rc) {
+		kfree(dasd_eer_dev);
+		dasd_eer_dev = NULL;
+		DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
+		       "register misc device");
+		return rc;
+	}
+
+	return 0;
+}
+
+void dasd_eer_exit(void)
+{
+	if (dasd_eer_dev) {
+		misc_deregister(dasd_eer_dev);
+		kfree(dasd_eer_dev);
+		dasd_eer_dev = NULL;
+	}
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_erp.c b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_erp.c
new file mode 100644
index 0000000..0eafe2e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_erp.c
@@ -0,0 +1,185 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_erp.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Carsten Otte <Cotte@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ *
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/ctype.h>
+#include <linux/init.h>
+
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include <asm/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_erp:"
+
+#include "dasd_int.h"
+
+struct dasd_ccw_req *
+dasd_alloc_erp_request(char *magic, int cplength, int datasize,
+		       struct dasd_device * device)
+{
+	unsigned long flags;
+	struct dasd_ccw_req *cqr;
+	char *data;
+	int size;
+
+	/* Sanity checks */
+	BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
+	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
+
+	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
+	if (cplength > 0)
+		size += cplength * sizeof(struct ccw1);
+	if (datasize > 0)
+		size += datasize;
+	spin_lock_irqsave(&device->mem_lock, flags);
+	cqr = (struct dasd_ccw_req *)
+		dasd_alloc_chunk(&device->erp_chunks, size);
+	spin_unlock_irqrestore(&device->mem_lock, flags);
+	if (cqr == NULL)
+		return ERR_PTR(-ENOMEM);
+	memset(cqr, 0, sizeof(struct dasd_ccw_req));
+	INIT_LIST_HEAD(&cqr->devlist);
+	INIT_LIST_HEAD(&cqr->blocklist);
+	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
+	cqr->cpaddr = NULL;
+	if (cplength > 0) {
+		cqr->cpaddr = (struct ccw1 *) data;
+		data += cplength*sizeof(struct ccw1);
+		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
+	}
+	cqr->data = NULL;
+	if (datasize > 0) {
+		cqr->data = data;
+		memset(cqr->data, 0, datasize);
+	}
+	strncpy((char *) &cqr->magic, magic, 4);
+	ASCEBC((char *) &cqr->magic, 4);
+	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	dasd_get_device(device);
+	return cqr;
+}
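+
+/*
+ * Layout of the chunk returned above (illustrative sketch):
+ *
+ *	[ struct dasd_ccw_req, size rounded up to a multiple of 8 ]
+ *	[ cplength * sizeof(struct ccw1) ]	<- cqr->cpaddr
+ *	[ datasize bytes ]			<- cqr->data
+ */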
+
+void
+dasd_free_erp_request(struct dasd_ccw_req *cqr, struct dasd_device * device)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&device->mem_lock, flags);
+	dasd_free_chunk(&device->erp_chunks, cqr);
+	spin_unlock_irqrestore(&device->mem_lock, flags);
+	atomic_dec(&device->ref_count);
+}
+
+
+/*
+ * dasd_default_erp_action just retries the current cqr
+ */
+struct dasd_ccw_req *
+dasd_default_erp_action(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+
+	device = cqr->startdev;
+
+	/* just retry - there is nothing to save ... I got no sense data.... */
+	if (cqr->retries > 0) {
+		DBF_DEV_EVENT(DBF_DEBUG, device,
+			      "default ERP called (%i retries left)",
+			      cqr->retries);
+		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+			cqr->lpm = device->path_data.opm;
+		cqr->status = DASD_CQR_FILLED;
+	} else {
+		pr_err("%s: default ERP has run out of retries and failed\n",
+		       dev_name(&device->cdev->dev));
+		cqr->status = DASD_CQR_FAILED;
+		cqr->stopclk = get_clock();
+	}
+	return cqr;
+}				/* end dasd_default_erp_action */
+
+/*
+ * DESCRIPTION
+ *   Frees all ERPs of the current ERP Chain and set the status
+ *   of the original CQR either to DASD_CQR_DONE if ERP was successful
+ *   or to DASD_CQR_FAILED if ERP was NOT successful.
+ *   NOTE: This function is only called if no discipline postaction
+ *	   is available
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ *
+ * RETURN VALUES
+ *   cqr		pointer to the original CQR
+ */
+struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
+{
+	int success;
+
+	BUG_ON(cqr->refers == NULL || cqr->function == NULL);
+
+	success = cqr->status == DASD_CQR_DONE;
+
+	/* free all ERPs - but NOT the original cqr */
+	while (cqr->refers != NULL) {
+		struct dasd_ccw_req *refers;
+
+		refers = cqr->refers;
+		/* remove the request from the block queue */
+		list_del(&cqr->blocklist);
+		/* free the finished erp request */
+		dasd_free_erp_request(cqr, cqr->memdev);
+		cqr = refers;
+	}
+
+	/* set corresponding status to original cqr */
+	if (success)
+		cqr->status = DASD_CQR_DONE;
+	else {
+		cqr->status = DASD_CQR_FAILED;
+		cqr->stopclk = get_clock();
+	}
+
+	return cqr;
+
+}				/* end default_erp_postaction */
+
+void
+dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
+{
+	struct dasd_device *device;
+
+	device = cqr->startdev;
+	/* dump sense data */
+	if (device->discipline && device->discipline->dump_sense)
+		device->discipline->dump_sense(device, cqr, irb);
+}
+
+void
+dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb)
+{
+	struct dasd_device *device;
+
+	device = cqr->startdev;
+	/* dump sense data to s390 debug feature */
+	if (device->discipline && device->discipline->dump_sense_dbf)
+		device->discipline->dump_sense_dbf(device, irb, "log");
+}
+EXPORT_SYMBOL(dasd_log_sense_dbf);
+
+EXPORT_SYMBOL(dasd_default_erp_action);
+EXPORT_SYMBOL(dasd_default_erp_postaction);
+EXPORT_SYMBOL(dasd_alloc_erp_request);
+EXPORT_SYMBOL(dasd_free_erp_request);
+EXPORT_SYMBOL(dasd_log_sense);
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_fba.c b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_fba.c
new file mode 100644
index 0000000..a62a753
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_fba.c
@@ -0,0 +1,631 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_fba.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ */
+
+#define KMSG_COMPONENT "dasd-fba"
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <asm/debug.h>
+
+#include <linux/slab.h>
+#include <linux/hdreg.h>	/* HDIO_GETGEO			    */
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/idals.h>
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/ccwdev.h>
+
+#include "dasd_int.h"
+#include "dasd_fba.h"
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif				/* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(fba):"
+
+#define DASD_FBA_CCW_WRITE 0x41
+#define DASD_FBA_CCW_READ 0x42
+#define DASD_FBA_CCW_LOCATE 0x43
+#define DASD_FBA_CCW_DEFINE_EXTENT 0x63
+
+MODULE_LICENSE("GPL");
+
+static struct dasd_discipline dasd_fba_discipline;
+
+struct dasd_fba_private {
+	struct dasd_fba_characteristics rdc_data;
+};
+
+static struct ccw_device_id dasd_fba_ids[] = {
+	{ CCW_DEVICE_DEVTYPE (0x6310, 0, 0x9336, 0), .driver_info = 0x1},
+	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3370, 0), .driver_info = 0x2},
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ccw, dasd_fba_ids);
+
+static struct ccw_driver dasd_fba_driver; /* see below */
+static int
+dasd_fba_probe(struct ccw_device *cdev)
+{
+	return dasd_generic_probe(cdev, &dasd_fba_discipline);
+}
+
+static int
+dasd_fba_set_online(struct ccw_device *cdev)
+{
+	return dasd_generic_set_online(cdev, &dasd_fba_discipline);
+}
+
+static struct ccw_driver dasd_fba_driver = {
+	.driver = {
+		.name	= "dasd-fba",
+		.owner	= THIS_MODULE,
+	},
+	.ids         = dasd_fba_ids,
+	.probe       = dasd_fba_probe,
+	.remove      = dasd_generic_remove,
+	.set_offline = dasd_generic_set_offline,
+	.set_online  = dasd_fba_set_online,
+	.notify      = dasd_generic_notify,
+	.path_event  = dasd_generic_path_event,
+	.freeze      = dasd_generic_pm_freeze,
+	.thaw	     = dasd_generic_restore_device,
+	.restore     = dasd_generic_restore_device,
+	.int_class   = IOINT_DAS,
+};
+
+static void
+define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
+	      int blksize, int beg, int nr)
+{
+	ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
+	ccw->flags = 0;
+	ccw->count = 16;
+	ccw->cda = (__u32) __pa(data);
+	memset(data, 0, sizeof (struct DE_fba_data));
+	if (rw == WRITE)
+		(data->mask).perm = 0x0;
+	else if (rw == READ)
+		(data->mask).perm = 0x1;
+	else
+		data->mask.perm = 0x2;
+	data->blk_size = blksize;
+	data->ext_loc = beg;
+	data->ext_end = nr - 1;
+}
+
+static void
+locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
+	      int block_nr, int block_ct)
+{
+	ccw->cmd_code = DASD_FBA_CCW_LOCATE;
+	ccw->flags = 0;
+	ccw->count = 8;
+	ccw->cda = (__u32) __pa(data);
+	memset(data, 0, sizeof (struct LO_fba_data));
+	if (rw == WRITE)
+		data->operation.cmd = 0x5;
+	else if (rw == READ)
+		data->operation.cmd = 0x6;
+	else
+		data->operation.cmd = 0x8;
+	data->blk_nr = block_nr;
+	data->blk_ct = block_ct;
+}
+
+static int
+dasd_fba_check_characteristics(struct dasd_device *device)
+{
+	struct dasd_block *block;
+	struct dasd_fba_private *private;
+	struct ccw_device *cdev = device->cdev;
+	int rc;
+	int readonly;
+
+	private = (struct dasd_fba_private *) device->private;
+	if (!private) {
+		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+		if (!private) {
+			dev_warn(&device->cdev->dev,
+				 "Allocating memory for private DASD "
+				 "data failed\n");
+			return -ENOMEM;
+		}
+		device->private = (void *) private;
+	} else {
+		memset(private, 0, sizeof(*private));
+	}
+	block = dasd_alloc_block();
+	if (IS_ERR(block)) {
+		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
+				"dasd block structure");
+		device->private = NULL;
+		kfree(private);
+		return PTR_ERR(block);
+	}
+	device->block = block;
+	block->base = device;
+
+	/* Read Device Characteristics */
+	rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
+					 &private->rdc_data, 32);
+	if (rc) {
+		DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
+				"characteristics returned error %d", rc);
+		device->block = NULL;
+		dasd_free_block(block);
+		device->private = NULL;
+		kfree(private);
+		return rc;
+	}
+
+	device->default_expires = DASD_EXPIRES;
+	device->path_data.opm = LPM_ANYPATH;
+
+	readonly = dasd_device_is_ro(device);
+	if (readonly)
+		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+
+	dev_info(&device->cdev->dev,
+		 "New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB "
+		 "and %d B/blk%s\n",
+		 cdev->id.dev_type,
+		 cdev->id.dev_model,
+		 cdev->id.cu_type,
+		 cdev->id.cu_model,
+		 ((private->rdc_data.blk_bdsa *
+		   (private->rdc_data.blk_size >> 9)) >> 11),
+		 private->rdc_data.blk_size,
+		 readonly ? ", read-only device" : "");
+	return 0;
+}
+
+static int dasd_fba_do_analysis(struct dasd_block *block)
+{
+	struct dasd_fba_private *private;
+	int sb, rc;
+
+	private = (struct dasd_fba_private *) block->base->private;
+	rc = dasd_check_blocksize(private->rdc_data.blk_size);
+	if (rc) {
+		DBF_DEV_EVENT(DBF_WARNING, block->base, "unknown blocksize %d",
+			    private->rdc_data.blk_size);
+		return rc;
+	}
+	block->blocks = private->rdc_data.blk_bdsa;
+	block->bp_block = private->rdc_data.blk_size;
+	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
+	for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
+		block->s2b_shift++;
+	return 0;
+}
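+
+/*
+ * Example (illustrative): for blk_size = 2048 the shift loop above runs
+ * twice (512 -> 1024 -> 2048), so s2b_shift = 2 and a 512-byte sector
+ * count is converted to a block count with ">> 2".
+ */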
+
+static int dasd_fba_fill_geometry(struct dasd_block *block,
+				  struct hd_geometry *geo)
+{
+	if (dasd_check_blocksize(block->bp_block) != 0)
+		return -EINVAL;
+	geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
+	geo->heads = 16;
+	geo->sectors = 128 >> block->s2b_shift;
+	return 0;
+}
+
+static dasd_erp_fn_t
+dasd_fba_erp_action(struct dasd_ccw_req * cqr)
+{
+	return dasd_default_erp_action;
+}
+
+static dasd_erp_fn_t
+dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
+{
+	if (cqr->function == dasd_default_erp_action)
+		return dasd_default_erp_postaction;
+
+	DBF_DEV_EVENT(DBF_WARNING, cqr->startdev, "unknown ERP action %p",
+		    cqr->function);
+	return NULL;
+}
+
+static void dasd_fba_check_for_device_change(struct dasd_device *device,
+					     struct dasd_ccw_req *cqr,
+					     struct irb *irb)
+{
+	char mask;
+
+	/* first of all check for state change pending interrupt */
+	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
+	if ((irb->scsw.cmd.dstat & mask) == mask)
+		dasd_generic_handle_state_change(device);
+}
+
+static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
+					      struct dasd_block *block,
+					      struct request *req)
+{
+	struct dasd_fba_private *private;
+	unsigned long *idaws;
+	struct LO_fba_data *LO_data;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	struct req_iterator iter;
+	struct bio_vec *bv;
+	char *dst;
+	int count, cidaw, cplength, datasize;
+	sector_t recid, first_rec, last_rec;
+	unsigned int blksize, off;
+	unsigned char cmd;
+
+	private = (struct dasd_fba_private *) block->base->private;
+	if (rq_data_dir(req) == READ) {
+		cmd = DASD_FBA_CCW_READ;
+	} else if (rq_data_dir(req) == WRITE) {
+		cmd = DASD_FBA_CCW_WRITE;
+	} else
+		return ERR_PTR(-EINVAL);
+	blksize = block->bp_block;
+	/* Calculate record id of first and last block. */
+	first_rec = blk_rq_pos(req) >> block->s2b_shift;
+	last_rec =
+		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+	/* Check struct bio and count the number of blocks for the request. */
+	count = 0;
+	cidaw = 0;
+	rq_for_each_segment(bv, req, iter) {
+		if (bv->bv_len & (blksize - 1))
+			/* Fba can only do full blocks. */
+			return ERR_PTR(-EINVAL);
+		count += bv->bv_len >> (block->s2b_shift + 9);
+#if defined(CONFIG_64BIT)
+		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
+			cidaw += bv->bv_len / blksize;
+#endif
+	}
+	/* Paranoia. */
+	if (count != last_rec - first_rec + 1)
+		return ERR_PTR(-EINVAL);
+	/* 1x define extent + 1x locate record + number of blocks */
+	cplength = 2 + count;
+	/* 1x define extent + 1x locate record */
+	datasize = sizeof(struct DE_fba_data) + sizeof(struct LO_fba_data) +
+		cidaw * sizeof(unsigned long);
+	/*
+	 * Find out number of additional locate record ccws if the device
+	 * can't do data chaining.
+	 */
+	if (private->rdc_data.mode.bits.data_chain == 0) {
+		cplength += count - 1;
+		datasize += (count - 1)*sizeof(struct LO_fba_data);
+	}
+	/* Allocate the ccw request. */
+	cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+	if (IS_ERR(cqr))
+		return cqr;
+	ccw = cqr->cpaddr;
+	/* First ccw is define extent. */
+	define_extent(ccw++, cqr->data, rq_data_dir(req),
+		      block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
+	/* Build locate_record + read/write ccws. */
+	idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
+	LO_data = (struct LO_fba_data *) (idaws + cidaw);
+	/* Locate record for all blocks for smart devices. */
+	if (private->rdc_data.mode.bits.data_chain != 0) {
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
+	}
+	recid = first_rec;
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		if (dasd_page_cache) {
+			char *copy = kmem_cache_alloc(dasd_page_cache,
+						      GFP_DMA | __GFP_NOWARN);
+			if (copy && rq_data_dir(req) == WRITE)
+				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+			if (copy)
+				dst = copy + bv->bv_offset;
+		}
+		for (off = 0; off < bv->bv_len; off += blksize) {
+			/* Locate record for stupid devices. */
+			if (private->rdc_data.mode.bits.data_chain == 0) {
+				ccw[-1].flags |= CCW_FLAG_CC;
+				locate_record(ccw, LO_data++,
+					      rq_data_dir(req),
+					      recid - first_rec, 1);
+				ccw->flags = CCW_FLAG_CC;
+				ccw++;
+			} else {
+				if (recid > first_rec)
+					ccw[-1].flags |= CCW_FLAG_DC;
+				else
+					ccw[-1].flags |= CCW_FLAG_CC;
+			}
+			ccw->cmd_code = cmd;
+			ccw->count = block->bp_block;
+			if (idal_is_needed(dst, blksize)) {
+				ccw->cda = (__u32)(addr_t) idaws;
+				ccw->flags = CCW_FLAG_IDA;
+				idaws = idal_create_words(idaws, dst, blksize);
+			} else {
+				ccw->cda = (__u32)(addr_t) dst;
+				ccw->flags = 0;
+			}
+			ccw++;
+			dst += blksize;
+			recid++;
+		}
+	}
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->startdev = memdev;
+	cqr->memdev = memdev;
+	cqr->block = block;
+	cqr->expires = memdev->default_expires * HZ;	/* default 5 minutes */
+	cqr->retries = 32;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
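+
+/*
+ * Shape of the resulting channel program (illustrative sketch):
+ *
+ *   devices with data chaining:
+ *	DEFINE EXTENT -cc-> LOCATE RECORD -cc-> R/W -dc-> R/W -dc-> ...
+ *
+ *   devices without data chaining:
+ *	DEFINE EXTENT -cc-> LOCATE RECORD -cc-> R/W -cc-> LOCATE RECORD
+ *	-cc-> R/W -cc-> ...
+ */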
+
+static int
+dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+{
+	struct dasd_fba_private *private;
+	struct ccw1 *ccw;
+	struct req_iterator iter;
+	struct bio_vec *bv;
+	char *dst, *cda;
+	unsigned int blksize, off;
+	int status;
+
+	if (!dasd_page_cache)
+		goto out;
+	private = (struct dasd_fba_private *) cqr->block->base->private;
+	blksize = cqr->block->bp_block;
+	ccw = cqr->cpaddr;
+	/* Skip over define extent & locate record. */
+	ccw++;
+	if (private->rdc_data.mode.bits.data_chain != 0)
+		ccw++;
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		for (off = 0; off < bv->bv_len; off += blksize) {
+			/* Skip locate record. */
+			if (private->rdc_data.mode.bits.data_chain == 0)
+				ccw++;
+			if (dst) {
+				if (ccw->flags & CCW_FLAG_IDA)
+					cda = *((char **)((addr_t) ccw->cda));
+				else
+					cda = (char *)((addr_t) ccw->cda);
+				if (dst != cda) {
+					if (rq_data_dir(req) == READ)
+						memcpy(dst, cda, bv->bv_len);
+					kmem_cache_free(dasd_page_cache,
+					    (void *)((addr_t)cda & PAGE_MASK));
+				}
+				dst = NULL;
+			}
+			ccw++;
+		}
+	}
+out:
+	status = cqr->status == DASD_CQR_DONE;
+	dasd_sfree_request(cqr, cqr->memdev);
+	return status;
+}
+
+static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
+{
+	cqr->status = DASD_CQR_FILLED;
+}
+
+static int
+dasd_fba_fill_info(struct dasd_device * device,
+		   struct dasd_information2_t * info)
+{
+	info->label_block = 1;
+	info->FBA_layout = 1;
+	info->format = DASD_FORMAT_LDL;
+	info->characteristics_size = sizeof(struct dasd_fba_characteristics);
+	memcpy(info->characteristics,
+	       &((struct dasd_fba_private *) device->private)->rdc_data,
+	       sizeof (struct dasd_fba_characteristics));
+	info->confdata_size = 0;
+	return 0;
+}
+
+static void
+dasd_fba_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
+			char *reason)
+{
+	u64 *sense;
+
+	sense = (u64 *) dasd_get_sense(irb);
+	if (sense) {
+		DBF_DEV_EVENT(DBF_EMERG, device,
+			      "%s: %s %02x%02x%02x %016llx %016llx %016llx "
+			      "%016llx", reason,
+			      scsw_is_tm(&irb->scsw) ? "t" : "c",
+			      scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
+			      scsw_dstat(&irb->scsw), sense[0], sense[1],
+			      sense[2], sense[3]);
+	} else {
+		DBF_DEV_EVENT(DBF_EMERG, device, "%s",
+			      "SORRY - NO VALID SENSE AVAILABLE\n");
+	}
+}
+
+
+static void
+dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
+		    struct irb *irb)
+{
+	char *page;
+	struct ccw1 *act, *end, *last;
+	int len, sl, sct, count;
+
+	page = (char *) get_zeroed_page(GFP_ATOMIC);
+	if (page == NULL) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "No memory to dump sense data");
+		return;
+	}
+	len = sprintf(page, KERN_ERR PRINTK_HEADER
+		      " I/O status report for device %s:\n",
+		      dev_name(&device->cdev->dev));
+	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
+		       irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
+	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+		       " device %s: Failing CCW: %p\n",
+		       dev_name(&device->cdev->dev),
+		       (void *) (addr_t) irb->scsw.cmd.cpa);
+	if (irb->esw.esw0.erw.cons) {
+		for (sl = 0; sl < 4; sl++) {
+			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+				       " Sense(hex) %2d-%2d:",
+				       (8 * sl), ((8 * sl) + 7));
+
+			for (sct = 0; sct < 8; sct++) {
+				len += sprintf(page + len, " %02x",
+					       irb->ecw[8 * sl + sct]);
+			}
+			len += sprintf(page + len, "\n");
+		}
+	} else {
+	        len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " SORRY - NO VALID SENSE AVAILABLE\n");
+	}
+	printk(KERN_ERR "%s", page);
+
+	/* dump the Channel Program */
+	/* print first CCWs (maximum 8) */
+	act = req->cpaddr;
+	for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
+	end = min(act + 8, last);
+	len = sprintf(page, KERN_ERR PRINTK_HEADER
+		      " Related CP in req: %p\n", req);
+	while (act <= end) {
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " CCW %p: %08X %08X DAT:",
+			       act, ((int *) act)[0], ((int *) act)[1]);
+		for (count = 0; count < 32 && count < act->count;
+		     count += sizeof(int))
+			len += sprintf(page + len, " %08X",
+				       ((int *) (addr_t) act->cda)
+				       [(count>>2)]);
+		len += sprintf(page + len, "\n");
+		act++;
+	}
+	printk(KERN_ERR "%s", page);
+
+
+	/* print failing CCW area */
+	len = 0;
+	if (act <  ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
+		act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
+	}
+	end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
+	while (act <= end) {
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " CCW %p: %08X %08X DAT:",
+			       act, ((int *) act)[0], ((int *) act)[1]);
+		for (count = 0; count < 32 && count < act->count;
+		     count += sizeof(int))
+			len += sprintf(page + len, " %08X",
+				       ((int *) (addr_t) act->cda)
+				       [(count>>2)]);
+		len += sprintf(page + len, "\n");
+		act++;
+	}
+
+	/* print last CCWs */
+	if (act <  last - 2) {
+		act = last - 2;
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
+	}
+	while (act <= last) {
+		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+			       " CCW %p: %08X %08X DAT:",
+			       act, ((int *) act)[0], ((int *) act)[1]);
+		for (count = 0; count < 32 && count < act->count;
+		     count += sizeof(int))
+			len += sprintf(page + len, " %08X",
+				       ((int *) (addr_t) act->cda)
+				       [(count>>2)]);
+		len += sprintf(page + len, "\n");
+		act++;
+	}
+	if (len > 0)
+		printk(KERN_ERR "%s", page);
+	free_page((unsigned long) page);
+}
+
+/*
+ * max_blocks is dependent on the amount of storage that is available
+ * in the static io buffer for each device. Currently each device has
+ * 8192 bytes (=2 pages). For 64 bit one struct dasd_mchunk has
+ * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
+ * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
+ * addition we have one define extent ccw + 16 bytes of data and a
+ * locate record ccw for each block (stupid devices!) + 16 bytes of data.
+ * That makes:
+ * (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum.
+ * We want to fit two into the available memory so that we can immediately
+ * start the next request if one finishes off. That makes 100.1 blocks
+ * for one request. Give a little safety and the result is 96.
+ */
+static struct dasd_discipline dasd_fba_discipline = {
+	.owner = THIS_MODULE,
+	.name = "FBA ",
+	.ebcname = "FBA ",
+	.max_blocks = 96,
+	.check_device = dasd_fba_check_characteristics,
+	.do_analysis = dasd_fba_do_analysis,
+	.verify_path = dasd_generic_verify_path,
+	.fill_geometry = dasd_fba_fill_geometry,
+	.start_IO = dasd_start_IO,
+	.term_IO = dasd_term_IO,
+	.handle_terminated_request = dasd_fba_handle_terminated_request,
+	.erp_action = dasd_fba_erp_action,
+	.erp_postaction = dasd_fba_erp_postaction,
+	.check_for_device_change = dasd_fba_check_for_device_change,
+	.build_cp = dasd_fba_build_cp,
+	.free_cp = dasd_fba_free_cp,
+	.dump_sense = dasd_fba_dump_sense,
+	.dump_sense_dbf = dasd_fba_dump_sense_dbf,
+	.fill_info = dasd_fba_fill_info,
+};
+
+static int __init
+dasd_fba_init(void)
+{
+	int ret;
+
+	ASCEBC(dasd_fba_discipline.ebcname, 4);
+	ret = ccw_driver_register(&dasd_fba_driver);
+	if (!ret)
+		wait_for_device_probe();
+
+	return ret;
+}
+
+static void __exit
+dasd_fba_cleanup(void)
+{
+	ccw_driver_unregister(&dasd_fba_driver);
+}
+
+module_init(dasd_fba_init);
+module_exit(dasd_fba_cleanup);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_fba.h b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_fba.h
new file mode 100644
index 0000000..14c910b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_fba.h
@@ -0,0 +1,72 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_fba.h
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ *
+ */
+
+#ifndef DASD_FBA_H
+#define DASD_FBA_H
+
+struct DE_fba_data {
+	struct {
+		unsigned char perm:2;	/* Permissions on this extent */
+		unsigned char zero:2;	/* Must be zero */
+		unsigned char da:1;	/* usually zero */
+		unsigned char diag:1;	/* allow diagnose */
+		unsigned char zero2:2;	/* zero */
+	} __attribute__ ((packed)) mask;
+	__u8 zero;		/* Must be zero */
+	__u16 blk_size;		/* Blocksize */
+	__u32 ext_loc;		/* Extent locator */
+	__u32 ext_beg;		/* logical number of block 0 in extent */
+	__u32 ext_end;		/* logical number of last block in extent */
+} __attribute__ ((packed));
+
+struct LO_fba_data {
+	struct {
+		unsigned char zero:4;
+		unsigned char cmd:4;
+	} __attribute__ ((packed)) operation;
+	__u8 auxiliary;
+	__u16 blk_ct;
+	__u32 blk_nr;
+} __attribute__ ((packed));
+
+struct dasd_fba_characteristics {
+	union {
+		__u8 c;
+		struct {
+			unsigned char reserved:1;
+			unsigned char overrunnable:1;
+			unsigned char burst_byte:1;
+			unsigned char data_chain:1;
+			unsigned char zeros:4;
+		} __attribute__ ((packed)) bits;
+	} __attribute__ ((packed)) mode;
+	union {
+		__u8 c;
+		struct {
+			unsigned char zero0:1;
+			unsigned char removable:1;
+			unsigned char shared:1;
+			unsigned char zero1:1;
+			unsigned char mam:1;
+			unsigned char zeros:3;
+		} __attribute__ ((packed)) bits;
+	} __attribute__ ((packed)) features;
+	__u8 dev_class;
+	__u8 unit_type;
+	__u16 blk_size;
+	__u32 blk_per_cycl;
+	__u32 blk_per_bound;
+	__u32 blk_bdsa;
+	__u32 reserved0;
+	__u16 reserved1;
+	__u16 blk_ce;
+	__u32 reserved2;
+	__u16 reserved3;
+} __attribute__ ((packed));
+
+#endif				/* DASD_FBA_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_genhd.c b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_genhd.c
new file mode 100644
index 0000000..19a1ff0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_genhd.c
@@ -0,0 +1,180 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_genhd.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Carsten Otte <Cotte@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ *
+ * gendisk related functions for the dasd driver.
+ *
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/blkpg.h>
+
+#include <asm/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_gendisk:"
+
+#include "dasd_int.h"
+
+/*
+ * Allocate and register gendisk structure for device.
+ */
+int dasd_gendisk_alloc(struct dasd_block *block)
+{
+	struct gendisk *gdp;
+	struct dasd_device *base;
+	int len;
+
+	/* Make sure the minor for this device exists. */
+	base = block->base;
+	if (base->devindex >= DASD_PER_MAJOR)
+		return -EBUSY;
+
+	gdp = alloc_disk(1 << DASD_PARTN_BITS);
+	if (!gdp)
+		return -ENOMEM;
+
+	/* Initialize gendisk structure. */
+	gdp->major = DASD_MAJOR;
+	gdp->first_minor = base->devindex << DASD_PARTN_BITS;
+	gdp->fops = &dasd_device_operations;
+	gdp->driverfs_dev = &base->cdev->dev;
+
+	/*
+	 * Set device name.
+	 *   dasda - dasdz : 26 devices
+	 *   dasdaa - dasdzz : 676 devices, added up = 702
+	 *   dasdaaa - dasdzzz : 17576 devices, added up = 18278
+	 *   dasdaaaa - dasdzzzz : 456976 devices, added up = 475252
+	 */
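+	/*
+	 * For example (illustrative): devindex 0 -> "dasda", 25 -> "dasdz",
+	 * 26 -> "dasdaa", 701 -> "dasdzz", 702 -> "dasdaaa".
+	 */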
+	len = sprintf(gdp->disk_name, "dasd");
+	if (base->devindex > 25) {
+		if (base->devindex > 701) {
+			if (base->devindex > 18277)
+			        len += sprintf(gdp->disk_name + len, "%c",
+					       'a'+(((base->devindex-18278)
+						     /17576)%26));
+			len += sprintf(gdp->disk_name + len, "%c",
+				       'a'+(((base->devindex-702)/676)%26));
+		}
+		len += sprintf(gdp->disk_name + len, "%c",
+			       'a'+(((base->devindex-26)/26)%26));
+	}
+	len += sprintf(gdp->disk_name + len, "%c", 'a'+(base->devindex%26));
+
+	if (base->features & DASD_FEATURE_READONLY ||
+	    test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
+		set_disk_ro(gdp, 1);
+	dasd_add_link_to_gendisk(gdp, base);
+	gdp->queue = block->request_queue;
+	block->gdp = gdp;
+	set_capacity(block->gdp, 0);
+	add_disk(block->gdp);
+	return 0;
+}
+
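Reviewer note: the base-26 naming scheme described in the comment above is easy to verify outside the kernel. The following stand-alone C sketch is a hypothetical helper, not part of this patch; it reproduces the same index-to-name mapping and prints the boundary cases mentioned in the comment.

#include <stdio.h>

/* Mirror of the in-kernel naming logic: dasda..dasdz, dasdaa..dasdzz, ... */
static void dasd_name(unsigned int devindex, char *buf)
{
	int len = sprintf(buf, "dasd");

	if (devindex > 25) {
		if (devindex > 701) {
			if (devindex > 18277)
				len += sprintf(buf + len, "%c",
					       'a' + (((devindex - 18278) / 17576) % 26));
			len += sprintf(buf + len, "%c",
				       'a' + (((devindex - 702) / 676) % 26));
		}
		len += sprintf(buf + len, "%c",
			       'a' + (((devindex - 26) / 26) % 26));
	}
	sprintf(buf + len, "%c", 'a' + (devindex % 26));
}

int main(void)
{
	unsigned int samples[] = { 0, 25, 26, 701, 702, 18277, 18278 };
	unsigned int i;
	char name[16];

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		dasd_name(samples[i], name);
		printf("devindex %6u -> %s\n", samples[i], name);
	}
	return 0;
}

Running it should print dasda for index 0, dasdaa for 26, dasdaaa for 702 and dasdaaaa for 18278, matching the cumulative totals listed in the comment.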
+/*
+ * Unregister and free gendisk structure for device.
+ */
+void dasd_gendisk_free(struct dasd_block *block)
+{
+	if (block->gdp) {
+		del_gendisk(block->gdp);
+		block->gdp->queue = NULL;
+		block->gdp->private_data = NULL;
+		put_disk(block->gdp);
+		block->gdp = NULL;
+	}
+}
+
+/*
+ * Trigger a partition detection.
+ */
+int dasd_scan_partitions(struct dasd_block *block)
+{
+	struct block_device *bdev;
+
+	bdev = bdget_disk(block->gdp, 0);
+	if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0)
+		return -ENODEV;
+	/*
+	 * See fs/partition/check.c:register_disk,rescan_partitions
+	 * Can't call rescan_partitions directly. Use ioctl.
+	 */
+	ioctl_by_bdev(bdev, BLKRRPART, 0);
+	/*
+	 * Since the matching blkdev_put call to the blkdev_get in
+	 * this function is not called before dasd_destroy_partitions
+	 * the offline open_count limit needs to be increased from
+	 * 0 to 1. This is done by setting device->bdev (see
+	 * dasd_generic_set_offline). As long as the partition
+	 * detection is running no offline should be allowed. That
+	 * is why the assignment to device->bdev is done AFTER
+	 * the BLKRRPART ioctl.
+	 */
+	block->bdev = bdev;
+	return 0;
+}
+
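For reference, the BLKRRPART rescan that dasd_scan_partitions issues internally can also be triggered from user space. A minimal sketch, assuming an example device node /dev/dasda and sufficient privileges:

#include <fcntl.h>
#include <linux/fs.h>		/* BLKRRPART */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	/* /dev/dasda is an illustrative path; any DASD block device works. */
	int fd = open("/dev/dasda", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Ask the block layer to re-read the partition table. */
	if (ioctl(fd, BLKRRPART, 0) < 0)
		perror("BLKRRPART");
	close(fd);
	return 0;
}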
+/*
+ * Remove all inodes in the system for a device, delete the
+ * partitions and make device unusable by setting its size to zero.
+ */
+void dasd_destroy_partitions(struct dasd_block *block)
+{
+	/* The two structs have 168/176 bytes on 31/64 bit. */
+	struct blkpg_partition bpart;
+	struct blkpg_ioctl_arg barg;
+	struct block_device *bdev;
+
+	/*
+	 * Get the bdev pointer from the device structure and clear
+	 * device->bdev to lower the offline open_count limit again.
+	 */
+	bdev = block->bdev;
+	block->bdev = NULL;
+
+	/*
+	 * See fs/partition/check.c:delete_partition
+	 * Can't call delete_partitions directly. Use ioctl.
+	 * The ioctl also does locking and invalidation.
+	 */
+	memset(&bpart, 0, sizeof(struct blkpg_partition));
+	memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
+	barg.data = (void __force __user *) &bpart;
+	barg.op = BLKPG_DEL_PARTITION;
+	for (bpart.pno = block->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
+		ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
+
+	invalidate_partition(block->gdp, 0);
+	/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
+	blkdev_put(bdev, FMODE_READ);
+	set_capacity(block->gdp, 0);
+}
+
+int dasd_gendisk_init(void)
+{
+	int rc;
+
+	/* Register to static dasd major 94 */
+	rc = register_blkdev(DASD_MAJOR, "dasd");
+	if (rc != 0) {
+		pr_warning("Registering the device driver with major number "
+			   "%d failed\n", DASD_MAJOR);
+		return rc;
+	}
+	return 0;
+}
+
+void dasd_gendisk_exit(void)
+{
+	unregister_blkdev(DASD_MAJOR, "dasd");
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_int.h b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_int.h
new file mode 100644
index 0000000..33a6743
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_int.h
@@ -0,0 +1,796 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_int.h
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ */
+
+#ifndef DASD_INT_H
+#define DASD_INT_H
+
+#ifdef __KERNEL__
+
+/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
+#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
+#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
+
+/*
+ * States a dasd device can have:
+ *   new: the dasd_device structure is allocated.
+ *   known: the discipline for the device is identified.
+ *   basic: the device can do basic i/o.
+ *   unfmt: the device could not be analyzed (format is unknown).
+ *   ready: partition detection is done and the device can do block io.
+ *   online: the device accepts requests from the block device queue.
+ *
+ * Things to do for startup state transitions:
+ *   new -> known: find discipline for the device and create devfs entries.
+ *   known -> basic: request irq line for the device.
+ *   basic -> ready: do the initial analysis, e.g. format detection,
+ *                   do block device setup and detect partitions.
+ *   ready -> online: schedule the device tasklet.
+ * Things to do for shutdown state transitions:
+ *   online -> ready: just set the new device state.
+ *   ready -> basic: flush requests from the block device layer, clear
+ *                   partition information and reset format information.
+ *   basic -> known: terminate all requests and free irq.
+ *   known -> new: remove devfs entries and forget discipline.
+ */
+
+#define DASD_STATE_NEW	  0
+#define DASD_STATE_KNOWN  1
+#define DASD_STATE_BASIC  2
+#define DASD_STATE_UNFMT  3
+#define DASD_STATE_READY  4
+#define DASD_STATE_ONLINE 5
+
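The transition order described above can be pictured as stepping one state at a time toward a target. The sketch below is a simplified user-space model (an illustration, not the driver's implementation); it ignores the unfmt branch, which is only entered when the initial analysis fails.

#include <stdio.h>

enum { NEW, KNOWN, BASIC, UNFMT, READY, ONLINE };

static const char *state_name[] = {
	"new", "known", "basic", "unfmt", "ready", "online"
};

/* Walk a device from its current state toward a target, one step at a
 * time, in the same order the comment above describes. */
static void change_state(int *state, int target)
{
	while (*state < target) {		/* startup direction */
		(*state)++;
		printf("  up   -> %s\n", state_name[*state]);
	}
	while (*state > target) {		/* shutdown direction */
		(*state)--;
		printf("  down -> %s\n", state_name[*state]);
	}
}

int main(void)
{
	int state = NEW;

	printf("set online:\n");
	change_state(&state, ONLINE);
	printf("set offline:\n");
	change_state(&state, NEW);
	return 0;
}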
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/hdreg.h>
+#include <linux/interrupt.h>
+#include <linux/log2.h>
+#include <asm/ccwdev.h>
+#include <linux/workqueue.h>
+#include <asm/debug.h>
+#include <asm/dasd.h>
+#include <asm/idals.h>
+
+/* DASD discipline magic */
+#define DASD_ECKD_MAGIC 0xC5C3D2C4
+#define DASD_DIAG_MAGIC 0xC4C9C1C7
+#define DASD_FBA_MAGIC 0xC6C2C140
+
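The magic values above are the discipline names encoded in EBCDIC (0xC5 = 'E', 0xC3 = 'C', 0xD2 = 'K', 0xC4 = 'D', and so on). The small stand-alone decoder below uses a partial EBCDIC table covering only the bytes that appear here; it is illustrative, not part of the patch.

#include <stdio.h>

/* Minimal EBCDIC->ASCII map for the bytes used by the DASD magics. */
static char ebcdic(unsigned char c)
{
	switch (c) {
	case 0xC1: return 'A'; case 0xC2: return 'B'; case 0xC3: return 'C';
	case 0xC4: return 'D'; case 0xC5: return 'E'; case 0xC6: return 'F';
	case 0xC7: return 'G'; case 0xC9: return 'I'; case 0xD2: return 'K';
	case 0x40: return ' ';
	default:   return '?';
	}
}

static void decode(const char *name, unsigned int magic)
{
	printf("%s = 0x%08X = \"%c%c%c%c\"\n", name, magic,
	       ebcdic(magic >> 24), ebcdic((magic >> 16) & 0xff),
	       ebcdic((magic >> 8) & 0xff), ebcdic(magic & 0xff));
}

int main(void)
{
	decode("DASD_ECKD_MAGIC", 0xC5C3D2C4);	/* "ECKD" */
	decode("DASD_DIAG_MAGIC", 0xC4C9C1C7);	/* "DIAG" */
	decode("DASD_FBA_MAGIC",  0xC6C2C140);	/* "FBA " */
	return 0;
}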
+/*
+ * SECTION: Type definitions
+ */
+struct dasd_device;
+struct dasd_block;
+
+/* BIT DEFINITIONS FOR SENSE DATA */
+#define DASD_SENSE_BIT_0 0x80
+#define DASD_SENSE_BIT_1 0x40
+#define DASD_SENSE_BIT_2 0x20
+#define DASD_SENSE_BIT_3 0x10
+
+/* BIT DEFINITIONS FOR SIM SENSE */
+#define DASD_SIM_SENSE 0x0F
+#define DASD_SIM_MSG_TO_OP 0x03
+#define DASD_SIM_LOG 0x0C
+
+/* lock class for nested cdev lock */
+#define CDEV_NESTED_FIRST 1
+#define CDEV_NESTED_SECOND 2
+
+/*
+ * SECTION: MACROs for klogd and s390 debug feature (dbf)
+ */
+#define DBF_DEV_EVENT(d_level, d_device, d_str, d_data...) \
+do { \
+	debug_sprintf_event(d_device->debug_area, \
+			    d_level, \
+			    d_str "\n", \
+			    d_data); \
+} while(0)
+
+#define DBF_DEV_EXC(d_level, d_device, d_str, d_data...) \
+do { \
+	debug_sprintf_exception(d_device->debug_area, \
+				d_level, \
+				d_str "\n", \
+				d_data); \
+} while(0)
+
+#define DBF_EVENT(d_level, d_str, d_data...)\
+do { \
+	debug_sprintf_event(dasd_debug_area, \
+			    d_level,\
+			    d_str "\n", \
+			    d_data); \
+} while(0)
+
+#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...)	\
+do { \
+	struct ccw_dev_id __dev_id;			\
+	ccw_device_get_id(d_cdev, &__dev_id);		\
+	debug_sprintf_event(dasd_debug_area,		\
+			    d_level,					\
+			    "0.%x.%04x " d_str "\n",			\
+			    __dev_id.ssid, __dev_id.devno, d_data);	\
+} while (0)
+
+#define DBF_EXC(d_level, d_str, d_data...)\
+do { \
+	debug_sprintf_exception(dasd_debug_area, \
+				d_level,\
+				d_str "\n", \
+				d_data); \
+} while(0)
+
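These macros lean on two C features: adjacent string-literal concatenation (the d_str "\n" suffix) and GNU-style named variadic macro arguments (d_data...). A user-space analogue, purely illustrative, shows the same pattern without the s390 debug feature; the real macros write into a per-device debug ring buffer rather than stderr.

#include <stdio.h>

/* User-space stand-in for debug_sprintf_event(); same macro shape,
 * GNU named variadic arguments, "\n" appended by literal concatenation. */
#define MY_DBF_EVENT(level, str, data...) \
do { \
	fprintf(stderr, "[%d] " str "\n", level, data); \
} while (0)

int main(void)
{
	int devno = 0x0201, rc = -5;

	MY_DBF_EVENT(3, "device %04x failed with rc=%d", devno, rc);
	return 0;
}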
+/* limit size for an errorstring */
+#define ERRORLENGTH 30
+
+/* definition of dbf debug levels */
+#define	DBF_EMERG	0	/* system is unusable			*/
+#define	DBF_ALERT	1	/* action must be taken immediately	*/
+#define	DBF_CRIT	2	/* critical conditions			*/
+#define	DBF_ERR		3	/* error conditions			*/
+#define	DBF_WARNING	4	/* warning conditions			*/
+#define	DBF_NOTICE	5	/* normal but significant condition	*/
+#define	DBF_INFO	6	/* informational			*/
+#define	DBF_DEBUG	6	/* debug-level messages			*/
+
+/* messages to be written via klogd and dbf */
+#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
+do { \
+	printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
+	       dev_name(&d_device->cdev->dev), d_args); \
+	DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
+} while(0)
+
+#define MESSAGE(d_loglevel,d_string,d_args...)\
+do { \
+	printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
+	DBF_EVENT(DBF_ALERT, d_string, d_args); \
+} while(0)
+
+/* messages to be written via klogd only */
+#define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
+do { \
+	printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
+	       dev_name(&d_device->cdev->dev), d_args); \
+} while(0)
+
+#define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
+do { \
+	printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
+} while(0)
+
+struct dasd_ccw_req {
+	unsigned int magic;		/* Eye catcher */
+	struct list_head devlist;	/* for dasd_device request queue */
+	struct list_head blocklist;	/* for dasd_block request queue */
+
+	/* Where to execute what... */
+	struct dasd_block *block;	/* the originating block device */
+	struct dasd_device *memdev;	/* the device used to allocate this */
+	struct dasd_device *startdev;	/* device the request is started on */
+	void *cpaddr;			/* address of ccw or tcw */
+	unsigned char cpmode;		/* 0 = cmd mode, 1 = itcw */
+	char status;			/* status of this request */
+	short retries;			/* A retry counter */
+	unsigned long flags;        	/* flags of this request */
+
+	/* ... and how */
+	unsigned long starttime;	/* jiffies time of request start */
+	unsigned long expires;		/* expiration period in jiffies */
+	char lpm;			/* logical path mask */
+	void *data;			/* pointer to data area */
+
+	/* these are important for recovering erroneous requests          */
+	int intrc;			/* internal error, e.g. from start_IO */
+	struct irb irb;			/* device status in case of an error */
+	struct dasd_ccw_req *refers;	/* ERP-chain queueing. */
+	void *function; 		/* originating ERP action */
+
+	/* these are for statistics only */
+	unsigned long long buildclk;	/* TOD-clock of request generation */
+	unsigned long long startclk;	/* TOD-clock of request start */
+	unsigned long long stopclk;	/* TOD-clock of request interrupt */
+	unsigned long long endclk;	/* TOD-clock of request termination */
+
+        /* Callback that is called after reaching final status. */
+	void (*callback)(struct dasd_ccw_req *, void *data);
+	void *callback_data;
+};
+
+/*
+ * dasd_ccw_req -> status can be:
+ */
+#define DASD_CQR_FILLED 	0x00	/* request is ready to be processed */
+#define DASD_CQR_DONE		0x01	/* request is completed successfully */
+#define DASD_CQR_NEED_ERP	0x02	/* request needs recovery action */
+#define DASD_CQR_IN_ERP 	0x03	/* request is in recovery */
+#define DASD_CQR_FAILED 	0x04	/* request is finally failed */
+#define DASD_CQR_TERMINATED	0x05	/* request was stopped by driver */
+
+#define DASD_CQR_QUEUED 	0x80	/* request is queued to be processed */
+#define DASD_CQR_IN_IO		0x81	/* request is currently in IO */
+#define DASD_CQR_ERROR		0x82	/* request is completed with error */
+#define DASD_CQR_CLEAR_PENDING	0x83	/* request is clear pending */
+#define DASD_CQR_CLEARED	0x84	/* request was cleared */
+#define DASD_CQR_SUCCESS	0x85	/* request was successful */
+
+/* default expiration time*/
+#define DASD_EXPIRES	  300
+#define DASD_EXPIRES_MAX  40000000
+
+/* per dasd_ccw_req flags */
+#define DASD_CQR_FLAGS_USE_ERP   0	/* use ERP for this request */
+#define DASD_CQR_FLAGS_FAILFAST  1	/* FAILFAST */
+#define DASD_CQR_VERIFY_PATH	 2	/* path verification request */
+#define DASD_CQR_ALLOW_SLOCK	 3	/* Try this request even when lock was
+					 * stolen. Should not be combined with
+					 * DASD_CQR_FLAGS_USE_ERP
+					 */
+
+/* Signature for error recovery functions. */
+typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
+
+/*
+ * Unique identifier for dasd device.
+ */
+#define UA_NOT_CONFIGURED  0x00
+#define UA_BASE_DEVICE	   0x01
+#define UA_BASE_PAV_ALIAS  0x02
+#define UA_HYPER_PAV_ALIAS 0x03
+
+struct dasd_uid {
+	__u8 type;
+	char vendor[4];
+	char serial[15];
+	__u16 ssid;
+	__u8 real_unit_addr;
+	__u8 base_unit_addr;
+	char vduit[33];
+};
+
+/*
+ * the struct dasd_discipline is
+ * something like a table of virtual functions, if you think of dasd_eckd
+ * inheriting dasd...
+ * no, currently we are not planning to reimplement the driver in C++
+ */
+struct dasd_discipline {
+	struct module *owner;
+	char ebcname[8];	/* a name used for tagging and printks */
+	char name[8];		/* a name used for tagging and printks */
+	int max_blocks;		/* maximum number of blocks to be chained */
+
+	struct list_head list;	/* used for list of disciplines */
+
+	/*
+	 * Device recognition functions. check_device is used to verify
+	 * the sense data and the information returned by read device
+	 * characteristics. It returns 0 if the discipline can be used
+	 * for the device in question. uncheck_device is called during
+	 * device shutdown to deregister a device from its discipline.
+	 */
+	int (*check_device) (struct dasd_device *);
+	void (*uncheck_device) (struct dasd_device *);
+
+	/*
+	 * do_analysis is used in the step from device state "basic" to
+	 * state "ready". It returns 0 if the device can be made ready,
+	 * it returns -EMEDIUMTYPE if the device can't be made ready or
+	 * -EAGAIN if do_analysis started a ccw that needs to complete
+	 * before the analysis may be repeated.
+	 */
+	int (*do_analysis) (struct dasd_block *);
+
+	/*
+	 * This function is called when new paths become available.
+	 * Disciplines may use this callback to do necessary setup work,
+	 * e.g. verify that the new path is compatible with the current
+	 * configuration.
+	 */
+	int (*verify_path)(struct dasd_device *, __u8);
+
+	/*
+	 * Last things to do when a device is set online, and first things
+	 * when it is set offline.
+	 */
+	int (*ready_to_online) (struct dasd_device *);
+	int (*online_to_ready) (struct dasd_device *);
+
+	/*
+	 * Device operation functions. build_cp creates a ccw chain for
+	 * a block device request, start_io starts the request and
+	 * term_IO cancels it (e.g. in case of a timeout). format_device
+	 * returns a ccw chain to be used to format the device.
+	 * handle_terminated_request allows a cqr to be examined and
+	 * prepared for retry.
+	 */
+	struct dasd_ccw_req *(*build_cp) (struct dasd_device *,
+					  struct dasd_block *,
+					  struct request *);
+	int (*start_IO) (struct dasd_ccw_req *);
+	int (*term_IO) (struct dasd_ccw_req *);
+	void (*handle_terminated_request) (struct dasd_ccw_req *);
+	struct dasd_ccw_req *(*format_device) (struct dasd_device *,
+					       struct format_data_t *);
+	int (*free_cp) (struct dasd_ccw_req *, struct request *);
+
+	/*
+	 * Error recovery functions. examine_error() returns a value that
+	 * indicates what to do for an error condition. If examine_error()
+	 * returns 'dasd_era_recover' erp_action() is called to create a
+	 * special error recovery ccw. erp_postaction() is called after
+	 * an error recovery ccw has finished its execution. dump_sense
+	 * is called for every error condition to print the sense data
+	 * to the console.
+	 */
+	dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *);
+	dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
+	void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
+			    struct irb *);
+	void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *);
+	void (*check_for_device_change) (struct dasd_device *,
+					 struct dasd_ccw_req *,
+					 struct irb *);
+
+        /* i/o control functions. */
+	int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
+	int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
+	int (*ioctl) (struct dasd_block *, unsigned int, void __user *);
+
+	/* suspend/resume functions */
+	int (*freeze) (struct dasd_device *);
+	int (*restore) (struct dasd_device *);
+
+	/* reload device after state change */
+	int (*reload) (struct dasd_device *);
+
+	int (*get_uid) (struct dasd_device *, struct dasd_uid *);
+	void (*kick_validate) (struct dasd_device *);
+};
+
+extern struct dasd_discipline *dasd_diag_discipline_pointer;
+
+/*
+ * Notification numbers for extended error reporting notifications:
+ * The DASD_EER_DISABLE notification is sent before a dasd_device (and its
+ * eer pointer) is freed. The error reporting module needs to do all necessary
+ * cleanup steps.
+ * The DASD_EER_TRIGGER notification sends the actual error reports (triggers).
+ */
+#define DASD_EER_DISABLE 0
+#define DASD_EER_TRIGGER 1
+
+/* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */
+#define DASD_EER_FATALERROR  1
+#define DASD_EER_NOPATH      2
+#define DASD_EER_STATECHANGE 3
+#define DASD_EER_PPRCSUSPEND 4
+
+struct dasd_path {
+	__u8 opm;
+	__u8 tbvpm;
+	__u8 ppm;
+	__u8 npm;
+};
+
+struct dasd_profile_info {
+	/* legacy part of profile data, as in dasd_profile_info_t */
+	unsigned int dasd_io_reqs;	 /* number of requests processed */
+	unsigned int dasd_io_sects;	 /* number of sectors processed */
+	unsigned int dasd_io_secs[32];	 /* histogram of request's sizes */
+	unsigned int dasd_io_times[32];	 /* histogram of requests' times */
+	unsigned int dasd_io_timps[32];	 /* h. of requests' times per sector */
+	unsigned int dasd_io_time1[32];	 /* hist. of time from build to start */
+	unsigned int dasd_io_time2[32];	 /* hist. of time from start to irq */
+	unsigned int dasd_io_time2ps[32]; /* hist. of time from start to irq */
+	unsigned int dasd_io_time3[32];	 /* hist. of time from irq to end */
+	unsigned int dasd_io_nr_req[32]; /* hist. of # of requests in chanq */
+
+	/* new data */
+	struct timespec starttod;	   /* time of start or last reset */
+	unsigned int dasd_io_alias;	   /* requests using an alias */
+	unsigned int dasd_io_tpm;	   /* requests using transport mode */
+	unsigned int dasd_read_reqs;	   /* total number of read  requests */
+	unsigned int dasd_read_sects;	   /* total number read sectors */
+	unsigned int dasd_read_alias;	   /* read request using an alias */
+	unsigned int dasd_read_tpm;	   /* read requests in transport mode */
+	unsigned int dasd_read_secs[32];   /* histogram of request's sizes */
+	unsigned int dasd_read_times[32];  /* histogram of requests' times */
+	unsigned int dasd_read_time1[32];  /* hist. time from build to start */
+	unsigned int dasd_read_time2[32];  /* hist. of time from start to irq */
+	unsigned int dasd_read_time3[32];  /* hist. of time from irq to end */
+	unsigned int dasd_read_nr_req[32]; /* hist. of # of requests in chanq */
+};
+
+struct dasd_profile {
+	struct dentry *dentry;
+	struct dasd_profile_info *data;
+	spinlock_t lock;
+};
+
+struct dasd_device {
+	/* Block device stuff. */
+	struct dasd_block *block;
+
+        unsigned int devindex;
+	unsigned long flags;	   /* per device flags */
+	unsigned short features;   /* copy of devmap-features (read-only!) */
+
+	/* extended error reporting stuff (eer) */
+	struct dasd_ccw_req *eer_cqr;
+
+	/* Device discipline stuff. */
+	struct dasd_discipline *discipline;
+	struct dasd_discipline *base_discipline;
+	char *private;
+	struct dasd_path path_data;
+
+	/* Device state and target state. */
+	int state, target;
+	struct mutex state_mutex;
+	int stopped;		/* device (ccw_device_start) was stopped */
+
+	/* reference count. */
+        atomic_t ref_count;
+
+	/* ccw queue and memory for static ccw/erp buffers. */
+	struct list_head ccw_queue;
+	spinlock_t mem_lock;
+	void *ccw_mem;
+	void *erp_mem;
+	struct list_head ccw_chunks;
+	struct list_head erp_chunks;
+
+	atomic_t tasklet_scheduled;
+        struct tasklet_struct tasklet;
+	struct work_struct kick_work;
+	struct work_struct restore_device;
+	struct work_struct reload_device;
+	struct work_struct kick_validate;
+	struct timer_list timer;
+
+	debug_info_t *debug_area;
+
+	struct ccw_device *cdev;
+
+	/* hook for alias management */
+	struct list_head alias_list;
+
+	/* default expiration time in s */
+	unsigned long default_expires;
+
+	struct dentry *debugfs_dentry;
+	struct dasd_profile profile;
+};
+
+struct dasd_block {
+	/* Block device stuff. */
+	struct gendisk *gdp;
+	struct request_queue *request_queue;
+	spinlock_t request_queue_lock;
+	struct block_device *bdev;
+	atomic_t open_count;
+
+	unsigned long long blocks; /* size of volume in blocks */
+	unsigned int bp_block;	   /* bytes per block */
+	unsigned int s2b_shift;	   /* log2 (bp_block/512) */
+
+	struct dasd_device *base;
+	struct list_head ccw_queue;
+	spinlock_t queue_lock;
+
+	atomic_t tasklet_scheduled;
+	struct tasklet_struct tasklet;
+	struct timer_list timer;
+
+	struct dentry *debugfs_dentry;
+	struct dasd_profile profile;
+};
+
+
+
+/* reasons why device (ccw_device_start) was stopped */
+#define DASD_STOPPED_NOT_ACC 1         /* not accessible */
+#define DASD_STOPPED_QUIESCE 2         /* Quiesced */
+#define DASD_STOPPED_PENDING 4         /* long busy */
+#define DASD_STOPPED_DC_WAIT 8         /* disconnected, wait */
+#define DASD_STOPPED_SU      16        /* summary unit check handling */
+#define DASD_STOPPED_PM      32        /* pm state transition */
+#define DASD_UNRESUMED_PM    64        /* pm resume failed state */
+
+/* per device flags */
+#define DASD_FLAG_OFFLINE	3	/* device is in offline processing */
+#define DASD_FLAG_EER_SNSS	4	/* A SNSS is required */
+#define DASD_FLAG_EER_IN_USE	5	/* A SNSS request is running */
+#define DASD_FLAG_DEVICE_RO	6	/* The device itself is read-only. Don't
+					 * confuse this with the user specified
+					 * read-only feature.
+					 */
+#define DASD_FLAG_IS_RESERVED	7	/* The device is reserved */
+#define DASD_FLAG_LOCK_STOLEN	8	/* The device lock was stolen */
+#define DASD_FLAG_SUSPENDED	9	/* The device was suspended */
+
+
+void dasd_put_device_wake(struct dasd_device *);
+
+/*
+ * Reference count inliners
+ */
+static inline void
+dasd_get_device(struct dasd_device *device)
+{
+	atomic_inc(&device->ref_count);
+}
+
+static inline void
+dasd_put_device(struct dasd_device *device)
+{
+	if (atomic_dec_return(&device->ref_count) == 0)
+		dasd_put_device_wake(device);
+}
+
+/*
+ * The static memory in ccw_mem and erp_mem is managed by a sorted
+ * list of free memory chunks.
+ */
+struct dasd_mchunk
+{
+	struct list_head list;
+	unsigned long size;
+} __attribute__ ((aligned(8)));
+
+static inline void
+dasd_init_chunklist(struct list_head *chunk_list, void *mem,
+		    unsigned long size)
+{
+	struct dasd_mchunk *chunk;
+
+	INIT_LIST_HEAD(chunk_list);
+	chunk = (struct dasd_mchunk *) mem;
+	chunk->size = size - sizeof(struct dasd_mchunk);
+	list_add(&chunk->list, chunk_list);
+}
+
+static inline void *
+dasd_alloc_chunk(struct list_head *chunk_list, unsigned long size)
+{
+	struct dasd_mchunk *chunk, *tmp;
+
+	size = (size + 7L) & -8L;
+	list_for_each_entry(chunk, chunk_list, list) {
+		if (chunk->size < size)
+			continue;
+		if (chunk->size > size + sizeof(struct dasd_mchunk)) {
+			char *endaddr = (char *) (chunk + 1) + chunk->size;
+			tmp = (struct dasd_mchunk *) (endaddr - size) - 1;
+			tmp->size = size;
+			chunk->size -= size + sizeof(struct dasd_mchunk);
+			chunk = tmp;
+		} else
+			list_del(&chunk->list);
+		return (void *) (chunk + 1);
+	}
+	return NULL;
+}
+
+static inline void
+dasd_free_chunk(struct list_head *chunk_list, void *mem)
+{
+	struct dasd_mchunk *chunk, *tmp;
+	struct list_head *p, *left;
+
+	chunk = (struct dasd_mchunk *)
+		((char *) mem - sizeof(struct dasd_mchunk));
+	/* Find out the left neighbour in chunk_list. */
+	left = chunk_list;
+	list_for_each(p, chunk_list) {
+		if (list_entry(p, struct dasd_mchunk, list) > chunk)
+			break;
+		left = p;
+	}
+	/* Try to merge with right neighbour = next element from left. */
+	if (left->next != chunk_list) {
+		tmp = list_entry(left->next, struct dasd_mchunk, list);
+		if ((char *) (chunk + 1) + chunk->size == (char *) tmp) {
+			list_del(&tmp->list);
+			chunk->size += tmp->size + sizeof(struct dasd_mchunk);
+		}
+	}
+	/* Try to merge with left neighbour. */
+	if (left != chunk_list) {
+		tmp = list_entry(left, struct dasd_mchunk, list);
+		if ((char *) (tmp + 1) + tmp->size == (char *) chunk) {
+			tmp->size += chunk->size + sizeof(struct dasd_mchunk);
+			return;
+		}
+	}
+	__list_add(&chunk->list, left, left->next);
+}
+
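A detail worth calling out in dasd_alloc_chunk above is the size rounding (size + 7L) & -8L, which rounds each request up to a multiple of 8 so the chunk headers keep their 8-byte alignment. A tiny stand-alone check of that expression:

#include <stdio.h>

/* Same rounding used by dasd_alloc_chunk(): round up to a multiple of 8. */
static unsigned long round8(unsigned long size)
{
	return (size + 7L) & -8L;
}

int main(void)
{
	unsigned long sizes[] = { 1, 7, 8, 9, 63, 64, 65 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%3lu -> %3lu\n", sizes[i], round8(sizes[i]));
	return 0;
}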
+/*
+ * Check if bsize is in { 512, 1024, 2048, 4096 }
+ */
+static inline int
+dasd_check_blocksize(int bsize)
+{
+	if (bsize < 512 || bsize > 4096 || !is_power_of_2(bsize))
+		return -EMEDIUMTYPE;
+	return 0;
+}
+
+/* externals in dasd.c */
+#define DASD_PROFILE_OFF	 0
+#define DASD_PROFILE_ON 	 1
+#define DASD_PROFILE_GLOBAL_ONLY 2
+
+extern debug_info_t *dasd_debug_area;
+extern struct dasd_profile_info dasd_global_profile_data;
+extern unsigned int dasd_global_profile_level;
+extern const struct block_device_operations dasd_device_operations;
+
+extern struct kmem_cache *dasd_page_cache;
+
+struct dasd_ccw_req *
+dasd_kmalloc_request(int , int, int, struct dasd_device *);
+struct dasd_ccw_req *
+dasd_smalloc_request(int , int, int, struct dasd_device *);
+void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
+void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
+void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
+
+static inline int
+dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
+{
+	return set_normalized_cda(ccw, cda);
+}
+
+struct dasd_device *dasd_alloc_device(void);
+void dasd_free_device(struct dasd_device *);
+
+struct dasd_block *dasd_alloc_block(void);
+void dasd_free_block(struct dasd_block *);
+
+void dasd_enable_device(struct dasd_device *);
+void dasd_set_target_state(struct dasd_device *, int);
+void dasd_kick_device(struct dasd_device *);
+void dasd_restore_device(struct dasd_device *);
+void dasd_reload_device(struct dasd_device *);
+
+void dasd_add_request_head(struct dasd_ccw_req *);
+void dasd_add_request_tail(struct dasd_ccw_req *);
+int  dasd_start_IO(struct dasd_ccw_req *);
+int  dasd_term_IO(struct dasd_ccw_req *);
+void dasd_schedule_device_bh(struct dasd_device *);
+void dasd_schedule_block_bh(struct dasd_block *);
+int  dasd_sleep_on(struct dasd_ccw_req *);
+int  dasd_sleep_on_immediatly(struct dasd_ccw_req *);
+int  dasd_sleep_on_interruptible(struct dasd_ccw_req *);
+void dasd_device_set_timer(struct dasd_device *, int);
+void dasd_device_clear_timer(struct dasd_device *);
+void dasd_block_set_timer(struct dasd_block *, int);
+void dasd_block_clear_timer(struct dasd_block *);
+int  dasd_cancel_req(struct dasd_ccw_req *);
+int dasd_flush_device_queue(struct dasd_device *);
+int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *);
+void dasd_generic_remove (struct ccw_device *cdev);
+int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
+int dasd_generic_set_offline (struct ccw_device *cdev);
+int dasd_generic_notify(struct ccw_device *, int);
+int dasd_generic_last_path_gone(struct dasd_device *);
+int dasd_generic_path_operational(struct dasd_device *);
+
+void dasd_generic_handle_state_change(struct dasd_device *);
+int dasd_generic_pm_freeze(struct ccw_device *);
+int dasd_generic_restore_device(struct ccw_device *);
+enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
+void dasd_generic_path_event(struct ccw_device *, int *);
+int dasd_generic_verify_path(struct dasd_device *, __u8);
+
+int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
+char *dasd_get_sense(struct irb *);
+
+void dasd_device_set_stop_bits(struct dasd_device *, int);
+void dasd_device_remove_stop_bits(struct dasd_device *, int);
+
+int dasd_device_is_ro(struct dasd_device *);
+
+void dasd_profile_reset(struct dasd_profile *);
+int dasd_profile_on(struct dasd_profile *);
+void dasd_profile_off(struct dasd_profile *);
+void dasd_global_profile_reset(void);
+char *dasd_get_user_string(const char __user *, size_t);
+
+/* externals in dasd_devmap.c */
+extern int dasd_max_devindex;
+extern int dasd_probeonly;
+extern int dasd_autodetect;
+extern int dasd_nopav;
+extern int dasd_nofcx;
+
+int dasd_devmap_init(void);
+void dasd_devmap_exit(void);
+
+struct dasd_device *dasd_create_device(struct ccw_device *);
+void dasd_delete_device(struct dasd_device *);
+
+int dasd_get_feature(struct ccw_device *, int);
+int dasd_set_feature(struct ccw_device *, int, int);
+
+int dasd_add_sysfs_files(struct ccw_device *);
+void dasd_remove_sysfs_files(struct ccw_device *);
+
+struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
+struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
+struct dasd_device *dasd_device_from_devindex(int);
+
+void dasd_add_link_to_gendisk(struct gendisk *, struct dasd_device *);
+struct dasd_device *dasd_device_from_gendisk(struct gendisk *);
+
+int dasd_parse(void);
+int dasd_busid_known(const char *);
+
+/* externals in dasd_gendisk.c */
+int  dasd_gendisk_init(void);
+void dasd_gendisk_exit(void);
+int dasd_gendisk_alloc(struct dasd_block *);
+void dasd_gendisk_free(struct dasd_block *);
+int dasd_scan_partitions(struct dasd_block *);
+void dasd_destroy_partitions(struct dasd_block *);
+
+/* externals in dasd_ioctl.c */
+int  dasd_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long);
+
+/* externals in dasd_proc.c */
+int dasd_proc_init(void);
+void dasd_proc_exit(void);
+
+/* externals in dasd_erp.c */
+struct dasd_ccw_req *dasd_default_erp_action(struct dasd_ccw_req *);
+struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *);
+struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
+					    struct dasd_device *);
+void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
+void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
+void dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb);
+
+/* externals in dasd_3990_erp.c */
+struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
+void dasd_3990_erp_handle_sim(struct dasd_device *, char *);
+
+/* externals in dasd_eer.c */
+#ifdef CONFIG_DASD_EER
+int dasd_eer_init(void);
+void dasd_eer_exit(void);
+int dasd_eer_enable(struct dasd_device *);
+void dasd_eer_disable(struct dasd_device *);
+void dasd_eer_write(struct dasd_device *, struct dasd_ccw_req *cqr,
+		    unsigned int id);
+void dasd_eer_snss(struct dasd_device *);
+
+static inline int dasd_eer_enabled(struct dasd_device *device)
+{
+	return device->eer_cqr != NULL;
+}
+#else
+#define dasd_eer_init()		(0)
+#define dasd_eer_exit()		do { } while (0)
+#define dasd_eer_enable(d)	(0)
+#define dasd_eer_disable(d)	do { } while (0)
+#define dasd_eer_write(d,c,i)	do { } while (0)
+#define dasd_eer_snss(d)	do { } while (0)
+#define dasd_eer_enabled(d)	(0)
+#endif	/* CONFIG_DASD_EER */
+
+#endif				/* __KERNEL__ */
+
+#endif				/* DASD_INT_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_ioctl.c b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_ioctl.c
new file mode 100644
index 0000000..792c69e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_ioctl.c
@@ -0,0 +1,511 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_ioctl.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Carsten Otte <Cotte@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ *
+ * i/o controls for the dasd driver.
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/blkpg.h>
+#include <linux/slab.h>
+#include <asm/compat.h>
+#include <asm/ccwdev.h>
+#include <asm/cmb.h>
+#include <asm/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_ioctl:"
+
+#include "dasd_int.h"
+
+
+static int
+dasd_ioctl_api_version(void __user *argp)
+{
+	int ver = DASD_API_VERSION;
+	return put_user(ver, (int __user *)argp);
+}
+
+/*
+ * Enable device.
+ * Used by dasdfmt after BIODASDDISABLE to retrigger blocksize detection.
+ */
+static int
+dasd_ioctl_enable(struct block_device *bdev)
+{
+	struct dasd_device *base;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+
+	dasd_enable_device(base);
+	/* Formatting the dasd device can change the capacity. */
+	mutex_lock(&bdev->bd_mutex);
+	i_size_write(bdev->bd_inode,
+		     (loff_t)get_capacity(base->block->gdp) << 9);
+	mutex_unlock(&bdev->bd_mutex);
+	dasd_put_device(base);
+	return 0;
+}
+
+/*
+ * Disable device.
+ * Used by dasdfmt. Disable I/O operations but allow ioctls.
+ */
+static int
+dasd_ioctl_disable(struct block_device *bdev)
+{
+	struct dasd_device *base;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+	/*
+	 * Man this is sick. We don't do a real disable but only downgrade
+	 * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses
+	 * BIODASDDISABLE to disable accesses to the device via the block
+	 * device layer but it still wants to do i/o on the device by
+	 * using the BIODASDFMT ioctl. Therefore the correct state for the
+	 * device is DASD_STATE_BASIC, which allows basic i/o.
+	 */
+	dasd_set_target_state(base, DASD_STATE_BASIC);
+	/*
+	 * Set i_size to zero, since read, write, etc. check against this
+	 * value.
+	 */
+	mutex_lock(&bdev->bd_mutex);
+	i_size_write(bdev->bd_inode, 0);
+	mutex_unlock(&bdev->bd_mutex);
+	dasd_put_device(base);
+	return 0;
+}
+
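For context, the BIODASDDISABLE/BIODASDENABLE pair is what dasdfmt uses around a format run. The user-space sketch below is a hedged illustration: it only builds on s390 (it needs <asm/dasd.h>), requires CAP_SYS_ADMIN, and the device path is an example.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/dasd.h>	/* BIODASDDISABLE, BIODASDENABLE (s390 only) */

int main(void)
{
	/* Illustrative device path. */
	int fd = open("/dev/dasda", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Drop the device to DASD_STATE_BASIC (what dasdfmt does) ... */
	if (ioctl(fd, BIODASDDISABLE, 0) < 0)
		perror("BIODASDDISABLE");
	/* ... and bring it back, retriggering blocksize detection. */
	if (ioctl(fd, BIODASDENABLE, 0) < 0)
		perror("BIODASDENABLE");
	close(fd);
	return 0;
}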
+/*
+ * Quiesce device.
+ */
+static int dasd_ioctl_quiesce(struct dasd_block *block)
+{
+	unsigned long flags;
+	struct dasd_device *base;
+
+	base = block->base;
+	if (!capable (CAP_SYS_ADMIN))
+		return -EACCES;
+
+	pr_info("%s: The DASD has been put in the quiesce "
+		"state\n", dev_name(&base->cdev->dev));
+	spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+	dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
+	spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+	return 0;
+}
+
+
+/*
+ * Resume device.
+ */
+static int dasd_ioctl_resume(struct dasd_block *block)
+{
+	unsigned long flags;
+	struct dasd_device *base;
+
+	base = block->base;
+	if (!capable (CAP_SYS_ADMIN))
+		return -EACCES;
+
+	pr_info("%s: I/O operations have been resumed "
+		"on the DASD\n", dev_name(&base->cdev->dev));
+	spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+	dasd_device_remove_stop_bits(base, DASD_STOPPED_QUIESCE);
+	spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+
+	dasd_schedule_block_bh(block);
+	return 0;
+}
+
+/*
+ * performs formatting of _device_ according to _fdata_
+ * Note: The discipline's format_function is assumed to deliver formatting
+ * commands to format a single unit of the device. For ECKD devices
+ * this means CCWs are generated to format a single track.
+ */
+static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
+{
+	struct dasd_ccw_req *cqr;
+	struct dasd_device *base;
+	int rc;
+
+	base = block->base;
+	if (base->discipline->format_device == NULL)
+		return -EPERM;
+
+	if (base->state != DASD_STATE_BASIC) {
+		pr_warning("%s: The DASD cannot be formatted while it is "
+			   "enabled\n",  dev_name(&base->cdev->dev));
+		return -EBUSY;
+	}
+
+	DBF_DEV_EVENT(DBF_NOTICE, base,
+		      "formatting units %u to %u (%u B blocks) flags %u",
+		      fdata->start_unit,
+		      fdata->stop_unit, fdata->blksize, fdata->intensity);
+
+	/* Since dasdfmt keeps the device open after it was disabled,
+	 * there still exists an inode for this device.
+	 * We must update i_blkbits, otherwise we might get errors when
+	 * enabling the device later.
+	 */
+	if (fdata->start_unit == 0) {
+		struct block_device *bdev = bdget_disk(block->gdp, 0);
+		bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize);
+		bdput(bdev);
+	}
+
+	while (fdata->start_unit <= fdata->stop_unit) {
+		cqr = base->discipline->format_device(base, fdata);
+		if (IS_ERR(cqr))
+			return PTR_ERR(cqr);
+		rc = dasd_sleep_on_interruptible(cqr);
+		dasd_sfree_request(cqr, cqr->memdev);
+		if (rc) {
+			if (rc != -ERESTARTSYS)
+				pr_err("%s: Formatting unit %d failed with "
+				       "rc=%d\n", dev_name(&base->cdev->dev),
+				       fdata->start_unit, rc);
+			return rc;
+		}
+		fdata->start_unit++;
+	}
+	return 0;
+}
+
+/*
+ * Format device.
+ */
+static int
+dasd_ioctl_format(struct block_device *bdev, void __user *argp)
+{
+	struct dasd_device *base;
+	struct format_data_t fdata;
+	int rc;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	if (!argp)
+		return -EINVAL;
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+	if (base->features & DASD_FEATURE_READONLY ||
+	    test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
+		dasd_put_device(base);
+		return -EROFS;
+	}
+	if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) {
+		dasd_put_device(base);
+		return -EFAULT;
+	}
+	if (bdev != bdev->bd_contains) {
+		pr_warning("%s: The specified DASD is a partition and cannot "
+			   "be formatted\n",
+			   dev_name(&base->cdev->dev));
+		dasd_put_device(base);
+		return -EINVAL;
+	}
+	rc = dasd_format(base->block, &fdata);
+	dasd_put_device(base);
+	return rc;
+}
+
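A user-space caller reaches dasd_format() through the BIODASDFMT ioctl with a format_data_t argument. The sketch below is illustrative only: the device path is an example, the device must first be put into DASD_STATE_BASIC via BIODASDDISABLE (otherwise the check above returns -EBUSY), and formatting destroys data.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/dasd.h>	/* BIODASDFMT, format_data_t (s390 only) */

int main(void)
{
	struct format_data_t fdata;
	int fd;

	/* Illustrative path; the device must already be disabled
	 * (BIODASDDISABLE) and open_count checks apply. */
	fd = open("/dev/dasda", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&fdata, 0, sizeof(fdata));
	fdata.start_unit = 0;		/* first unit (track for ECKD) */
	fdata.stop_unit  = 0;		/* format a single unit as a smoke test */
	fdata.blksize    = 4096;	/* must be 512/1024/2048/4096 */
	fdata.intensity  = 0;		/* plain format, no special flags */

	/* The driver loops over the units internally; see dasd_format(). */
	if (ioctl(fd, BIODASDFMT, &fdata) < 0)
		perror("BIODASDFMT");
	close(fd);
	return 0;
}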
+#ifdef CONFIG_DASD_PROFILE
+/*
+ * Reset device profile information
+ */
+static int dasd_ioctl_reset_profile(struct dasd_block *block)
+{
+	dasd_profile_reset(&block->profile);
+	return 0;
+}
+
+/*
+ * Return device profile information
+ */
+static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
+{
+	struct dasd_profile_info_t *data;
+	int rc = 0;
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	spin_lock_bh(&block->profile.lock);
+	if (block->profile.data) {
+		data->dasd_io_reqs = block->profile.data->dasd_io_reqs;
+		data->dasd_io_sects = block->profile.data->dasd_io_sects;
+		memcpy(data->dasd_io_secs, block->profile.data->dasd_io_secs,
+		       sizeof(data->dasd_io_secs));
+		memcpy(data->dasd_io_times, block->profile.data->dasd_io_times,
+		       sizeof(data->dasd_io_times));
+		memcpy(data->dasd_io_timps, block->profile.data->dasd_io_timps,
+		       sizeof(data->dasd_io_timps));
+		memcpy(data->dasd_io_time1, block->profile.data->dasd_io_time1,
+		       sizeof(data->dasd_io_time1));
+		memcpy(data->dasd_io_time2, block->profile.data->dasd_io_time2,
+		       sizeof(data->dasd_io_time2));
+		memcpy(data->dasd_io_time2ps,
+		       block->profile.data->dasd_io_time2ps,
+		       sizeof(data->dasd_io_time2ps));
+		memcpy(data->dasd_io_time3, block->profile.data->dasd_io_time3,
+		       sizeof(data->dasd_io_time3));
+		memcpy(data->dasd_io_nr_req,
+		       block->profile.data->dasd_io_nr_req,
+		       sizeof(data->dasd_io_nr_req));
+		spin_unlock_bh(&block->profile.lock);
+	} else {
+		spin_unlock_bh(&block->profile.lock);
+		rc = -EIO;
+		goto out;
+	}
+	if (copy_to_user(argp, data, sizeof(*data)))
+		rc = -EFAULT;
+out:
+	kfree(data);
+	return rc;
+}
+#else
+static int dasd_ioctl_reset_profile(struct dasd_block *block)
+{
+	return -ENOSYS;
+}
+
+static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
+{
+	return -ENOSYS;
+}
+#endif
+
+/*
+ * Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
+ */
+static int dasd_ioctl_information(struct dasd_block *block,
+				  unsigned int cmd, void __user *argp)
+{
+	struct dasd_information2_t *dasd_info;
+	unsigned long flags;
+	int rc;
+	struct dasd_device *base;
+	struct ccw_device *cdev;
+	struct ccw_dev_id dev_id;
+
+	base = block->base;
+	if (!base->discipline || !base->discipline->fill_info)
+		return -EINVAL;
+
+	dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
+	if (dasd_info == NULL)
+		return -ENOMEM;
+
+	rc = base->discipline->fill_info(base, dasd_info);
+	if (rc) {
+		kfree(dasd_info);
+		return rc;
+	}
+
+	cdev = base->cdev;
+	ccw_device_get_id(cdev, &dev_id);
+
+	dasd_info->devno = dev_id.devno;
+	dasd_info->schid = _ccw_device_get_subchannel_number(base->cdev);
+	dasd_info->cu_type = cdev->id.cu_type;
+	dasd_info->cu_model = cdev->id.cu_model;
+	dasd_info->dev_type = cdev->id.dev_type;
+	dasd_info->dev_model = cdev->id.dev_model;
+	dasd_info->status = base->state;
+	/*
+	 * The open_count is increased for every opener; this includes
+	 * the blkdev_get in dasd_scan_partitions.
+	 * This must be hidden from user-space.
+	 */
+	dasd_info->open_count = atomic_read(&block->open_count);
+	if (!block->bdev)
+		dasd_info->open_count++;
+
+	/*
+	 * check if device is really formatted
+	 * LDL / CDL was returned by 'fill_info'
+	 */
+	if ((base->state < DASD_STATE_READY) ||
+	    (dasd_check_blocksize(block->bp_block)))
+		dasd_info->format = DASD_FORMAT_NONE;
+
+	dasd_info->features |=
+		((base->features & DASD_FEATURE_READONLY) != 0);
+
+	memcpy(dasd_info->type, base->discipline->name, 4);
+
+	if (block->request_queue->request_fn) {
+		struct list_head *l;
+#ifdef DASD_EXTENDED_PROFILING
+		{
+			struct list_head *l;
+			spin_lock_irqsave(&block->lock, flags);
+			list_for_each(l, &block->request_queue->queue_head)
+				dasd_info->req_queue_len++;
+			spin_unlock_irqrestore(&block->lock, flags);
+		}
+#endif				/* DASD_EXTENDED_PROFILING */
+		spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+		list_for_each(l, &base->ccw_queue)
+			dasd_info->chanq_len++;
+		spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
+				       flags);
+	}
+
+	rc = 0;
+	if (copy_to_user(argp, dasd_info,
+			 ((cmd == (unsigned int) BIODASDINFO2) ?
+			  sizeof(struct dasd_information2_t) :
+			  sizeof(struct dasd_information_t))))
+		rc = -EFAULT;
+	kfree(dasd_info);
+	return rc;
+}
+
+/*
+ * Set read only
+ */
+static int
+dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
+{
+	struct dasd_device *base;
+	int intval, rc;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	if (bdev != bdev->bd_contains)
+		// ro setting is not allowed for partitions
+		return -EINVAL;
+	if (get_user(intval, (int __user *)argp))
+		return -EFAULT;
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+	if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
+		dasd_put_device(base);
+		return -EROFS;
+	}
+	set_disk_ro(bdev->bd_disk, intval);
+	rc = dasd_set_feature(base->cdev, DASD_FEATURE_READONLY, intval);
+	dasd_put_device(base);
+	return rc;
+}
+
+static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
+				  struct cmbdata __user *argp)
+{
+	size_t size = _IOC_SIZE(cmd);
+	struct cmbdata data;
+	int ret;
+
+	ret = cmf_readall(block->base->cdev, &data);
+	if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp))))
+		return -EFAULT;
+	return ret;
+}
+
+int dasd_ioctl(struct block_device *bdev, fmode_t mode,
+	       unsigned int cmd, unsigned long arg)
+{
+	struct dasd_block *block;
+	struct dasd_device *base;
+	void __user *argp;
+	int rc;
+
+	if (is_compat_task())
+		argp = compat_ptr(arg);
+	else
+		argp = (void __user *)arg;
+
+	if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) {
+		PRINT_DEBUG("empty data ptr");
+		return -EINVAL;
+	}
+
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+	block = base->block;
+	rc = 0;
+	switch (cmd) {
+	case BIODASDDISABLE:
+		rc = dasd_ioctl_disable(bdev);
+		break;
+	case BIODASDENABLE:
+		rc = dasd_ioctl_enable(bdev);
+		break;
+	case BIODASDQUIESCE:
+		rc = dasd_ioctl_quiesce(block);
+		break;
+	case BIODASDRESUME:
+		rc = dasd_ioctl_resume(block);
+		break;
+	case BIODASDFMT:
+		rc = dasd_ioctl_format(bdev, argp);
+		break;
+	case BIODASDINFO:
+		rc = dasd_ioctl_information(block, cmd, argp);
+		break;
+	case BIODASDINFO2:
+		rc = dasd_ioctl_information(block, cmd, argp);
+		break;
+	case BIODASDPRRD:
+		rc = dasd_ioctl_read_profile(block, argp);
+		break;
+	case BIODASDPRRST:
+		rc = dasd_ioctl_reset_profile(block);
+		break;
+	case BLKROSET:
+		rc = dasd_ioctl_set_ro(bdev, argp);
+		break;
+	case DASDAPIVER:
+		rc = dasd_ioctl_api_version(argp);
+		break;
+	case BIODASDCMFENABLE:
+		rc = enable_cmf(base->cdev);
+		break;
+	case BIODASDCMFDISABLE:
+		rc = disable_cmf(base->cdev);
+		break;
+	case BIODASDREADALLCMB:
+		rc = dasd_ioctl_readall_cmb(block, cmd, argp);
+		break;
+	default:
+		/* if the discipline has an ioctl method try it. */
+		if (base->discipline->ioctl) {
+			rc = base->discipline->ioctl(block, cmd, argp);
+			if (rc == -ENOIOCTLCMD)
+				rc = -EINVAL;
+		} else
+			rc = -EINVAL;
+	}
+	dasd_put_device(base);
+	return rc;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_proc.c b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_proc.c
new file mode 100644
index 0000000..e12989f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dasd_proc.c
@@ -0,0 +1,377 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_proc.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Carsten Otte <Cotte@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2002
+ *
+ * /proc interface for the dasd driver.
+ *
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/proc_fs.h>
+
+#include <asm/debug.h>
+#include <asm/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_proc:"
+
+#include "dasd_int.h"
+
+static struct proc_dir_entry *dasd_proc_root_entry = NULL;
+static struct proc_dir_entry *dasd_devices_entry = NULL;
+static struct proc_dir_entry *dasd_statistics_entry = NULL;
+
+static int
+dasd_devices_show(struct seq_file *m, void *v)
+{
+	struct dasd_device *device;
+	struct dasd_block *block;
+	char *substr;
+
+	device = dasd_device_from_devindex((unsigned long) v - 1);
+	if (IS_ERR(device))
+		return 0;
+	if (device->block)
+		block = device->block;
+	else {
+		dasd_put_device(device);
+		return 0;
+	}
+	/* Print device number. */
+	seq_printf(m, "%s", dev_name(&device->cdev->dev));
+	/* Print discipline string. */
+	if (device->discipline != NULL)
+		seq_printf(m, "(%s)", device->discipline->name);
+	else
+		seq_printf(m, "(none)");
+	/* Print kdev. */
+	if (block->gdp)
+		seq_printf(m, " at (%3d:%6d)",
+			   MAJOR(disk_devt(block->gdp)),
+			   MINOR(disk_devt(block->gdp)));
+	else
+		seq_printf(m, "  at (???:??????)");
+	/* Print device name. */
+	if (block->gdp)
+		seq_printf(m, " is %-8s", block->gdp->disk_name);
+	else
+		seq_printf(m, " is ????????");
+	/* Print devices features. */
+	substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
+	seq_printf(m, "%4s: ", substr);
+	/* Print device status information. */
+	switch (device->state) {
+	case DASD_STATE_NEW:
+		seq_printf(m, "new");
+		break;
+	case DASD_STATE_KNOWN:
+		seq_printf(m, "detected");
+		break;
+	case DASD_STATE_BASIC:
+		seq_printf(m, "basic");
+		break;
+	case DASD_STATE_UNFMT:
+		seq_printf(m, "unformatted");
+		break;
+	case DASD_STATE_READY:
+	case DASD_STATE_ONLINE:
+		seq_printf(m, "active ");
+		if (dasd_check_blocksize(block->bp_block))
+			seq_printf(m, "n/f	 ");
+		else
+			seq_printf(m,
+				   "at blocksize: %d, %lld blocks, %lld MB",
+				   block->bp_block, block->blocks,
+				   ((block->bp_block >> 9) *
+				    block->blocks) >> 11);
+		break;
+	default:
+		seq_printf(m, "no stat");
+		break;
+	}
+	dasd_put_device(device);
+	if (dasd_probeonly)
+		seq_printf(m, "(probeonly)");
+	seq_printf(m, "\n");
+	return 0;
+}
+
+static void *dasd_devices_start(struct seq_file *m, loff_t *pos)
+{
+	if (*pos >= dasd_max_devindex)
+		return NULL;
+	return (void *)((unsigned long) *pos + 1);
+}
+
+static void *dasd_devices_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return dasd_devices_start(m, pos);
+}
+
+static void dasd_devices_stop(struct seq_file *m, void *v)
+{
+}
+
+static const struct seq_operations dasd_devices_seq_ops = {
+	.start		= dasd_devices_start,
+	.next		= dasd_devices_next,
+	.stop		= dasd_devices_stop,
+	.show		= dasd_devices_show,
+};
+
+static int dasd_devices_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &dasd_devices_seq_ops);
+}
+
+static const struct file_operations dasd_devices_file_ops = {
+	.owner		= THIS_MODULE,
+	.open		= dasd_devices_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
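With the seq_file plumbing above, each line of /proc/dasd/devices is assembled by dasd_devices_show(). An illustrative line for an active ECKD volume (device name, numbers and field widths are examples, not taken from a real system) would look roughly like:

0.0.0201(ECKD) at ( 94:     0) is dasda      : active at blocksize: 4096, 601020 blocks, 2347 MB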
+#ifdef CONFIG_DASD_PROFILE
+static int dasd_stats_all_block_on(void)
+{
+	int i, rc;
+	struct dasd_device *device;
+
+	rc = 0;
+	for (i = 0; i < dasd_max_devindex; ++i) {
+		device = dasd_device_from_devindex(i);
+		if (IS_ERR(device))
+			continue;
+		if (device->block)
+			rc = dasd_profile_on(&device->block->profile);
+		dasd_put_device(device);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+static void dasd_stats_all_block_off(void)
+{
+	int i;
+	struct dasd_device *device;
+
+	for (i = 0; i < dasd_max_devindex; ++i) {
+		device = dasd_device_from_devindex(i);
+		if (IS_ERR(device))
+			continue;
+		if (device->block)
+			dasd_profile_off(&device->block->profile);
+		dasd_put_device(device);
+	}
+}
+
+static void dasd_stats_all_block_reset(void)
+{
+	int i;
+	struct dasd_device *device;
+
+	for (i = 0; i < dasd_max_devindex; ++i) {
+		device = dasd_device_from_devindex(i);
+		if (IS_ERR(device))
+			continue;
+		if (device->block)
+			dasd_profile_reset(&device->block->profile);
+		dasd_put_device(device);
+	}
+}
+
+static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int factor)
+{
+	int i;
+
+	for (i = 0; i < 32; i++) {
+		seq_printf(m, "%7d ", array[i] / factor);
+		if (i == 15)
+			seq_putc(m, '\n');
+	}
+	seq_putc(m, '\n');
+}
+#endif /* CONFIG_DASD_PROFILE */
+
+static int dasd_stats_proc_show(struct seq_file *m, void *v)
+{
+#ifdef CONFIG_DASD_PROFILE
+	struct dasd_profile_info *prof;
+	int factor;
+
+	/* check for active profiling */
+	if (!dasd_global_profile_level) {
+		seq_printf(m, "Statistics are off - they can be "
+				    "switched on using 'echo set on > "
+				    "/proc/dasd/statistics'\n");
+		return 0;
+	}
+	prof = &dasd_global_profile_data;
+
+	/* prevent counter 'overflow' on output */
+	for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
+	     factor *= 10);
+
+	seq_printf(m, "%d dasd I/O requests\n", prof->dasd_io_reqs);
+	seq_printf(m, "with %u sectors(512B each)\n",
+		       prof->dasd_io_sects);
+	seq_printf(m, "Scale Factor is  %d\n", factor);
+	seq_printf(m,
+		       "   __<4	   ___8	   __16	   __32	   __64	   _128	"
+		       "   _256	   _512	   __1k	   __2k	   __4k	   __8k	"
+		       "   _16k	   _32k	   _64k	   128k\n");
+	seq_printf(m,
+		       "   _256	   _512	   __1M	   __2M	   __4M	   __8M	"
+		       "   _16M	   _32M	   _64M	   128M	   256M	   512M	"
+		       "   __1G	   __2G	   __4G " "   _>4G\n");
+
+	seq_printf(m, "Histogram of sizes (512B secs)\n");
+	dasd_statistics_array(m, prof->dasd_io_secs, factor);
+	seq_printf(m, "Histogram of I/O times (microseconds)\n");
+	dasd_statistics_array(m, prof->dasd_io_times, factor);
+	seq_printf(m, "Histogram of I/O times per sector\n");
+	dasd_statistics_array(m, prof->dasd_io_timps, factor);
+	seq_printf(m, "Histogram of I/O time till ssch\n");
+	dasd_statistics_array(m, prof->dasd_io_time1, factor);
+	seq_printf(m, "Histogram of I/O time between ssch and irq\n");
+	dasd_statistics_array(m, prof->dasd_io_time2, factor);
+	seq_printf(m, "Histogram of I/O time between ssch "
+			    "and irq per sector\n");
+	dasd_statistics_array(m, prof->dasd_io_time2ps, factor);
+	seq_printf(m, "Histogram of I/O time between irq and end\n");
+	dasd_statistics_array(m, prof->dasd_io_time3, factor);
+	seq_printf(m, "# of req in chanq at enqueuing (1..32) \n");
+	dasd_statistics_array(m, prof->dasd_io_nr_req, factor);
+#else
+	seq_printf(m, "Statistics are not activated in this kernel\n");
+#endif
+	return 0;
+}
+
+static int dasd_stats_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dasd_stats_proc_show, NULL);
+}
+
+static ssize_t dasd_stats_proc_write(struct file *file,
+		const char __user *user_buf, size_t user_len, loff_t *pos)
+{
+#ifdef CONFIG_DASD_PROFILE
+	char *buffer, *str;
+	int rc;
+
+	if (user_len > 65536)
+		user_len = 65536;
+	buffer = dasd_get_user_string(user_buf, user_len);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
+
+	/* check for valid verbs */
+	str = skip_spaces(buffer);
+	if (strncmp(str, "set", 3) == 0 && isspace(str[3])) {
+		/* 'set xxx' was given */
+		str = skip_spaces(str + 4);
+		if (strcmp(str, "on") == 0) {
+			/* switch on statistics profiling */
+			rc = dasd_stats_all_block_on();
+			if (rc) {
+				dasd_stats_all_block_off();
+				goto out_error;
+			}
+			dasd_global_profile_reset();
+			dasd_global_profile_level = DASD_PROFILE_ON;
+			pr_info("The statistics feature has been switched "
+				"on\n");
+		} else if (strcmp(str, "off") == 0) {
+			/* switch off and reset statistics profiling */
+			dasd_global_profile_level = DASD_PROFILE_OFF;
+			dasd_global_profile_reset();
+			dasd_stats_all_block_off();
+			pr_info("The statistics feature has been switched "
+				"off\n");
+		} else
+			goto out_parse_error;
+	} else if (strncmp(str, "reset", 5) == 0) {
+		/* reset the statistics */
+		dasd_global_profile_reset();
+		dasd_stats_all_block_reset();
+		pr_info("The statistics have been reset\n");
+	} else
+		goto out_parse_error;
+	vfree(buffer);
+	return user_len;
+out_parse_error:
+	rc = -EINVAL;
+	pr_warning("%s is not a supported value for /proc/dasd/statistics\n",
+		str);
+out_error:
+	vfree(buffer);
+	return rc;
+#else
+	pr_warning("/proc/dasd/statistics: is not activated in this kernel\n");
+	return user_len;
+#endif				/* CONFIG_DASD_PROFILE */
+}
+
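The write handler above accepts the verbs "set on", "set off" and "reset", so the usual way to drive it is echo set on > /proc/dasd/statistics, as the message in the show function suggests. An equivalent stand-alone C sketch (requires CONFIG_DASD_PROFILE and root privileges):

#include <stdio.h>

/* Toggle DASD statistics via /proc; equivalent to
 *   echo set on > /proc/dasd/statistics
 * Pass "set on", "set off" or "reset" as the first argument. */
int main(int argc, char **argv)
{
	const char *verb = (argc > 1) ? argv[1] : "set on";
	FILE *f = fopen("/proc/dasd/statistics", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* The driver strips the trailing newline in dasd_get_user_string(). */
	fprintf(f, "%s\n", verb);
	fclose(f);
	return 0;
}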
+static const struct file_operations dasd_stats_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dasd_stats_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= dasd_stats_proc_write,
+};
+
+/*
+ * Create dasd proc-fs entries.
+ * In case creation failed, cleanup and return -ENOENT.
+ */
+int
+dasd_proc_init(void)
+{
+	dasd_proc_root_entry = proc_mkdir("dasd", NULL);
+	if (!dasd_proc_root_entry)
+		goto out_nodasd;
+	dasd_devices_entry = proc_create("devices",
+					 S_IFREG | S_IRUGO | S_IWUSR,
+					 dasd_proc_root_entry,
+					 &dasd_devices_file_ops);
+	if (!dasd_devices_entry)
+		goto out_nodevices;
+	dasd_statistics_entry = proc_create("statistics",
+					    S_IFREG | S_IRUGO | S_IWUSR,
+					    dasd_proc_root_entry,
+					    &dasd_stats_proc_fops);
+	if (!dasd_statistics_entry)
+		goto out_nostatistics;
+	return 0;
+
+ out_nostatistics:
+	remove_proc_entry("devices", dasd_proc_root_entry);
+ out_nodevices:
+	remove_proc_entry("dasd", NULL);
+ out_nodasd:
+	return -ENOENT;
+}
+
+void
+dasd_proc_exit(void)
+{
+	remove_proc_entry("devices", dasd_proc_root_entry);
+	remove_proc_entry("statistics", dasd_proc_root_entry);
+	remove_proc_entry("dasd", NULL);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/dcssblk.c b/ap/os/linux/linux-3.4.x/drivers/s390/block/dcssblk.c
new file mode 100644
index 0000000..a5a55da
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/dcssblk.c
@@ -0,0 +1,1094 @@
+/*
+ * dcssblk.c -- the S/390 block driver for dcss memory
+ *
+ * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
+ */
+
+#define KMSG_COMPONENT "dcssblk"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <asm/extmem.h>
+#include <asm/io.h>
+
+#define DCSSBLK_NAME "dcssblk"
+#define DCSSBLK_MINORS_PER_DISK 1
+#define DCSSBLK_PARM_LEN 400
+#define DCSS_BUS_ID_SIZE 20
+
+static int dcssblk_open(struct block_device *bdev, fmode_t mode);
+static int dcssblk_release(struct gendisk *disk, fmode_t mode);
+static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
+static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
+				 void **kaddr, unsigned long *pfn);
+
+static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
+
+static int dcssblk_major;
+static const struct block_device_operations dcssblk_devops = {
+	.owner   	= THIS_MODULE,
+	.open    	= dcssblk_open,
+	.release 	= dcssblk_release,
+	.direct_access 	= dcssblk_direct_access,
+};
+
+struct dcssblk_dev_info {
+	struct list_head lh;
+	struct device dev;
+	char segment_name[DCSS_BUS_ID_SIZE];
+	atomic_t use_count;
+	struct gendisk *gd;
+	unsigned long start;
+	unsigned long end;
+	int segment_type;
+	unsigned char save_pending;
+	unsigned char is_shared;
+	struct request_queue *dcssblk_queue;
+	int num_of_segments;
+	struct list_head seg_list;
+};
+
+struct segment_info {
+	struct list_head lh;
+	char segment_name[DCSS_BUS_ID_SIZE];
+	unsigned long start;
+	unsigned long end;
+	int segment_type;
+};
+
+static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *attr, const char * buf,
+				  size_t count);
+static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf,
+				  size_t count);
+static ssize_t dcssblk_save_store(struct device * dev, struct device_attribute *attr, const char * buf,
+				  size_t count);
+static ssize_t dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf);
+static ssize_t dcssblk_shared_store(struct device * dev, struct device_attribute *attr, const char * buf,
+				  size_t count);
+static ssize_t dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf);
+static ssize_t dcssblk_seglist_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf);
+
+static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
+static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
+static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show,
+		   dcssblk_save_store);
+static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show,
+		   dcssblk_shared_store);
+static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL);
+
+static struct device *dcssblk_root_dev;
+
+static LIST_HEAD(dcssblk_devices);
+static struct rw_semaphore dcssblk_devices_sem;
+
+/*
+ * release function for segment device.
+ */
+static void
+dcssblk_release_segment(struct device *dev)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry, *temp;
+
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) {
+		list_del(&entry->lh);
+		kfree(entry);
+	}
+	kfree(dev_info);
+	module_put(THIS_MODULE);
+}
+
+/*
+ * get a minor number. needs to be called with
+ * down_write(&dcssblk_devices_sem) and the
+ * device needs to be enqueued before the semaphore is
+ * freed.
+ */
+static int
+dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
+{
+	int minor, found;
+	struct dcssblk_dev_info *entry;
+
+	if (dev_info == NULL)
+		return -EINVAL;
+	for (minor = 0; minor < (1<<MINORBITS); minor++) {
+		found = 0;
+		// test if minor available
+		list_for_each_entry(entry, &dcssblk_devices, lh)
+			if (minor == entry->gd->first_minor)
+				found++;
+		if (!found) break; // got unused minor
+	}
+	if (found)
+		return -EBUSY;
+	dev_info->gd->first_minor = minor;
+	return 0;
+}
+
+/*
+ * get the struct dcssblk_dev_info from dcssblk_devices
+ * for the given name.
+ * down_read(&dcssblk_devices_sem) must be held.
+ */
+static struct dcssblk_dev_info *
+dcssblk_get_device_by_name(char *name)
+{
+	struct dcssblk_dev_info *entry;
+
+	list_for_each_entry(entry, &dcssblk_devices, lh) {
+		if (!strcmp(name, entry->segment_name)) {
+			return entry;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * get the struct segment_info from seg_list
+ * for the given name.
+ * down_read(&dcssblk_devices_sem) must be held.
+ */
+static struct segment_info *
+dcssblk_get_segment_by_name(char *name)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
+
+	list_for_each_entry(dev_info, &dcssblk_devices, lh) {
+		list_for_each_entry(entry, &dev_info->seg_list, lh) {
+			if (!strcmp(name, entry->segment_name))
+				return entry;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * get the highest address of the multi-segment block.
+ */
+static unsigned long
+dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info)
+{
+	unsigned long highest_addr;
+	struct segment_info *entry;
+
+	highest_addr = 0;
+	list_for_each_entry(entry, &dev_info->seg_list, lh) {
+		if (highest_addr < entry->end)
+			highest_addr = entry->end;
+	}
+	return highest_addr;
+}
+
+/*
+ * get the lowest address of the multi-segment block.
+ */
+static unsigned long
+dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info)
+{
+	int set_first;
+	unsigned long lowest_addr;
+	struct segment_info *entry;
+
+	set_first = 0;
+	lowest_addr = 0;
+	list_for_each_entry(entry, &dev_info->seg_list, lh) {
+		if (set_first == 0) {
+			lowest_addr = entry->start;
+			set_first = 1;
+		} else {
+			if (lowest_addr > entry->start)
+				lowest_addr = entry->start;
+		}
+	}
+	return lowest_addr;
+}
+
+/*
+ * Check continuity of segments.
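+ * All segments of a device must form one contiguous address range; adjacent
+ * segments of differing types are only allowed if both are of type EN or EW.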
+ */
+static int
+dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
+{
+	int i, j, rc;
+	struct segment_info *sort_list, *entry, temp;
+
+	if (dev_info->num_of_segments <= 1)
+		return 0;
+
+	sort_list = kzalloc(
+			sizeof(struct segment_info) * dev_info->num_of_segments,
+			GFP_KERNEL);
+	if (sort_list == NULL)
+		return -ENOMEM;
+	i = 0;
+	list_for_each_entry(entry, &dev_info->seg_list, lh) {
+		memcpy(&sort_list[i], entry, sizeof(struct segment_info));
+		i++;
+	}
+
+	/* sort segments */
+	for (i = 0; i < dev_info->num_of_segments; i++)
+		for (j = 0; j < dev_info->num_of_segments; j++)
+			if (sort_list[j].start > sort_list[i].start) {
+				memcpy(&temp, &sort_list[i],
+					sizeof(struct segment_info));
+				memcpy(&sort_list[i], &sort_list[j],
+					sizeof(struct segment_info));
+				memcpy(&sort_list[j], &temp,
+					sizeof(struct segment_info));
+			}
+
+	/* check continuity */
+	for (i = 0; i < dev_info->num_of_segments - 1; i++) {
+		if ((sort_list[i].end + 1) != sort_list[i+1].start) {
+			pr_err("Adjacent DCSSs %s and %s are not "
+			       "contiguous\n", sort_list[i].segment_name,
+			       sort_list[i+1].segment_name);
+			rc = -EINVAL;
+			goto out;
+		}
+		/* EN and EW are allowed in a block device */
+		if (sort_list[i].segment_type != sort_list[i+1].segment_type) {
+			if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) ||
+				(sort_list[i].segment_type == SEG_TYPE_ER) ||
+				!(sort_list[i+1].segment_type &
+				SEGMENT_EXCLUSIVE) ||
+				(sort_list[i+1].segment_type == SEG_TYPE_ER)) {
+				pr_err("DCSS %s and DCSS %s have "
+				       "incompatible types\n",
+				       sort_list[i].segment_name,
+				       sort_list[i+1].segment_name);
+				rc = -EINVAL;
+				goto out;
+			}
+		}
+	}
+	rc = 0;
+out:
+	kfree(sort_list);
+	return rc;
+}
+
+/*
+ * Load a segment
+ */
+static int
+dcssblk_load_segment(char *name, struct segment_info **seg_info)
+{
+	int rc;
+
+	/* already loaded? */
+	down_read(&dcssblk_devices_sem);
+	*seg_info = dcssblk_get_segment_by_name(name);
+	up_read(&dcssblk_devices_sem);
+	if (*seg_info != NULL)
+		return -EEXIST;
+
+	/* get a struct segment_info */
+	*seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL);
+	if (*seg_info == NULL)
+		return -ENOMEM;
+
+	strcpy((*seg_info)->segment_name, name);
+
+	/* load the segment */
+	rc = segment_load(name, SEGMENT_SHARED,
+			&(*seg_info)->start, &(*seg_info)->end);
+	if (rc < 0) {
+		segment_warning(rc, (*seg_info)->segment_name);
+		kfree(*seg_info);
+	} else {
+		INIT_LIST_HEAD(&(*seg_info)->lh);
+		(*seg_info)->segment_type = rc;
+	}
+	return rc;
+}
+
+static void dcssblk_unregister_callback(struct device *dev)
+{
+	device_unregister(dev);
+	put_device(dev);
+}
+
+/*
+ * device attribute for switching shared/nonshared (exclusive)
+ * operation (show + store)
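+ * e.g. "echo 0 > /sys/devices/dcssblk/<device name>/shared" reloads the
+ * DCSSs of the device in exclusive-writable mode, "echo 1" in shared mode.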
+ */
+static ssize_t
+dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dcssblk_dev_info *dev_info;
+
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n");
+}
+
+static ssize_t
+dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry, *temp;
+	int rc;
+
+	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
+		return -EINVAL;
+	down_write(&dcssblk_devices_sem);
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	if (atomic_read(&dev_info->use_count)) {
+		rc = -EBUSY;
+		goto out;
+	}
+	if (inbuf[0] == '1') {
+		/* reload segments in shared mode */
+		list_for_each_entry(entry, &dev_info->seg_list, lh) {
+			rc = segment_modify_shared(entry->segment_name,
+						SEGMENT_SHARED);
+			if (rc < 0) {
+				BUG_ON(rc == -EINVAL);
+				if (rc != -EAGAIN)
+					goto removeseg;
+			}
+		}
+		dev_info->is_shared = 1;
+		switch (dev_info->segment_type) {
+		case SEG_TYPE_SR:
+		case SEG_TYPE_ER:
+		case SEG_TYPE_SC:
+			set_disk_ro(dev_info->gd, 1);
+		}
+	} else if (inbuf[0] == '0') {
+		/* reload segments in exclusive mode */
+		if (dev_info->segment_type == SEG_TYPE_SC) {
+			pr_err("DCSS %s is of type SC and cannot be "
+			       "loaded as exclusive-writable\n",
+			       dev_info->segment_name);
+			rc = -EINVAL;
+			goto out;
+		}
+		list_for_each_entry(entry, &dev_info->seg_list, lh) {
+			rc = segment_modify_shared(entry->segment_name,
+						   SEGMENT_EXCLUSIVE);
+			if (rc < 0) {
+				BUG_ON(rc == -EINVAL);
+				if (rc != -EAGAIN)
+					goto removeseg;
+			}
+		}
+		dev_info->is_shared = 0;
+		set_disk_ro(dev_info->gd, 0);
+	} else {
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = count;
+	goto out;
+
+removeseg:
+	pr_err("DCSS device %s is removed after a failed access mode "
+	       "change\n", dev_info->segment_name);
+	temp = entry;
+	list_for_each_entry(entry, &dev_info->seg_list, lh) {
+		if (entry != temp)
+			segment_unload(entry->segment_name);
+	}
+	list_del(&dev_info->lh);
+
+	del_gendisk(dev_info->gd);
+	blk_cleanup_queue(dev_info->dcssblk_queue);
+	dev_info->gd->queue = NULL;
+	put_disk(dev_info->gd);
+	rc = device_schedule_callback(dev, dcssblk_unregister_callback);
+out:
+	up_write(&dcssblk_devices_sem);
+	return rc;
+}
+
+/*
+ * device attribute for save operation on current copy
+ * of the segment. If the segment is busy, saving will
+ * become pending until it gets released, which can be
+ * undone by storing a non-true value to this entry.
+ * (show + store)
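+ * e.g. "echo 1 > /sys/devices/dcssblk/<device name>/save" requests a save
+ * of all DCSSs of the device, "echo 0" cancels a pending save request.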
+ */
+static ssize_t
+dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dcssblk_dev_info *dev_info;
+
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n");
+}
+
+static ssize_t
+dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
+
+	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
+		return -EINVAL;
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+
+	down_write(&dcssblk_devices_sem);
+	if (inbuf[0] == '1') {
+		if (atomic_read(&dev_info->use_count) == 0) {
+			// device is idle => we save immediately
+			pr_info("All DCSSs that map to device %s are "
+				"saved\n", dev_info->segment_name);
+			list_for_each_entry(entry, &dev_info->seg_list, lh) {
+				segment_save(entry->segment_name);
+			}
+		}  else {
+			// device is busy => we save it when it becomes
+			// idle in dcssblk_release
+			pr_info("Device %s is in use, its DCSSs will be "
+				"saved when it becomes idle\n",
+				dev_info->segment_name);
+			dev_info->save_pending = 1;
+		}
+	} else if (inbuf[0] == '0') {
+		if (dev_info->save_pending) {
+			// device is busy & the user wants to undo his save
+			// request
+			dev_info->save_pending = 0;
+			pr_info("A pending save request for device %s "
+				"has been canceled\n",
+				dev_info->segment_name);
+		}
+	} else {
+		up_write(&dcssblk_devices_sem);
+		return -EINVAL;
+	}
+	up_write(&dcssblk_devices_sem);
+	return count;
+}
+
+/*
+ * device attribute for showing all segments in a device
+ */
+static ssize_t
+dcssblk_seglist_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	int i;
+
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
+
+	down_read(&dcssblk_devices_sem);
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	i = 0;
+	buf[0] = '\0';
+	list_for_each_entry(entry, &dev_info->seg_list, lh) {
+		strcpy(&buf[i], entry->segment_name);
+		i += strlen(entry->segment_name);
+		buf[i] = '\n';
+		i++;
+	}
+	up_read(&dcssblk_devices_sem);
+	return i;
+}
+
+/*
+ * device attribute for adding devices
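+ * A write like "MYDCSS1:MYDCSS2" (names of contiguous DCSSs, colon-separated,
+ * max. 8 characters each, converted to upper case) creates one block device
+ * dcssblk<minor> that spans all listed segments.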
+ */
+static ssize_t
+dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	int rc, i, j, num_of_segments;
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *seg_info, *temp;
+	char *local_buf;
+	unsigned long seg_byte_size;
+
+	dev_info = NULL;
+	seg_info = NULL;
+	if (dev != dcssblk_root_dev) {
+		rc = -EINVAL;
+		goto out_nobuf;
+	}
+	if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) {
+		rc = -ENAMETOOLONG;
+		goto out_nobuf;
+	}
+
+	local_buf = kmalloc(count + 1, GFP_KERNEL);
+	if (local_buf == NULL) {
+		rc = -ENOMEM;
+		goto out_nobuf;
+	}
+
+	/*
+	 * parse input
+	 */
+	num_of_segments = 0;
+	for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) {
+		for (j = i; (buf[j] != ':') &&
+			(buf[j] != '\0') &&
+			(buf[j] != '\n') &&
+			j < count; j++) {
+			local_buf[j-i] = toupper(buf[j]);
+		}
+		local_buf[j-i] = '\0';
+		if (((j - i) == 0) || ((j - i) > 8)) {
+			rc = -ENAMETOOLONG;
+			goto seg_list_del;
+		}
+
+		rc = dcssblk_load_segment(local_buf, &seg_info);
+		if (rc < 0)
+			goto seg_list_del;
+		/*
+		 * get a struct dcssblk_dev_info
+		 */
+		if (num_of_segments == 0) {
+			dev_info = kzalloc(sizeof(struct dcssblk_dev_info),
+					GFP_KERNEL);
+			if (dev_info == NULL) {
+				rc = -ENOMEM;
+				goto out;
+			}
+			strcpy(dev_info->segment_name, local_buf);
+			dev_info->segment_type = seg_info->segment_type;
+			INIT_LIST_HEAD(&dev_info->seg_list);
+		}
+		list_add_tail(&seg_info->lh, &dev_info->seg_list);
+		num_of_segments++;
+		i = j;
+
+		if ((buf[j] == '\0') || (buf[j] == '\n'))
+			break;
+	}
+
+	/* no trailing colon at the end of the input */
+	if ((i > 0) && (buf[i-1] == ':')) {
+		rc = -ENAMETOOLONG;
+		goto seg_list_del;
+	}
+	strlcpy(local_buf, buf, i + 1);
+	dev_info->num_of_segments = num_of_segments;
+	rc = dcssblk_is_continuous(dev_info);
+	if (rc < 0)
+		goto seg_list_del;
+
+	dev_info->start = dcssblk_find_lowest_addr(dev_info);
+	dev_info->end = dcssblk_find_highest_addr(dev_info);
+
+	dev_set_name(&dev_info->dev, dev_info->segment_name);
+	dev_info->dev.release = dcssblk_release_segment;
+	INIT_LIST_HEAD(&dev_info->lh);
+	dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK);
+	if (dev_info->gd == NULL) {
+		rc = -ENOMEM;
+		goto seg_list_del;
+	}
+	dev_info->gd->major = dcssblk_major;
+	dev_info->gd->fops = &dcssblk_devops;
+	dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL);
+	dev_info->gd->queue = dev_info->dcssblk_queue;
+	dev_info->gd->private_data = dev_info;
+	dev_info->gd->driverfs_dev = &dev_info->dev;
+	blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
+	blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
+
+	seg_byte_size = (dev_info->end - dev_info->start + 1);
+	set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
+	pr_info("Loaded %s with total size %lu bytes and capacity %lu "
+		"sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9);
+
+	dev_info->save_pending = 0;
+	dev_info->is_shared = 1;
+	dev_info->dev.parent = dcssblk_root_dev;
+
+	/*
+	 * get minor, add to list
+	 */
+	down_write(&dcssblk_devices_sem);
+	if (dcssblk_get_segment_by_name(local_buf)) {
+		rc = -EEXIST;
+		goto release_gd;
+	}
+	rc = dcssblk_assign_free_minor(dev_info);
+	if (rc)
+		goto release_gd;
+	sprintf(dev_info->gd->disk_name, "dcssblk%d",
+		dev_info->gd->first_minor);
+	list_add_tail(&dev_info->lh, &dcssblk_devices);
+
+	if (!try_module_get(THIS_MODULE)) {
+		rc = -ENODEV;
+		goto dev_list_del;
+	}
+	/*
+	 * register the device
+	 */
+	rc = device_register(&dev_info->dev);
+	if (rc) {
+		module_put(THIS_MODULE);
+		goto dev_list_del;
+	}
+	get_device(&dev_info->dev);
+	rc = device_create_file(&dev_info->dev, &dev_attr_shared);
+	if (rc)
+		goto unregister_dev;
+	rc = device_create_file(&dev_info->dev, &dev_attr_save);
+	if (rc)
+		goto unregister_dev;
+	rc = device_create_file(&dev_info->dev, &dev_attr_seglist);
+	if (rc)
+		goto unregister_dev;
+
+	add_disk(dev_info->gd);
+
+	switch (dev_info->segment_type) {
+		case SEG_TYPE_SR:
+		case SEG_TYPE_ER:
+		case SEG_TYPE_SC:
+			set_disk_ro(dev_info->gd,1);
+			break;
+		default:
+			set_disk_ro(dev_info->gd,0);
+			break;
+	}
+	up_write(&dcssblk_devices_sem);
+	rc = count;
+	goto out;
+
+unregister_dev:
+	list_del(&dev_info->lh);
+	blk_cleanup_queue(dev_info->dcssblk_queue);
+	dev_info->gd->queue = NULL;
+	put_disk(dev_info->gd);
+	device_unregister(&dev_info->dev);
+	list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
+		segment_unload(seg_info->segment_name);
+	}
+	put_device(&dev_info->dev);
+	up_write(&dcssblk_devices_sem);
+	goto out;
+dev_list_del:
+	list_del(&dev_info->lh);
+release_gd:
+	blk_cleanup_queue(dev_info->dcssblk_queue);
+	dev_info->gd->queue = NULL;
+	put_disk(dev_info->gd);
+	up_write(&dcssblk_devices_sem);
+seg_list_del:
+	if (dev_info == NULL)
+		goto out;
+	list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) {
+		list_del(&seg_info->lh);
+		segment_unload(seg_info->segment_name);
+		kfree(seg_info);
+	}
+	kfree(dev_info);
+out:
+	kfree(local_buf);
+out_nobuf:
+	return rc;
+}
+
+/*
+ * device attribute for removing devices
+ */
+static ssize_t
+dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
+	int rc, i;
+	char *local_buf;
+
+	if (dev != dcssblk_root_dev) {
+		return -EINVAL;
+	}
+	local_buf = kmalloc(count + 1, GFP_KERNEL);
+	if (local_buf == NULL) {
+		return -ENOMEM;
+	}
+	/*
+	 * parse input
+	 */
+	for (i = 0; ((*(buf+i)!='\0') && (*(buf+i)!='\n') && i < count); i++) {
+		local_buf[i] = toupper(buf[i]);
+	}
+	local_buf[i] = '\0';
+	if ((i == 0) || (i > 8)) {
+		rc = -ENAMETOOLONG;
+		goto out_buf;
+	}
+
+	down_write(&dcssblk_devices_sem);
+	dev_info = dcssblk_get_device_by_name(local_buf);
+	if (dev_info == NULL) {
+		up_write(&dcssblk_devices_sem);
+		pr_warning("Device %s cannot be removed because it is not a "
+			   "known device\n", local_buf);
+		rc = -ENODEV;
+		goto out_buf;
+	}
+	if (atomic_read(&dev_info->use_count) != 0) {
+		up_write(&dcssblk_devices_sem);
+		pr_warning("Device %s cannot be removed while it is in "
+			   "use\n", local_buf);
+		rc = -EBUSY;
+		goto out_buf;
+	}
+
+	list_del(&dev_info->lh);
+	del_gendisk(dev_info->gd);
+	blk_cleanup_queue(dev_info->dcssblk_queue);
+	dev_info->gd->queue = NULL;
+	put_disk(dev_info->gd);
+	device_unregister(&dev_info->dev);
+
+	/* unload all related segments */
+	list_for_each_entry(entry, &dev_info->seg_list, lh)
+		segment_unload(entry->segment_name);
+
+	put_device(&dev_info->dev);
+	up_write(&dcssblk_devices_sem);
+
+	rc = count;
+out_buf:
+	kfree(local_buf);
+	return rc;
+}
+
+static int
+dcssblk_open(struct block_device *bdev, fmode_t mode)
+{
+	struct dcssblk_dev_info *dev_info;
+	int rc;
+
+	dev_info = bdev->bd_disk->private_data;
+	if (NULL == dev_info) {
+		rc = -ENODEV;
+		goto out;
+	}
+	atomic_inc(&dev_info->use_count);
+	bdev->bd_block_size = 4096;
+	rc = 0;
+out:
+	return rc;
+}
+
+static int
+dcssblk_release(struct gendisk *disk, fmode_t mode)
+{
+	struct dcssblk_dev_info *dev_info = disk->private_data;
+	struct segment_info *entry;
+	int rc;
+
+	if (!dev_info) {
+		rc = -ENODEV;
+		goto out;
+	}
+	down_write(&dcssblk_devices_sem);
+	if (atomic_dec_and_test(&dev_info->use_count)
+	    && (dev_info->save_pending)) {
+		pr_info("Device %s has become idle and is being saved "
+			"now\n", dev_info->segment_name);
+		list_for_each_entry(entry, &dev_info->seg_list, lh) {
+			segment_save(entry->segment_name);
+		}
+		dev_info->save_pending = 0;
+	}
+	up_write(&dcssblk_devices_sem);
+	rc = 0;
+out:
+	return rc;
+}
+
+static void
+dcssblk_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct bio_vec *bvec;
+	unsigned long index;
+	unsigned long page_addr;
+	unsigned long source_addr;
+	unsigned long bytes_done;
+	int i;
+
+	bytes_done = 0;
+	dev_info = bio->bi_bdev->bd_disk->private_data;
+	if (dev_info == NULL)
+		goto fail;
+	if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+		/* Request is not page-aligned. */
+		goto fail;
+	if (((bio->bi_size >> 9) + bio->bi_sector)
+			> get_capacity(bio->bi_bdev->bd_disk)) {
+		/* Request beyond end of DCSS segment. */
+		goto fail;
+	}
+	/* verify data transfer direction */
+	if (dev_info->is_shared) {
+		switch (dev_info->segment_type) {
+		case SEG_TYPE_SR:
+		case SEG_TYPE_ER:
+		case SEG_TYPE_SC:
+			/* cannot write to these segments */
+			if (bio_data_dir(bio) == WRITE) {
+				pr_warning("Writing to %s failed because it "
+					   "is a read-only device\n",
+					   dev_name(&dev_info->dev));
+				goto fail;
+			}
+		}
+	}
+
+	index = (bio->bi_sector >> 3);
+	bio_for_each_segment(bvec, bio, i) {
+		page_addr = (unsigned long)
+			page_address(bvec->bv_page) + bvec->bv_offset;
+		source_addr = dev_info->start + (index<<12) + bytes_done;
+		if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0)
+			// More paranoia.
+			goto fail;
+		if (bio_data_dir(bio) == READ) {
+			memcpy((void*)page_addr, (void*)source_addr,
+				bvec->bv_len);
+		} else {
+			memcpy((void*)source_addr, (void*)page_addr,
+				bvec->bv_len);
+		}
+		bytes_done += bvec->bv_len;
+	}
+	bio_endio(bio, 0);
+	return;
+fail:
+	bio_io_error(bio);
+}
+
+static int
+dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
+			void **kaddr, unsigned long *pfn)
+{
+	struct dcssblk_dev_info *dev_info;
+	unsigned long pgoff;
+
+	dev_info = bdev->bd_disk->private_data;
+	if (!dev_info)
+		return -ENODEV;
+	if (secnum % (PAGE_SIZE/512))
+		return -EINVAL;
+	pgoff = secnum / (PAGE_SIZE / 512);
+	if ((pgoff+1)*PAGE_SIZE-1 > dev_info->end - dev_info->start)
+		return -ERANGE;
+	*kaddr = (void *) (dev_info->start+pgoff*PAGE_SIZE);
+	*pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
+
+	return 0;
+}
+
+static void
+dcssblk_check_params(void)
+{
+	int rc, i, j, k;
+	char buf[DCSSBLK_PARM_LEN + 1];
+	struct dcssblk_dev_info *dev_info;
+
+	for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
+	     i++) {
+		for (j = i; (dcssblk_segments[j] != ',')  &&
+			    (dcssblk_segments[j] != '\0') &&
+			    (dcssblk_segments[j] != '(')  &&
+			    (j < DCSSBLK_PARM_LEN); j++)
+		{
+			buf[j-i] = dcssblk_segments[j];
+		}
+		buf[j-i] = '\0';
+		rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i);
+		if ((rc >= 0) && (dcssblk_segments[j] == '(')) {
+			for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++)
+				buf[k] = toupper(buf[k]);
+			buf[k] = '\0';
+			if (!strncmp(&dcssblk_segments[j], "(local)", 7)) {
+				down_read(&dcssblk_devices_sem);
+				dev_info = dcssblk_get_device_by_name(buf);
+				up_read(&dcssblk_devices_sem);
+				if (dev_info)
+					dcssblk_shared_store(&dev_info->dev,
+							     NULL, "0\n", 2);
+			}
+		}
+		while ((dcssblk_segments[j] != ',') &&
+		       (dcssblk_segments[j] != '\0'))
+		{
+			j++;
+		}
+		if (dcssblk_segments[j] == '\0')
+			break;
+		i = j;
+	}
+}
+
+/*
+ * Suspend / Resume
+ */
+static int dcssblk_freeze(struct device *dev)
+{
+	struct dcssblk_dev_info *dev_info;
+	int rc = 0;
+
+	list_for_each_entry(dev_info, &dcssblk_devices, lh) {
+		switch (dev_info->segment_type) {
+			case SEG_TYPE_SR:
+			case SEG_TYPE_ER:
+			case SEG_TYPE_SC:
+				if (!dev_info->is_shared)
+					rc = -EINVAL;
+				break;
+			default:
+				rc = -EINVAL;
+				break;
+		}
+		if (rc)
+			break;
+	}
+	if (rc)
+		pr_err("Suspending the system failed because DCSS device %s "
+		       "is writable\n",
+		       dev_info->segment_name);
+	return rc;
+}
+
+static int dcssblk_restore(struct device *dev)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
+	unsigned long start, end;
+	int rc = 0;
+
+	list_for_each_entry(dev_info, &dcssblk_devices, lh) {
+		list_for_each_entry(entry, &dev_info->seg_list, lh) {
+			segment_unload(entry->segment_name);
+			rc = segment_load(entry->segment_name, SEGMENT_SHARED,
+					  &start, &end);
+			if (rc < 0) {
+// TODO in_use check ?
+				segment_warning(rc, entry->segment_name);
+				goto out_panic;
+			}
+			if (start != entry->start || end != entry->end) {
+				pr_err("The address range of DCSS %s changed "
+				       "while the system was suspended\n",
+				       entry->segment_name);
+				goto out_panic;
+			}
+		}
+	}
+	return 0;
+out_panic:
+	panic("fatal dcssblk resume error\n");
+}
+
+static int dcssblk_thaw(struct device *dev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops dcssblk_pm_ops = {
+	.freeze		= dcssblk_freeze,
+	.thaw		= dcssblk_thaw,
+	.restore	= dcssblk_restore,
+};
+
+static struct platform_driver dcssblk_pdrv = {
+	.driver = {
+		.name	= "dcssblk",
+		.owner	= THIS_MODULE,
+		.pm	= &dcssblk_pm_ops,
+	},
+};
+
+static struct platform_device *dcssblk_pdev;
+
+
+/*
+ * The init/exit functions.
+ */
+static void __exit
+dcssblk_exit(void)
+{
+	platform_device_unregister(dcssblk_pdev);
+	platform_driver_unregister(&dcssblk_pdrv);
+	root_device_unregister(dcssblk_root_dev);
+	unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
+}
+
+static int __init
+dcssblk_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&dcssblk_pdrv);
+	if (rc)
+		return rc;
+
+	dcssblk_pdev = platform_device_register_simple("dcssblk", -1, NULL,
+							0);
+	if (IS_ERR(dcssblk_pdev)) {
+		rc = PTR_ERR(dcssblk_pdev);
+		goto out_pdrv;
+	}
+
+	dcssblk_root_dev = root_device_register("dcssblk");
+	if (IS_ERR(dcssblk_root_dev)) {
+		rc = PTR_ERR(dcssblk_root_dev);
+		goto out_pdev;
+	}
+	rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
+	if (rc)
+		goto out_root;
+	rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
+	if (rc)
+		goto out_root;
+	rc = register_blkdev(0, DCSSBLK_NAME);
+	if (rc < 0)
+		goto out_root;
+	dcssblk_major = rc;
+	init_rwsem(&dcssblk_devices_sem);
+
+	dcssblk_check_params();
+	return 0;
+
+out_root:
+	root_device_unregister(dcssblk_root_dev);
+out_pdev:
+	platform_device_unregister(dcssblk_pdev);
+out_pdrv:
+	platform_driver_unregister(&dcssblk_pdrv);
+	return rc;
+}
+
+module_init(dcssblk_init);
+module_exit(dcssblk_exit);
+
+module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444);
+MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, "
+		 "comma-separated list of sets, names within each "
+		 "set are separated by colons, each set contains "
+		 "names of contiguous segments and each name max. 8 chars.\n"
+		 "Adding \"(local)\" to the end of each set equals echoing 0 "
+		 "to /sys/devices/dcssblk/<device name>/shared after loading "
+		 "the contiguous segments - \n"
+		 "e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\"");
+
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/block/xpram.c b/ap/os/linux/linux-3.4.x/drivers/s390/block/xpram.c
new file mode 100644
index 0000000..690c333
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/block/xpram.c
@@ -0,0 +1,475 @@
+/*
+ * Xpram.c -- the S/390 expanded memory RAM-disk
+ *           
+ * significant parts of this code are based on
+ * the sbull device driver presented in
+ * A. Rubini: Linux Device Drivers
+ *
+ * Author of XPRAM specific coding: Reinhard Buendgen
+ *                                  buendgen@de.ibm.com
+ * Rewrite for 2.5: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * External interfaces:
+ *   Interfaces to linux kernel
+ *        xpram_setup: read kernel parameters
+ *   Device specific file operations
+ *        xpram_ioctl
+ *        xpram_open
+ *
+ * "ad-hoc" partitioning:
+ *    the expanded memory can be partitioned among several devices 
+ *    (with different minors). The partitioning can be set up by
+ *    kernel or module parameters (int devs & int sizes[])
+ *
+ * Potential future improvements:
+ *   generic hard disk support to replace ad-hoc partitioning
+ */
+
+#define KMSG_COMPONENT "xpram"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ctype.h>  /* isdigit, isxdigit */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/hdreg.h>  /* HDIO_GETGEO */
+#include <linux/device.h>
+#include <linux/bio.h>
+#include <linux/suspend.h>
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+#include <asm/uaccess.h>
+
+#define XPRAM_NAME	"xpram"
+#define XPRAM_DEVS	1	/* one partition */
+#define XPRAM_MAX_DEVS	32	/* maximal number of devices (partitions) */
+
+typedef struct {
+	unsigned int	size;		/* size of xpram segment in pages */
+	unsigned int	offset;		/* start page of xpram segment */
+} xpram_device_t;
+
+static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
+static unsigned int xpram_sizes[XPRAM_MAX_DEVS];
+static struct gendisk *xpram_disks[XPRAM_MAX_DEVS];
+static struct request_queue *xpram_queues[XPRAM_MAX_DEVS];
+static unsigned int xpram_pages;
+static int xpram_devs;
+
+/*
+ * Parameter parsing functions.
+ */
+static int devs = XPRAM_DEVS;
+static char *sizes[XPRAM_MAX_DEVS];
+
+module_param(devs, int, 0);
+module_param_array(sizes, charp, NULL, 0);
+
+MODULE_PARM_DESC(devs, "number of devices (\"partitions\"), " \
+		 "the default is " __MODULE_STRING(XPRAM_DEVS) "\n");
+MODULE_PARM_DESC(sizes, "list of device (partition) sizes " \
+		 "the defaults are 0s\n" \
+		 "All devices with size 0 equally partition the "
+		 "remaining space on the expanded storage not "
+		 "claimed by explicit sizes\n");
+MODULE_LICENSE("GPL");
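+
+/*
+ * For example, loading the module with "devs=2 sizes=512M" creates two
+ * partitions: the first is 512 MB, the second gets the remaining expanded
+ * storage (a size of 0 means "share the rest equally").
+ */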
+
+/*
+ * Copy expanded memory page (4kB) into main memory                  
+ * Arguments                                                         
+ *           page_addr:    address of target page                    
+ *           xpage_index:  index of expanded memory page
+ * Return value                                                      
+ *           0:            if operation succeeds
+ *           -EIO:         if pgin failed
+ *           -ENXIO:       if xpram has vanished
+ */
+static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
+{
+	int cc = 2;	/* return unused cc 2 if pgin traps */
+
+	asm volatile(
+		"	.insn	rre,0xb22e0000,%1,%2\n"  /* pgin %1,%2 */
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
+	if (cc == 3)
+		return -ENXIO;
+	if (cc == 2)
+		return -ENXIO;
+	if (cc == 1)
+		return -EIO;
+	return 0;
+}
+
+/*
+ * Copy a 4kB page of main memory to an expanded memory page          
+ * Arguments                                                          
+ *           page_addr:    address of source page                     
+ *           xpage_index:  index of expanded memory page
+ * Return value                                                       
+ *           0:            if operation succeeds
+ *           -EIO:         if pgout failed
+ *           -ENXIO:       if xpram has vanished
+ */
+static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
+{
+	int cc = 2;	/* return unused cc 2 if pgout traps */
+
+	asm volatile(
+		"	.insn	rre,0xb22f0000,%1,%2\n"  /* pgout %1,%2 */
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
+	if (cc == 3)
+		return -ENXIO;
+	if (cc == 2)
+		return -ENXIO;
+	if (cc == 1)
+		return -EIO;
+	return 0;
+}
+
+/*
+ * Check if xpram is available.
+ */
+static int xpram_present(void)
+{
+	unsigned long mem_page;
+	int rc;
+
+	mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
+	if (!mem_page)
+		return -ENOMEM;
+	rc = xpram_page_in(mem_page, 0);
+	free_page(mem_page);
+	return rc ? -ENXIO : 0;
+}
+
+/*
+ * Return index of the last available xpram page.
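+ * The index is determined bit by bit, starting with the most significant
+ * bit, by testing whether a pgin at the candidate index still succeeds.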
+ */
+static unsigned long xpram_highest_page_index(void)
+{
+	unsigned int page_index, add_bit;
+	unsigned long mem_page;
+
+	mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
+	if (!mem_page)
+		return 0;
+
+	page_index = 0;
+	add_bit = 1ULL << (sizeof(unsigned int)*8 - 1);
+	while (add_bit > 0) {
+		if (xpram_page_in(mem_page, page_index | add_bit) == 0)
+			page_index |= add_bit;
+		add_bit >>= 1;
+	}
+
+	free_page (mem_page);
+
+	return page_index;
+}
+
+/*
+ * Block device make request function.
+ */
+static void xpram_make_request(struct request_queue *q, struct bio *bio)
+{
+	xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
+	struct bio_vec *bvec;
+	unsigned int index;
+	unsigned long page_addr;
+	unsigned long bytes;
+	int i;
+
+	if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+		/* Request is not page-aligned. */
+		goto fail;
+	if ((bio->bi_size >> 12) > xdev->size)
+		/* Request exceeds size of the device. */
+		goto fail;
+	if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset)
+		goto fail;
+	index = (bio->bi_sector >> 3) + xdev->offset;
+	bio_for_each_segment(bvec, bio, i) {
+		page_addr = (unsigned long)
+			kmap(bvec->bv_page) + bvec->bv_offset;
+		bytes = bvec->bv_len;
+		if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
+			/* More paranoia. */
+			goto fail;
+		while (bytes > 0) {
+			if (bio_data_dir(bio) == READ) {
+				if (xpram_page_in(page_addr, index) != 0)
+					goto fail;
+			} else {
+				if (xpram_page_out(page_addr, index) != 0)
+					goto fail;
+			}
+			page_addr += 4096;
+			bytes -= 4096;
+			index++;
+		}
+	}
+	set_bit(BIO_UPTODATE, &bio->bi_flags);
+	bio_endio(bio, 0);
+	return;
+fail:
+	bio_io_error(bio);
+}
+
+static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+	unsigned long size;
+
+	/*
+	 * get geometry: we have to fake one...  trim the size to a
+	 * multiple of 64 (32k): say we have 16 sectors, 4 heads and
+	 * whatever cylinders fit. Tell also that data starts at sector 4.
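+	 * For example, 1 GB of expanded storage (262144 pages) yields
+	 * 2097152 sectors = 32768 cylinders * 4 heads * 16 sectors.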
+	 */
+	size = (xpram_pages * 8) & ~0x3f;
+	geo->cylinders = size >> 6;
+	geo->heads = 4;
+	geo->sectors = 16;
+	geo->start = 4;
+	return 0;
+}
+
+static const struct block_device_operations xpram_devops =
+{
+	.owner	= THIS_MODULE,
+	.getgeo	= xpram_getgeo,
+};
+
+/*
+ * Setup xpram_sizes array.
+ */
+static int __init xpram_setup_sizes(unsigned long pages)
+{
+	unsigned long mem_needed;
+	unsigned long mem_auto;
+	unsigned long long size;
+	int mem_auto_no;
+	int i;
+
+	/* Check number of devices. */
+	if (devs <= 0 || devs > XPRAM_MAX_DEVS) {
+		pr_err("%d is not a valid number of XPRAM devices\n", devs);
+		return -EINVAL;
+	}
+	xpram_devs = devs;
+
+	/*
+	 * Copy sizes array to xpram_sizes and align partition
+	 * sizes to page boundary.
+	 */
+	mem_needed = 0;
+	mem_auto_no = 0;
+	for (i = 0; i < xpram_devs; i++) {
+		if (sizes[i]) {
+			size = simple_strtoull(sizes[i], &sizes[i], 0);
+			switch (sizes[i][0]) {
+			case 'g':
+			case 'G':
+				size <<= 20;
+				break;
+			case 'm':
+			case 'M':
+				size <<= 10;
+			}
+			xpram_sizes[i] = (size + 3) & -4UL;
+		}
+		if (xpram_sizes[i])
+			mem_needed += xpram_sizes[i];
+		else
+			mem_auto_no++;
+	}
+	
+	pr_info("  number of devices (partitions): %d \n", xpram_devs);
+	for (i = 0; i < xpram_devs; i++) {
+		if (xpram_sizes[i])
+			pr_info("  size of partition %d: %u kB\n",
+				i, xpram_sizes[i]);
+		else
+			pr_info("  size of partition %d to be set "
+				"automatically\n",i);
+	}
+	pr_info("  memory needed (for sized partitions): %lu kB\n",
+		mem_needed);
+	pr_info("  partitions to be sized automatically: %d\n",
+		mem_auto_no);
+
+	if (mem_needed > pages * 4) {
+		pr_err("Not enough expanded memory available\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * partitioning:
+	 * xpram_sizes[i] != 0; partition i has size xpram_sizes[i] kB
+	 * else:             ; all partitions with zero xpram_sizes[i]
+	 *                     partition equally the remaining space
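+	 * e.g. with 64 MB of xpram, devs=3 and sizes=16M: partition 0 gets
+	 * 16 MB, partitions 1 and 2 get 24 MB each.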
+	 */
+	if (mem_auto_no) {
+		mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4;
+		pr_info("  automatically determined "
+			"partition size: %lu kB\n", mem_auto);
+		for (i = 0; i < xpram_devs; i++)
+			if (xpram_sizes[i] == 0)
+				xpram_sizes[i] = mem_auto;
+	}
+	return 0;
+}
+
+static int __init xpram_setup_blkdev(void)
+{
+	unsigned long offset;
+	int i, rc = -ENOMEM;
+
+	for (i = 0; i < xpram_devs; i++) {
+		xpram_disks[i] = alloc_disk(1);
+		if (!xpram_disks[i])
+			goto out;
+		xpram_queues[i] = blk_alloc_queue(GFP_KERNEL);
+		if (!xpram_queues[i]) {
+			put_disk(xpram_disks[i]);
+			goto out;
+		}
+		blk_queue_make_request(xpram_queues[i], xpram_make_request);
+		blk_queue_logical_block_size(xpram_queues[i], 4096);
+	}
+
+	/*
+	 * Register xpram major.
+	 */
+	rc = register_blkdev(XPRAM_MAJOR, XPRAM_NAME);
+	if (rc < 0)
+		goto out;
+
+	/*
+	 * Setup device structures.
+	 */
+	offset = 0;
+	for (i = 0; i < xpram_devs; i++) {
+		struct gendisk *disk = xpram_disks[i];
+
+		xpram_devices[i].size = xpram_sizes[i] / 4;
+		xpram_devices[i].offset = offset;
+		offset += xpram_devices[i].size;
+		disk->major = XPRAM_MAJOR;
+		disk->first_minor = i;
+		disk->fops = &xpram_devops;
+		disk->private_data = &xpram_devices[i];
+		disk->queue = xpram_queues[i];
+		sprintf(disk->disk_name, "slram%d", i);
+		set_capacity(disk, xpram_sizes[i] << 1);
+		add_disk(disk);
+	}
+
+	return 0;
+out:
+	while (i--) {
+		blk_cleanup_queue(xpram_queues[i]);
+		put_disk(xpram_disks[i]);
+	}
+	return rc;
+}
+
+/*
+ * Resume failed: Print error message and call panic.
+ */
+static void xpram_resume_error(const char *message)
+{
+	pr_err("Resuming the system failed: %s\n", message);
+	panic("xpram resume error\n");
+}
+
+/*
+ * Check if xpram setup changed between suspend and resume.
+ */
+static int xpram_restore(struct device *dev)
+{
+	if (!xpram_pages)
+		return 0;
+	if (xpram_present() != 0)
+		xpram_resume_error("xpram disappeared");
+	if (xpram_pages != xpram_highest_page_index() + 1)
+		xpram_resume_error("Size of xpram changed");
+	return 0;
+}
+
+static const struct dev_pm_ops xpram_pm_ops = {
+	.restore	= xpram_restore,
+};
+
+static struct platform_driver xpram_pdrv = {
+	.driver = {
+		.name	= XPRAM_NAME,
+		.owner	= THIS_MODULE,
+		.pm	= &xpram_pm_ops,
+	},
+};
+
+static struct platform_device *xpram_pdev;
+
+/*
+ * Finally, the init/exit functions.
+ */
+static void __exit xpram_exit(void)
+{
+	int i;
+	for (i = 0; i < xpram_devs; i++) {
+		del_gendisk(xpram_disks[i]);
+		blk_cleanup_queue(xpram_queues[i]);
+		put_disk(xpram_disks[i]);
+	}
+	unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
+	platform_device_unregister(xpram_pdev);
+	platform_driver_unregister(&xpram_pdrv);
+}
+
+static int __init xpram_init(void)
+{
+	int rc;
+
+	/* Find out size of expanded memory. */
+	if (xpram_present() != 0) {
+		pr_err("No expanded memory available\n");
+		return -ENODEV;
+	}
+	xpram_pages = xpram_highest_page_index() + 1;
+	pr_info("  %u pages expanded memory found (%lu KB).\n",
+		xpram_pages, (unsigned long) xpram_pages*4);
+	rc = xpram_setup_sizes(xpram_pages);
+	if (rc)
+		return rc;
+	rc = platform_driver_register(&xpram_pdrv);
+	if (rc)
+		return rc;
+	xpram_pdev = platform_device_register_simple(XPRAM_NAME, -1, NULL, 0);
+	if (IS_ERR(xpram_pdev)) {
+		rc = PTR_ERR(xpram_pdev);
+		goto fail_platform_driver_unregister;
+	}
+	rc = xpram_setup_blkdev();
+	if (rc)
+		goto fail_platform_device_unregister;
+	return 0;
+
+fail_platform_device_unregister:
+	platform_device_unregister(xpram_pdev);
+fail_platform_driver_unregister:
+	platform_driver_unregister(&xpram_pdrv);
+	return rc;
+}
+
+module_init(xpram_init);
+module_exit(xpram_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/Kconfig b/ap/os/linux/linux-3.4.x/drivers/s390/char/Kconfig
new file mode 100644
index 0000000..2c9a776
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/Kconfig
@@ -0,0 +1,178 @@
+comment "S/390 character device drivers"
+	depends on S390
+
+config TN3270
+	def_tristate y
+	prompt "Support for locally attached 3270 terminals"
+	depends on CCW
+	help
+	  Include support for IBM 3270 terminals.
+
+config TN3270_TTY
+	def_tristate y
+	prompt "Support for tty input/output on 3270 terminals"
+	depends on TN3270
+	help
+	  Include support for using an IBM 3270 terminal as a Linux tty.
+
+config TN3270_FS
+	def_tristate m
+	prompt "Support for fullscreen applications on 3270 terminals"
+	depends on TN3270
+	help
+	  Include support for fullscreen applications on an IBM 3270 terminal.
+
+config TN3270_CONSOLE
+	def_bool y
+	prompt "Support for console on 3270 terminal"
+	depends on TN3270=y && TN3270_TTY=y
+	help
+	  Include support for using an IBM 3270 terminal as a Linux system
+	  console.  Available only if 3270 support is compiled in statically.
+
+config TN3215
+	def_bool y
+	prompt "Support for 3215 line mode terminal"
+	depends on CCW
+	help
+	  Include support for IBM 3215 line-mode terminals.
+
+config TN3215_CONSOLE
+	def_bool y
+	prompt "Support for console on 3215 line mode terminal"
+	depends on TN3215
+	help
+	  Include support for using an IBM 3215 line-mode terminal as a
+	  Linux system console.
+
+config CCW_CONSOLE
+	def_bool y if TN3215_CONSOLE || TN3270_CONSOLE
+
+config SCLP_TTY
+	def_bool y
+	prompt "Support for SCLP line mode terminal"
+	depends on S390
+	help
+	  Include support for IBM SCLP line-mode terminals.
+
+config SCLP_CONSOLE
+	def_bool y
+	prompt "Support for console on SCLP line mode terminal"
+	depends on SCLP_TTY
+	help
+	  Include support for using an IBM HWC line-mode terminal as the Linux
+	  system console.
+
+config SCLP_VT220_TTY
+	def_bool y
+	prompt "Support for SCLP VT220-compatible terminal"
+	depends on S390
+	help
+	  Include support for an IBM SCLP VT220-compatible terminal.
+
+config SCLP_VT220_CONSOLE
+	def_bool y
+	prompt "Support for console on SCLP VT220-compatible terminal"
+	depends on SCLP_VT220_TTY
+	help
+	  Include support for using an IBM SCLP VT220-compatible terminal as a
+	  Linux system console.
+
+config SCLP_CPI
+	def_tristate m
+	prompt "Control-Program Identification"
+	depends on S390
+	help
+	  This option enables the hardware console interface for system
+	  identification. This is commonly used for workload management and
+	  gives you a nice name for the system on the service element.
+	  Please select this option as a module since built-in operation is
+	  completely untested.
+	  You should only select this option if you know what you are doing,
+	  need this feature and intend to run your kernel in LPAR.
+
+config SCLP_ASYNC
+	def_tristate m
+	prompt "Support for Call Home via Asynchronous SCLP Records"
+	depends on S390
+	help
+	  This option enables the call home function, which is able to inform
+	  the service element and connected organisations about a kernel panic.
+	  You should only select this option if you know what you are doing,
+	  want to inform other people about your kernel panics,
+	  need this feature and intend to run your kernel in LPAR.
+
+config S390_TAPE
+	def_tristate m
+	prompt "S/390 tape device support"
+	depends on CCW
+	help
+	  Select this option if you want to access channel-attached tape
+	  devices on IBM S/390 or zSeries.
+	  If you select this option you will also want to select at
+	  least one of the tape interface options and one of the tape
+	  hardware options in order to access a tape device.
+	  This option is also available as a module. The module will be
+	  called tape390 and include all selected interfaces and
+	  hardware drivers.
+
+comment "S/390 tape hardware support"
+	depends on S390_TAPE
+
+config S390_TAPE_34XX
+	def_tristate m
+	prompt "Support for 3480/3490 tape hardware"
+	depends on S390_TAPE
+	help
+	  Select this option if you want to access IBM 3480/3490 magnetic
+	  tape subsystems and 100% compatibles.
+	  It is safe to say "Y" here.
+
+config S390_TAPE_3590
+	def_tristate m
+	prompt "Support for 3590 tape hardware"
+	depends on S390_TAPE
+	help
+	  Select this option if you want to access IBM 3590 magnetic
+	  tape subsystems and 100% compatibles.
+	  It is safe to say "Y" here.
+
+config VMLOGRDR
+	def_tristate m
+	prompt "Support for the z/VM recording system services (VM only)"
+	depends on IUCV
+	help
+	  Select this option if you want to be able to receive records collected
+	  by the z/VM recording system services, e.g. from *LOGREC, *ACCOUNT or
+	  *SYMPTOM.
+	  This driver depends on the IUCV support driver.
+
+config VMCP
+	def_bool y
+	prompt "Support for the z/VM CP interface"
+	depends on S390
+	help
+	  Select this option if you want to be able to interact with the control
+	  program on z/VM.
+
+config MONREADER
+	def_tristate m
+	prompt "API for reading z/VM monitor service records"
+	depends on IUCV
+	help
+	  Character device driver for reading z/VM monitor service records
+
+config MONWRITER
+	def_tristate m
+	prompt "API for writing z/VM monitor service records"
+	depends on S390
+	help
+	  Character device driver for writing z/VM monitor service records
+
+config S390_VMUR
+	def_tristate m
+	prompt "z/VM unit record device driver"
+	depends on S390
+	help
+	  Character device driver for z/VM reader, puncher and printer.
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/Makefile b/ap/os/linux/linux-3.4.x/drivers/s390/char/Makefile
new file mode 100644
index 0000000..f3c3252
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/Makefile
@@ -0,0 +1,35 @@
+#
+# S/390 character devices
+#
+
+obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
+	 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o
+
+obj-$(CONFIG_TN3270) += raw3270.o
+obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
+obj-$(CONFIG_TN3270_TTY) += tty3270.o
+obj-$(CONFIG_TN3270_FS) += fs3270.o
+
+obj-$(CONFIG_TN3215) += con3215.o
+
+obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
+obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
+obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
+obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
+obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
+
+obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
+obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
+obj-$(CONFIG_VMCP) += vmcp.o
+
+tape-$(CONFIG_PROC_FS) += tape_proc.o
+tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y)
+obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o
+obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
+obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
+obj-$(CONFIG_MONREADER) += monreader.o
+obj-$(CONFIG_MONWRITER) += monwriter.o
+obj-$(CONFIG_S390_VMUR) += vmur.o
+
+zcore_mod-objs := sclp_sdias.o zcore.o
+obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/con3215.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/con3215.c
new file mode 100644
index 0000000..fa47d8d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/con3215.c
@@ -0,0 +1,1187 @@
+/*
+ * 3215 line mode terminal driver.
+ *
+ * Copyright IBM Corp. 1999, 2009
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Updated:
+ *  Aug-2000: Added tab support
+ *	      Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/vt_kern.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/io.h>
+#include <asm/ebcdic.h>
+#include <asm/uaccess.h>
+#include <asm/delay.h>
+#include <asm/cpcmd.h>
+#include <asm/setup.h>
+
+#include "ctrlchar.h"
+
+#define NR_3215		    1
+#define NR_3215_REQ	    (4*NR_3215)
+#define RAW3215_BUFFER_SIZE 65536     /* output buffer size */
+#define RAW3215_INBUF_SIZE  256	      /* input buffer size */
+#define RAW3215_MIN_SPACE   128	      /* minimum free space for wakeup */
+#define RAW3215_MIN_WRITE   1024      /* min. length for immediate output */
+#define RAW3215_MAX_BYTES   3968      /* max. bytes to write with one ssch */
+#define RAW3215_MAX_NEWLINE 50	      /* max. lines to write with one ssch */
+#define RAW3215_NR_CCWS	    3
+#define RAW3215_TIMEOUT	    HZ/10     /* time for delayed output */
+
+#define RAW3215_FIXED	    1	      /* 3215 console device is not to be freed */
+#define RAW3215_ACTIVE	    2	      /* set if the device is in use */
+#define RAW3215_WORKING	    4	      /* set if a request is being worked on */
+#define RAW3215_THROTTLED   8	      /* set if reading is disabled */
+#define RAW3215_STOPPED	    16	      /* set if writing is disabled */
+#define RAW3215_CLOSING	    32	      /* set while in close process */
+#define RAW3215_TIMER_RUNS  64	      /* set if the output delay timer is on */
+#define RAW3215_FLUSHING    128	      /* set to flush buffer (no delay) */
+#define RAW3215_FROZEN	    256	      /* set if 3215 is frozen for suspend */
+
+#define TAB_STOP_SIZE	    8	      /* tab stop size */
+
+/*
+ * Request types for a 3215 device
+ */
+enum raw3215_type {
+	RAW3215_FREE, RAW3215_READ, RAW3215_WRITE
+};
+
+/*
+ * Request structure for a 3215 device
+ */
+struct raw3215_req {
+	enum raw3215_type type;	      /* type of the request */
+	int start, len;		      /* start index & len in output buffer */
+	int delayable;		      /* indication to wait for more data */
+	int residual;		      /* residual count for read request */
+	struct ccw1 ccws[RAW3215_NR_CCWS]; /* space for the channel program */
+	struct raw3215_info *info;    /* pointer to main structure */
+	struct raw3215_req *next;     /* pointer to next request */
+} __attribute__ ((aligned(8)));
+
+struct raw3215_info {
+	struct ccw_device *cdev;      /* device for tty driver */
+	spinlock_t *lock;	      /* pointer to irq lock */
+	int flags;		      /* state flags */
+	char *buffer;		      /* pointer to output buffer */
+	char *inbuf;		      /* pointer to input buffer */
+	int head;		      /* first free byte in output buffer */
+	int count;		      /* number of bytes in output buffer */
+	int written;		      /* number of bytes in write requests */
+	struct tty_struct *tty;	      /* pointer to tty structure if present */
+	struct raw3215_req *queued_read; /* pointer to queued read requests */
+	struct raw3215_req *queued_write;/* pointer to queued write requests */
+	struct tasklet_struct tlet;   /* tasklet to invoke tty_wakeup */
+	wait_queue_head_t empty_wait; /* wait queue for flushing */
+	struct timer_list timer;      /* timer for delayed output */
+	int line_pos;		      /* position on the line (for tabs) */
+	char ubuffer[80];	      /* copy_from_user buffer */
+};
+
+/* array of 3215 devices structures */
+static struct raw3215_info *raw3215[NR_3215];
+/* spinlock to protect the raw3215 array */
+static DEFINE_SPINLOCK(raw3215_device_lock);
+/* list of free request structures */
+static struct raw3215_req *raw3215_freelist;
+/* spinlock to protect free list */
+static spinlock_t raw3215_freelist_lock;
+
+static struct tty_driver *tty3215_driver;
+
+/*
+ * Get a request structure from the free list
+ */
+static inline struct raw3215_req *raw3215_alloc_req(void)
+{
+	struct raw3215_req *req;
+	unsigned long flags;
+
+	spin_lock_irqsave(&raw3215_freelist_lock, flags);
+	req = raw3215_freelist;
+	raw3215_freelist = req->next;
+	spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
+	return req;
+}
+
+/*
+ * Put a request structure back to the free list
+ */
+static inline void raw3215_free_req(struct raw3215_req *req)
+{
+	unsigned long flags;
+
+	if (req->type == RAW3215_FREE)
+		return;		/* don't free a free request */
+	req->type = RAW3215_FREE;
+	spin_lock_irqsave(&raw3215_freelist_lock, flags);
+	req->next = raw3215_freelist;
+	raw3215_freelist = req;
+	spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
+}
+
+/*
+ * Set up a read request that reads up to 160 bytes from the 3215 device.
+ * If there is a queued read request it is used, but that shouldn't happen
+ * because a 3215 terminal won't accept a new read before the old one is
+ * completed.
+ */
+static void raw3215_mk_read_req(struct raw3215_info *raw)
+{
+	struct raw3215_req *req;
+	struct ccw1 *ccw;
+
+	/* there can only be ONE read request at a time */
+	req = raw->queued_read;
+	if (req == NULL) {
+		/* no queued read request, use new req structure */
+		req = raw3215_alloc_req();
+		req->type = RAW3215_READ;
+		req->info = raw;
+		raw->queued_read = req;
+	}
+
+	ccw = req->ccws;
+	ccw->cmd_code = 0x0A; /* read inquiry */
+	ccw->flags = 0x20;    /* ignore incorrect length */
+	ccw->count = 160;
+	ccw->cda = (__u32) __pa(raw->inbuf);
+}
+
+/*
+ * Set up a write request with the information from the main structure.
+ * A ccw chain is created that writes as much as possible from the output
+ * buffer to the 3215 device. If a queued write exists it is replaced by
+ * the new, probably lengthened request.
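+ * The output buffer is used as a ring buffer of RAW3215_BUFFER_SIZE bytes;
+ * a request covers a contiguous slice of it, split at the wrap-around into
+ * at most two data CCWs, followed by a trailing NOP CCW.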
+ */
+static void raw3215_mk_write_req(struct raw3215_info *raw)
+{
+	struct raw3215_req *req;
+	struct ccw1 *ccw;
+	int len, count, ix, lines;
+
+	if (raw->count <= raw->written)
+		return;
+	/* check if there is a queued write request */
+	req = raw->queued_write;
+	if (req == NULL) {
+		/* no queued write request, use new req structure */
+		req = raw3215_alloc_req();
+		req->type = RAW3215_WRITE;
+		req->info = raw;
+		raw->queued_write = req;
+	} else {
+		raw->written -= req->len;
+	}
+
+	ccw = req->ccws;
+	req->start = (raw->head - raw->count + raw->written) &
+		     (RAW3215_BUFFER_SIZE - 1);
+	/*
+	 * now we have to count newlines. We can at max accept
+	 * RAW3215_MAX_NEWLINE newlines in a single ssch due to
+	 * a restriction in VM
+	 */
+	lines = 0;
+	ix = req->start;
+	while (lines < RAW3215_MAX_NEWLINE && ix != raw->head) {
+		if (raw->buffer[ix] == 0x15)
+			lines++;
+		ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1);
+	}
+	len = ((ix - 1 - req->start) & (RAW3215_BUFFER_SIZE - 1)) + 1;
+	if (len > RAW3215_MAX_BYTES)
+		len = RAW3215_MAX_BYTES;
+	req->len = len;
+	raw->written += len;
+
+	/* set the indication if we should try to enlarge this request */
+	req->delayable = (ix == raw->head) && (len < RAW3215_MIN_WRITE);
+
+	ix = req->start;
+	while (len > 0) {
+		if (ccw > req->ccws)
+			ccw[-1].flags |= 0x40; /* use command chaining */
+		ccw->cmd_code = 0x01; /* write, auto carrier return */
+		ccw->flags = 0x20;    /* ignore incorrect length ind.  */
+		ccw->cda =
+			(__u32) __pa(raw->buffer + ix);
+		count = len;
+		if (ix + count > RAW3215_BUFFER_SIZE)
+			count = RAW3215_BUFFER_SIZE - ix;
+		ccw->count = count;
+		len -= count;
+		ix = (ix + count) & (RAW3215_BUFFER_SIZE - 1);
+		ccw++;
+	}
+	/*
+	 * Add a NOP to the channel program. 3215 devices are purely
+ * emulated and it's much better to avoid the channel end
+	 * interrupt in this case.
+	 */
+	if (ccw > req->ccws)
+		ccw[-1].flags |= 0x40; /* use command chaining */
+	ccw->cmd_code = 0x03; /* NOP */
+	ccw->flags = 0;
+	ccw->cda = 0;
+	ccw->count = 1;
+}
+
+/*
+ * Start a read or a write request
+ */
+static void raw3215_start_io(struct raw3215_info *raw)
+{
+	struct raw3215_req *req;
+	int res;
+
+	req = raw->queued_read;
+	if (req != NULL &&
+	    !(raw->flags & (RAW3215_WORKING | RAW3215_THROTTLED))) {
+		/* dequeue request */
+		raw->queued_read = NULL;
+		res = ccw_device_start(raw->cdev, req->ccws,
+				       (unsigned long) req, 0, 0);
+		if (res != 0) {
+			/* do_IO failed, put request back to queue */
+			raw->queued_read = req;
+		} else {
+			raw->flags |= RAW3215_WORKING;
+		}
+	}
+	req = raw->queued_write;
+	if (req != NULL &&
+	    !(raw->flags & (RAW3215_WORKING | RAW3215_STOPPED))) {
+		/* dequeue request */
+		raw->queued_write = NULL;
+		res = ccw_device_start(raw->cdev, req->ccws,
+				       (unsigned long) req, 0, 0);
+		if (res != 0) {
+			/* do_IO failed, put request back to queue */
+			raw->queued_write = req;
+		} else {
+			raw->flags |= RAW3215_WORKING;
+		}
+	}
+}
+
+/*
+ * Function to start a delayed output after a delay of RAW3215_TIMEOUT jiffies
+ */
+static void raw3215_timeout(unsigned long __data)
+{
+	struct raw3215_info *raw = (struct raw3215_info *) __data;
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	if (raw->flags & RAW3215_TIMER_RUNS) {
+		del_timer(&raw->timer);
+		raw->flags &= ~RAW3215_TIMER_RUNS;
+		if (!(raw->flags & RAW3215_FROZEN)) {
+			raw3215_mk_write_req(raw);
+			raw3215_start_io(raw);
+		}
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+}
+
+/*
+ * Function to conditionally start an IO. A read is started immediately,
+ * a write is only started immediately if the flush flag is on or the
+ * amount of data is bigger than RAW3215_MIN_WRITE. If a write is not
+ * done immediately a timer is started with a delay of RAW3215_TIMEOUT.
+ */
+static inline void raw3215_try_io(struct raw3215_info *raw)
+{
+	if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FROZEN))
+		return;
+	if (raw->queued_read != NULL)
+		raw3215_start_io(raw);
+	else if (raw->queued_write != NULL) {
+		if ((raw->queued_write->delayable == 0) ||
+		    (raw->flags & RAW3215_FLUSHING)) {
+			/* execute write requests bigger than minimum size */
+			raw3215_start_io(raw);
+			if (raw->flags & RAW3215_TIMER_RUNS) {
+				del_timer(&raw->timer);
+				raw->flags &= ~RAW3215_TIMER_RUNS;
+			}
+		} else if (!(raw->flags & RAW3215_TIMER_RUNS)) {
+			/* delay small writes */
+			init_timer(&raw->timer);
+			raw->timer.expires = RAW3215_TIMEOUT + jiffies;
+			raw->timer.data = (unsigned long) raw;
+			raw->timer.function = raw3215_timeout;
+			add_timer(&raw->timer);
+			raw->flags |= RAW3215_TIMER_RUNS;
+		}
+	}
+}
+
+/*
+ * Call tty_wakeup from tasklet context
+ */
+static void raw3215_wakeup(unsigned long data)
+{
+	struct raw3215_info *raw = (struct raw3215_info *) data;
+	tty_wakeup(raw->tty);
+}
+
+/*
+ * Try to start the next IO and wake up processes waiting on the tty.
+ */
+static void raw3215_next_io(struct raw3215_info *raw)
+{
+	raw3215_mk_write_req(raw);
+	raw3215_try_io(raw);
+	if (raw->tty && RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE)
+		tasklet_schedule(&raw->tlet);
+}
+
+/*
+ * Interrupt routine, called from common io layer
+ */
+static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
+			struct irb *irb)
+{
+	struct raw3215_info *raw;
+	struct raw3215_req *req;
+	struct tty_struct *tty;
+	int cstat, dstat;
+	int count;
+
+	raw = dev_get_drvdata(&cdev->dev);
+	req = (struct raw3215_req *) intparm;
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+	if (cstat != 0)
+		raw3215_next_io(raw);
+	if (dstat & 0x01) { /* we got a unit exception */
+		dstat &= ~0x01;	 /* we can ignore it */
+	}
+	switch (dstat) {
+	case 0x80:
+		if (cstat != 0)
+			break;
+		/* Attention interrupt, someone hit the enter key */
+		raw3215_mk_read_req(raw);
+		raw3215_next_io(raw);
+		break;
+	case 0x08:
+	case 0x0C:
+		/* Channel end interrupt. */
+		if ((raw = req->info) == NULL)
+			return;		     /* That shouldn't happen ... */
+		if (req->type == RAW3215_READ) {
+			/* store residual count, then wait for device end */
+			req->residual = irb->scsw.cmd.count;
+		}
+		if (dstat == 0x08)
+			break;
+	case 0x04:
+		/* Device end interrupt. */
+		if ((raw = req->info) == NULL)
+			return;		     /* That shouldn't happen ... */
+		if (req->type == RAW3215_READ && raw->tty != NULL) {
+			unsigned int cchar;
+
+			tty = raw->tty;
+			count = 160 - req->residual;
+			EBCASC(raw->inbuf, count);
+			cchar = ctrlchar_handle(raw->inbuf, count, tty);
+			switch (cchar & CTRLCHAR_MASK) {
+			case CTRLCHAR_SYSRQ:
+				break;
+
+			case CTRLCHAR_CTRL:
+				tty_insert_flip_char(tty, cchar, TTY_NORMAL);
+				tty_flip_buffer_push(raw->tty);
+				break;
+
+			case CTRLCHAR_NONE:
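+				/* a trailing "^n" (or its EBCDIC form) suppresses the automatic newline */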
+				if (count < 2 ||
+				    (strncmp(raw->inbuf+count-2, "\252n", 2) &&
+				     strncmp(raw->inbuf+count-2, "^n", 2)) ) {
+					/* add the auto \n */
+					raw->inbuf[count] = '\n';
+					count++;
+				} else
+					count -= 2;
+				tty_insert_flip_string(tty, raw->inbuf, count);
+				tty_flip_buffer_push(raw->tty);
+				break;
+			}
+		} else if (req->type == RAW3215_WRITE) {
+			raw->count -= req->len;
+			raw->written -= req->len;
+		}
+		raw->flags &= ~RAW3215_WORKING;
+		raw3215_free_req(req);
+		/* check for empty wait */
+		if (waitqueue_active(&raw->empty_wait) &&
+		    raw->queued_write == NULL &&
+		    raw->queued_read == NULL) {
+			wake_up_interruptible(&raw->empty_wait);
+		}
+		raw3215_next_io(raw);
+		break;
+	default:
+		/* Strange interrupt, I'll do my best to clean up */
+		if (req != NULL && req->type != RAW3215_FREE) {
+			if (req->type == RAW3215_WRITE) {
+				raw->count -= req->len;
+				raw->written -= req->len;
+			}
+			raw->flags &= ~RAW3215_WORKING;
+			raw3215_free_req(req);
+		}
+		raw3215_next_io(raw);
+	}
+	return;
+}
+
+/*
+ * Drop the oldest line from the output buffer.
+ */
+static void raw3215_drop_line(struct raw3215_info *raw)
+{
+	int ix;
+	char ch;
+
+	BUG_ON(raw->written != 0);
+	ix = (raw->head - raw->count) & (RAW3215_BUFFER_SIZE - 1);
+	while (raw->count > 0) {
+		ch = raw->buffer[ix];
+		ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1);
+		raw->count--;
+		if (ch == 0x15)
+			break;
+	}
+	raw->head = ix;
+}
+
+/*
+ * Wait until length bytes are available in the output buffer.
+ * Has to be called with the s390irq lock held. Can be called
+ * with interrupts disabled.
+ */
+static void raw3215_make_room(struct raw3215_info *raw, unsigned int length)
+{
+	while (RAW3215_BUFFER_SIZE - raw->count < length) {
+		/* While the console is frozen for suspend we have no
+		 * other choice but to drop messages from the buffer to
+		 * make room for even more messages. */
+		if (raw->flags & RAW3215_FROZEN) {
+			raw3215_drop_line(raw);
+			continue;
+		}
+		/* there might be a request pending */
+		raw->flags |= RAW3215_FLUSHING;
+		raw3215_mk_write_req(raw);
+		raw3215_try_io(raw);
+		raw->flags &= ~RAW3215_FLUSHING;
+#ifdef CONFIG_TN3215_CONSOLE
+		wait_cons_dev();
+#endif
+		/* Enough room freed up ? */
+		if (RAW3215_BUFFER_SIZE - raw->count >= length)
+			break;
+		/* there might be another cpu waiting for the lock */
+		spin_unlock(get_ccwdev_lock(raw->cdev));
+		udelay(100);
+		spin_lock(get_ccwdev_lock(raw->cdev));
+	}
+}
+
+/*
+ * String write routine for 3215 devices
+ */
+static void raw3215_write(struct raw3215_info *raw, const char *str,
+			  unsigned int length)
+{
+	unsigned long flags;
+	int c, count;
+
+	while (length > 0) {
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+		count = (length > RAW3215_BUFFER_SIZE) ?
+					     RAW3215_BUFFER_SIZE : length;
+		length -= count;
+
+		raw3215_make_room(raw, count);
+
+		/* copy string to output buffer and convert it to EBCDIC */
+		while (1) {
+			c = min_t(int, count,
+				  min(RAW3215_BUFFER_SIZE - raw->count,
+				      RAW3215_BUFFER_SIZE - raw->head));
+			if (c <= 0)
+				break;
+			memcpy(raw->buffer + raw->head, str, c);
+			ASCEBC(raw->buffer + raw->head, c);
+			raw->head = (raw->head + c) & (RAW3215_BUFFER_SIZE - 1);
+			raw->count += c;
+			raw->line_pos += c;
+			str += c;
+			count -= c;
+		}
+		if (!(raw->flags & RAW3215_WORKING)) {
+			raw3215_mk_write_req(raw);
+			/* start or queue request */
+			raw3215_try_io(raw);
+		}
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	}
+}
+
+/*
+ * Put character routine for 3215 devices
+ */
+static void raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
+{
+	unsigned long flags;
+	unsigned int length, i;
+
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	if (ch == '\t') {
+		length = TAB_STOP_SIZE - (raw->line_pos%TAB_STOP_SIZE);
+		raw->line_pos += length;
+		ch = ' ';
+	} else if (ch == '\n') {
+		length = 1;
+		raw->line_pos = 0;
+	} else {
+		length = 1;
+		raw->line_pos++;
+	}
+	raw3215_make_room(raw, length);
+
+	for (i = 0; i < length; i++) {
+		raw->buffer[raw->head] = (char) _ascebc[(int) ch];
+		raw->head = (raw->head + 1) & (RAW3215_BUFFER_SIZE - 1);
+		raw->count++;
+	}
+	if (!(raw->flags & RAW3215_WORKING)) {
+		raw3215_mk_write_req(raw);
+		/* start or queue request */
+		raw3215_try_io(raw);
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+}
+
+/*
+ * Flush routine, it simply sets the flush flag and tries to start
+ * pending IO.
+ */
+static void raw3215_flush_buffer(struct raw3215_info *raw)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	if (raw->count > 0) {
+		raw->flags |= RAW3215_FLUSHING;
+		raw3215_try_io(raw);
+		raw->flags &= ~RAW3215_FLUSHING;
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+}
+
+/*
+ * Fire up a 3215 device.
+ */
+static int raw3215_startup(struct raw3215_info *raw)
+{
+	unsigned long flags;
+
+	if (raw->flags & RAW3215_ACTIVE)
+		return 0;
+	raw->line_pos = 0;
+	raw->flags |= RAW3215_ACTIVE;
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	raw3215_try_io(raw);
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+
+	return 0;
+}
+
+/*
+ * Shutdown a 3215 device.
+ */
+static void raw3215_shutdown(struct raw3215_info *raw)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long flags;
+
+	if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FIXED))
+		return;
+	/* Wait for outstanding requests, then free irq */
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	if ((raw->flags & RAW3215_WORKING) ||
+	    raw->queued_write != NULL ||
+	    raw->queued_read != NULL) {
+		raw->flags |= RAW3215_CLOSING;
+		add_wait_queue(&raw->empty_wait, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+		schedule();
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+		remove_wait_queue(&raw->empty_wait, &wait);
+		set_current_state(TASK_RUNNING);
+		raw->flags &= ~(RAW3215_ACTIVE | RAW3215_CLOSING);
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+}
+
+static int raw3215_probe (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+	int line;
+
+	/* Console is special. */
+	if (raw3215[0] && (raw3215[0] == dev_get_drvdata(&cdev->dev)))
+		return 0;
+	raw = kmalloc(sizeof(struct raw3215_info) +
+		      RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA);
+	if (raw == NULL)
+		return -ENOMEM;
+
+	spin_lock(&raw3215_device_lock);
+	for (line = 0; line < NR_3215; line++) {
+		if (!raw3215[line]) {
+			raw3215[line] = raw;
+			break;
+		}
+	}
+	spin_unlock(&raw3215_device_lock);
+	if (line == NR_3215) {
+		kfree(raw);
+		return -ENODEV;
+	}
+
+	raw->cdev = cdev;
+	raw->inbuf = (char *) raw + sizeof(struct raw3215_info);
+	memset(raw, 0, sizeof(struct raw3215_info));
+	raw->buffer = kmalloc(RAW3215_BUFFER_SIZE,
+				       GFP_KERNEL|GFP_DMA);
+	if (raw->buffer == NULL) {
+		spin_lock(&raw3215_device_lock);
+		raw3215[line] = NULL;
+		spin_unlock(&raw3215_device_lock);
+		kfree(raw);
+		return -ENOMEM;
+	}
+	init_waitqueue_head(&raw->empty_wait);
+	tasklet_init(&raw->tlet, raw3215_wakeup, (unsigned long) raw);
+
+	dev_set_drvdata(&cdev->dev, raw);
+	cdev->handler = raw3215_irq;
+
+	return 0;
+}
+
+static void raw3215_remove (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+
+	ccw_device_set_offline(cdev);
+	raw = dev_get_drvdata(&cdev->dev);
+	if (raw) {
+		dev_set_drvdata(&cdev->dev, NULL);
+		kfree(raw->buffer);
+		kfree(raw);
+	}
+}
+
+static int raw3215_set_online (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+
+	raw = dev_get_drvdata(&cdev->dev);
+	if (!raw)
+		return -ENODEV;
+
+	return raw3215_startup(raw);
+}
+
+static int raw3215_set_offline (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+
+	raw = dev_get_drvdata(&cdev->dev);
+	if (!raw)
+		return -ENODEV;
+
+	raw3215_shutdown(raw);
+
+	return 0;
+}
+
+static int raw3215_pm_stop(struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	/* Empty the output buffer, then prevent new I/O. */
+	raw = dev_get_drvdata(&cdev->dev);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
+	raw->flags |= RAW3215_FROZEN;
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	return 0;
+}
+
+static int raw3215_pm_start(struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	/* Allow I/O again and flush output buffer. */
+	raw = dev_get_drvdata(&cdev->dev);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	raw->flags &= ~RAW3215_FROZEN;
+	raw->flags |= RAW3215_FLUSHING;
+	raw3215_try_io(raw);
+	raw->flags &= ~RAW3215_FLUSHING;
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	return 0;
+}
+
+static struct ccw_device_id raw3215_id[] = {
+	{ CCW_DEVICE(0x3215, 0) },
+	{ /* end of list */ },
+};
+
+static struct ccw_driver raw3215_ccw_driver = {
+	.driver = {
+		.name	= "3215",
+		.owner	= THIS_MODULE,
+	},
+	.ids		= raw3215_id,
+	.probe		= &raw3215_probe,
+	.remove		= &raw3215_remove,
+	.set_online	= &raw3215_set_online,
+	.set_offline	= &raw3215_set_offline,
+	.freeze		= &raw3215_pm_stop,
+	.thaw		= &raw3215_pm_start,
+	.restore	= &raw3215_pm_start,
+	.int_class	= IOINT_C15,
+};
+
+#ifdef CONFIG_TN3215_CONSOLE
+/*
+ * Write a string to the 3215 console
+ */
+static void con3215_write(struct console *co, const char *str,
+			  unsigned int count)
+{
+	struct raw3215_info *raw;
+	int i;
+
+	if (count <= 0)
+		return;
+	raw = raw3215[0];	/* console 3215 is the first one */
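+	/* write runs of plain characters in one piece; tabs and newlines go through raw3215_putchar */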
+	while (count > 0) {
+		for (i = 0; i < count; i++)
+			if (str[i] == '\t' || str[i] == '\n')
+				break;
+		raw3215_write(raw, str, i);
+		count -= i;
+		str += i;
+		if (count > 0) {
+			raw3215_putchar(raw, *str);
+			count--;
+			str++;
+		}
+	}
+}
+
+static struct tty_driver *con3215_device(struct console *c, int *index)
+{
+	*index = c->index;
+	return tty3215_driver;
+}
+
+/*
+ * panic() calls con3215_flush through a panic_notifier
+ * before the system enters a disabled, endless loop.
+ */
+static void con3215_flush(void)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	raw = raw3215[0];  /* console 3215 is the first one */
+	if (raw->flags & RAW3215_FROZEN)
+		/* The console is still frozen for suspend. */
+		if (ccw_device_force_console())
+			/* Forcing didn't work, no panic message .. */
+			return;
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+}
+
+static int con3215_notify(struct notifier_block *self,
+			  unsigned long event, void *data)
+{
+	con3215_flush();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+	.notifier_call = con3215_notify,
+	.priority = 0,
+};
+
+static struct notifier_block on_reboot_nb = {
+	.notifier_call = con3215_notify,
+	.priority = 0,
+};
+
+/*
+ *  The console structure for the 3215 console
+ */
+static struct console con3215 = {
+	.name	 = "ttyS",
+	.write	 = con3215_write,
+	.device	 = con3215_device,
+	.flags	 = CON_PRINTBUFFER,
+};
+
+/*
+ * 3215 console initialization code called from console_init().
+ */
+static int __init con3215_init(void)
+{
+	struct ccw_device *cdev;
+	struct raw3215_info *raw;
+	struct raw3215_req *req;
+	int i;
+
+	/* Check if 3215 is to be the console */
+	if (!CONSOLE_IS_3215)
+		return -ENODEV;
+
+	/* Set the console mode for VM */
+	if (MACHINE_IS_VM) {
+		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
+		cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
+	}
+
+	/* allocate 3215 request structures */
+	raw3215_freelist = NULL;
+	spin_lock_init(&raw3215_freelist_lock);
+	for (i = 0; i < NR_3215_REQ; i++) {
+		req = kzalloc(sizeof(struct raw3215_req), GFP_KERNEL | GFP_DMA);
+		req->next = raw3215_freelist;
+		raw3215_freelist = req;
+	}
+
+	cdev = ccw_device_probe_console();
+	if (IS_ERR(cdev))
+		return -ENODEV;
+
+	raw3215[0] = raw = (struct raw3215_info *)
+		kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA);
+	raw->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
+	raw->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
+	raw->cdev = cdev;
+	dev_set_drvdata(&cdev->dev, raw);
+	cdev->handler = raw3215_irq;
+
+	raw->flags |= RAW3215_FIXED;
+	init_waitqueue_head(&raw->empty_wait);
+	tasklet_init(&raw->tlet, raw3215_wakeup, (unsigned long) raw);
+
+	/* Request the console irq */
+	if (raw3215_startup(raw) != 0) {
+		kfree(raw->inbuf);
+		kfree(raw->buffer);
+		kfree(raw);
+		raw3215[0] = NULL;
+		return -ENODEV;
+	}
+	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+	register_reboot_notifier(&on_reboot_nb);
+	register_console(&con3215);
+	return 0;
+}
+console_initcall(con3215_init);
+#endif
+
+/*
+ * tty3215_open
+ *
+ * This routine is called whenever a 3215 tty is opened.
+ */
+static int tty3215_open(struct tty_struct *tty, struct file * filp)
+{
+	struct raw3215_info *raw;
+	int retval;
+
+	raw = raw3215[tty->index];
+	if (raw == NULL)
+		return -ENODEV;
+
+	tty->driver_data = raw;
+	raw->tty = tty;
+
+	tty->low_latency = 0;  /* don't use bottom half for pushing chars */
+	/*
+	 * Start up 3215 device
+	 */
+	retval = raw3215_startup(raw);
+	if (retval)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * tty3215_close()
+ *
+ * This routine is called when the 3215 tty is closed. We wait
+ * for the remaining request to be completed. Then we clean up.
+ */
+static void tty3215_close(struct tty_struct *tty, struct file * filp)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	if (raw == NULL || tty->count > 1)
+		return;
+	tty->closing = 1;
+	/* Shutdown the terminal */
+	raw3215_shutdown(raw);
+	tasklet_kill(&raw->tlet);
+	tty->closing = 0;
+	raw->tty = NULL;
+}
+
+/*
+ * Returns the amount of free space in the output buffer.
+ */
+static int tty3215_write_room(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+
+	/* Subtract TAB_STOP_SIZE to allow for a tab; 8 bytes are negligible compared to the 64K buffer */
+	if ((RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE) >= 0)
+		return RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE;
+	else
+		return 0;
+}
+
+/*
+ * String write routine for 3215 ttys
+ */
+static int tty3215_write(struct tty_struct * tty,
+			 const unsigned char *buf, int count)
+{
+	struct raw3215_info *raw;
+	int i, written;
+
+	if (!tty)
+		return 0;
+	raw = (struct raw3215_info *) tty->driver_data;
+	written = count;
+	while (count > 0) {
+		for (i = 0; i < count; i++)
+			if (buf[i] == '\t' || buf[i] == '\n')
+				break;
+		raw3215_write(raw, buf, i);
+		count -= i;
+		buf += i;
+		if (count > 0) {
+			raw3215_putchar(raw, *buf);
+			count--;
+			buf++;
+		}
+	}
+	return written;
+}
+
+/*
+ * Put character routine for 3215 ttys
+ */
+static int tty3215_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	struct raw3215_info *raw;
+
+	if (!tty)
+		return 0;
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw3215_putchar(raw, ch);
+	return 1;
+}
+
+static void tty3215_flush_chars(struct tty_struct *tty)
+{
+}
+
+/*
+ * Returns the number of characters in the output buffer
+ */
+static int tty3215_chars_in_buffer(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	return raw->count;
+}
+
+static void tty3215_flush_buffer(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw3215_flush_buffer(raw);
+	tty_wakeup(tty);
+}
+
+/*
+ * Disable reading from a 3215 tty
+ */
+static void tty3215_throttle(struct tty_struct * tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw->flags |= RAW3215_THROTTLED;
+}
+
+/*
+ * Enable reading from a 3215 tty
+ */
+static void tty3215_unthrottle(struct tty_struct * tty)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	if (raw->flags & RAW3215_THROTTLED) {
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+		raw->flags &= ~RAW3215_THROTTLED;
+		raw3215_try_io(raw);
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	}
+}
+
+/*
+ * Disable writing to a 3215 tty
+ */
+static void tty3215_stop(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw->flags |= RAW3215_STOPPED;
+}
+
+/*
+ * Enable writing to a 3215 tty
+ */
+static void tty3215_start(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	if (raw->flags & RAW3215_STOPPED) {
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+		raw->flags &= ~RAW3215_STOPPED;
+		raw3215_try_io(raw);
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	}
+}
+
+static const struct tty_operations tty3215_ops = {
+	.open = tty3215_open,
+	.close = tty3215_close,
+	.write = tty3215_write,
+	.put_char = tty3215_put_char,
+	.flush_chars = tty3215_flush_chars,
+	.write_room = tty3215_write_room,
+	.chars_in_buffer = tty3215_chars_in_buffer,
+	.flush_buffer = tty3215_flush_buffer,
+	.throttle = tty3215_throttle,
+	.unthrottle = tty3215_unthrottle,
+	.stop = tty3215_stop,
+	.start = tty3215_start,
+};
+
+/*
+ * 3215 tty registration code called from tty_init().
+ * Most kernel services (incl. kmalloc) are available at this point.
+ */
+static int __init tty3215_init(void)
+{
+	struct tty_driver *driver;
+	int ret;
+
+	if (!CONSOLE_IS_3215)
+		return 0;
+
+	driver = alloc_tty_driver(NR_3215);
+	if (!driver)
+		return -ENOMEM;
+
+	ret = ccw_driver_register(&raw3215_ccw_driver);
+	if (ret) {
+		put_tty_driver(driver);
+		return ret;
+	}
+	/*
+	 * Initialize the tty_driver structure
+	 * Entries in tty3215_driver that are NOT initialized:
+	 * proc_entry, set_termios, set_ldisc, write_proc
+	 */
+
+	driver->driver_name = "tty3215";
+	driver->name = "ttyS";
+	driver->major = TTY_MAJOR;
+	driver->minor_start = 64;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	driver->init_termios.c_iflag = IGNBRK | IGNPAR;
+	driver->init_termios.c_oflag = ONLCR;
+	driver->init_termios.c_lflag = ISIG;
+	driver->flags = TTY_DRIVER_REAL_RAW;
+	tty_set_operations(driver, &tty3215_ops);
+	ret = tty_register_driver(driver);
+	if (ret) {
+		put_tty_driver(driver);
+		return ret;
+	}
+	tty3215_driver = driver;
+	return 0;
+}
+
+static void __exit tty3215_exit(void)
+{
+	tty_unregister_driver(tty3215_driver);
+	put_tty_driver(tty3215_driver);
+	ccw_driver_unregister(&raw3215_ccw_driver);
+}
+
+module_init(tty3215_init);
+module_exit(tty3215_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/con3270.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/con3270.c
new file mode 100644
index 0000000..bb07577
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/con3270.c
@@ -0,0 +1,636 @@
+/*
+ * IBM/3270 Driver - console view.
+ *
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
+ */
+
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/reboot.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/cpcmd.h>
+#include <asm/ebcdic.h>
+
+#include "raw3270.h"
+#include "tty3270.h"
+#include "ctrlchar.h"
+
+#define CON3270_OUTPUT_BUFFER_SIZE 1024
+#define CON3270_STRING_PAGES 4
+
+static struct raw3270_fn con3270_fn;
+
+/*
+ * Main 3270 console view data structure.
+ */
+struct con3270 {
+	struct raw3270_view view;
+	spinlock_t lock;
+	struct list_head freemem;	/* list of free memory for strings. */
+
+	/* Output stuff. */
+	struct list_head lines;		/* list of lines. */
+	struct list_head update;	/* list of lines to update. */
+	int line_nr;			/* line number for next update. */
+	int nr_lines;			/* # lines in list. */
+	int nr_up;			/* # lines up in history. */
+	unsigned long update_flags;	/* Update indication bits. */
+	struct string *cline;		/* current output line. */
+	struct string *status;		/* last line of display. */
+	struct raw3270_request *write;	/* single write request. */
+	struct timer_list timer;
+
+	/* Input stuff. */
+	struct string *input;		/* input string for read request. */
+	struct raw3270_request *read;	/* single read request. */
+	struct raw3270_request *kreset;	/* single keyboard reset request. */
+	struct tasklet_struct readlet;	/* tasklet to issue read request. */
+};
+
+static struct con3270 *condev;
+
+/* con3270->update_flags. See con3270_update for details. */
+#define CON_UPDATE_ERASE	1	/* Use EWRITEA instead of WRITE. */
+#define CON_UPDATE_LIST		2	/* Update lines in tty3270->update. */
+#define CON_UPDATE_STATUS	4	/* Update status line. */
+#define CON_UPDATE_ALL		8	/* Recreate screen. */
+
+static void con3270_update(struct con3270 *);
+
+/*
+ * Setup timeout for a device. On timeout trigger an update.
+ */
+static void con3270_set_timer(struct con3270 *cp, int expires)
+{
+	if (expires == 0)
+		del_timer(&cp->timer);
+	else
+		mod_timer(&cp->timer, jiffies + expires);
+}
+
+/*
+ * The status line is the last line of the screen. It shows the string
+ * "console view" in the lower left corner and "Running"/"More..."/"Holding"
+ * in the lower right corner of the screen.
+ */
+static void
+con3270_update_status(struct con3270 *cp)
+{
+	char *str;
+
+	str = (cp->nr_up != 0) ? "History" : "Running";
+	memcpy(cp->status->string + 24, str, 7);
+	codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
+	cp->update_flags |= CON_UPDATE_STATUS;
+}
+
+static void
+con3270_create_status(struct con3270 *cp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, TO_SF,TF_LOG,TO_SA,TAT_COLOR, TAC_GREEN,
+		  'c','o','n','s','o','l','e',' ','v','i','e','w',
+		  TO_RA,0,0,0,'R','u','n','n','i','n','g',TO_SF,TF_LOG };
+
+	cp->status = alloc_string(&cp->freemem, sizeof(blueprint));
+	/* Copy blueprint to status line */
+	memcpy(cp->status->string, blueprint, sizeof(blueprint));
+	/* Set TO_RA addresses. */
+	raw3270_buffer_address(cp->view.dev, cp->status->string + 1,
+			       cp->view.cols * (cp->view.rows - 1));
+	raw3270_buffer_address(cp->view.dev, cp->status->string + 21,
+			       cp->view.cols * cp->view.rows - 8);
+	/* Convert strings to ebcdic. */
+	codepage_convert(cp->view.ascebc, cp->status->string + 8, 12);
+	codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
+}
+
+/*
+ * Set output offsets to 3270 datastream fragment of a console string.
+ */
+static void
+con3270_update_string(struct con3270 *cp, struct string *s, int nr)
+{
+	if (s->len >= cp->view.cols - 5)
+		return;
+	raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
+			       cp->view.cols * (nr + 1));
+}
+
+/*
+ * Rebuild update list to print all lines.
+ */
+static void
+con3270_rebuild_update(struct con3270 *cp)
+{
+	struct string *s, *n;
+	int nr;
+
+	/* 
+	 * Throw away update list and create a new one,
+	 * containing all lines that will fit on the screen.
+	 */
+	list_for_each_entry_safe(s, n, &cp->update, update)
+		list_del_init(&s->update);
+	nr = cp->view.rows - 2 + cp->nr_up;
+	list_for_each_entry_reverse(s, &cp->lines, list) {
+		if (nr < cp->view.rows - 1)
+			list_add(&s->update, &cp->update);
+		if (--nr < 0)
+			break;
+	}
+	cp->line_nr = 0;
+	cp->update_flags |= CON_UPDATE_LIST;
+}
+
+/*
+ * Alloc string for size bytes. Free strings from history if necessary.
+ */
+static struct string *
+con3270_alloc_string(struct con3270 *cp, size_t size)
+{
+	struct string *s, *n;
+
+	s = alloc_string(&cp->freemem, size);
+	if (s)
+		return s;
+	list_for_each_entry_safe(s, n, &cp->lines, list) {
+		list_del(&s->list);
+		if (!list_empty(&s->update))
+			list_del(&s->update);
+		cp->nr_lines--;
+		if (free_string(&cp->freemem, s) >= size)
+			break;
+	}
+	s = alloc_string(&cp->freemem, size);
+	BUG_ON(!s);
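+	/* freeing history lines may have shrunk the scrollback; clamp nr_up and redraw */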
+	if (cp->nr_up != 0 && cp->nr_up + cp->view.rows > cp->nr_lines) {
+		cp->nr_up = cp->nr_lines - cp->view.rows + 1;
+		con3270_rebuild_update(cp);
+		con3270_update_status(cp);
+	}
+	return s;
+}
+
+/*
+ * Write completion callback.
+ */
+static void
+con3270_write_callback(struct raw3270_request *rq, void *data)
+{
+	raw3270_request_reset(rq);
+	xchg(&((struct con3270 *) rq->view)->write, rq);
+}
+
+/*
+ * Update console display.
+ */
+static void
+con3270_update(struct con3270 *cp)
+{
+	struct raw3270_request *wrq;
+	char wcc, prolog[6];
+	unsigned long flags;
+	unsigned long updated;
+	struct string *s, *n;
+	int rc;
+
+	if (cp->view.dev)
+		raw3270_activate_view(&cp->view);
+
+	wrq = xchg(&cp->write, 0);
+	if (!wrq) {
+		con3270_set_timer(cp, 1);
+		return;
+	}
+
+	spin_lock_irqsave(&cp->view.lock, flags);
+	updated = 0;
+	if (cp->update_flags & CON_UPDATE_ALL) {
+		con3270_rebuild_update(cp);
+		con3270_update_status(cp);
+		cp->update_flags = CON_UPDATE_ERASE | CON_UPDATE_LIST |
+			CON_UPDATE_STATUS;
+	}
+	if (cp->update_flags & CON_UPDATE_ERASE) {
+		/* Use erase write alternate to initialize display. */
+		raw3270_request_set_cmd(wrq, TC_EWRITEA);
+		updated |= CON_UPDATE_ERASE;
+	} else
+		raw3270_request_set_cmd(wrq, TC_WRITE);
+
+	wcc = TW_NONE;
+	raw3270_request_add_data(wrq, &wcc, 1);
+
+	/*
+	 * Update status line.
+	 */
+	if (cp->update_flags & CON_UPDATE_STATUS)
+		if (raw3270_request_add_data(wrq, cp->status->string,
+					     cp->status->len) == 0)
+			updated |= CON_UPDATE_STATUS;
+
+	if (cp->update_flags & CON_UPDATE_LIST) {
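+		/* set the buffer address to the first line to update, then switch the colour attribute */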
+		prolog[0] = TO_SBA;
+		prolog[3] = TO_SA;
+		prolog[4] = TAT_COLOR;
+		prolog[5] = TAC_TURQ;
+		raw3270_buffer_address(cp->view.dev, prolog + 1,
+				       cp->view.cols * cp->line_nr);
+		raw3270_request_add_data(wrq, prolog, 6);
+		/* Write strings in the update list to the screen. */
+		list_for_each_entry_safe(s, n, &cp->update, update) {
+			if (s != cp->cline)
+				con3270_update_string(cp, s, cp->line_nr);
+			if (raw3270_request_add_data(wrq, s->string,
+						     s->len) != 0)
+				break;
+			list_del_init(&s->update);
+			if (s != cp->cline)
+				cp->line_nr++;
+		}
+		if (list_empty(&cp->update))
+			updated |= CON_UPDATE_LIST;
+	}
+	wrq->callback = con3270_write_callback;
+	rc = raw3270_start(&cp->view, wrq);
+	if (rc == 0) {
+		cp->update_flags &= ~updated;
+		if (cp->update_flags)
+			con3270_set_timer(cp, 1);
+	} else {
+		raw3270_request_reset(wrq);
+		xchg(&cp->write, wrq);
+	}
+	spin_unlock_irqrestore(&cp->view.lock, flags);
+}
+
+/*
+ * Read tasklet.
+ */
+static void
+con3270_read_tasklet(struct raw3270_request *rrq)
+{
+	static char kreset_data = TW_KR;
+	struct con3270 *cp;
+	unsigned long flags;
+	int nr_up, deactivate;
+
+	cp = (struct con3270 *) rrq->view;
+	spin_lock_irqsave(&cp->view.lock, flags);
+	nr_up = cp->nr_up;
+	deactivate = 0;
+	/* Check aid byte. */
+	switch (cp->input->string[0]) {
+	case 0x7d:	/* enter: jump to bottom. */
+		nr_up = 0;
+		break;
+	case 0xf3:	/* PF3: deactivate the console view. */
+		deactivate = 1;
+		break;
+	case 0x6d:	/* clear: start from scratch. */
+		cp->update_flags = CON_UPDATE_ALL;
+		con3270_set_timer(cp, 1);
+		break;
+	case 0xf7:	/* PF7: do a page up in the console log. */
+		nr_up += cp->view.rows - 2;
+		if (nr_up + cp->view.rows - 1 > cp->nr_lines) {
+			nr_up = cp->nr_lines - cp->view.rows + 1;
+			if (nr_up < 0)
+				nr_up = 0;
+		}
+		break;
+	case 0xf8:	/* PF8: do a page down in the console log. */
+		nr_up -= cp->view.rows - 2;
+		if (nr_up < 0)
+			nr_up = 0;
+		break;
+	}
+	if (nr_up != cp->nr_up) {
+		cp->nr_up = nr_up;
+		con3270_rebuild_update(cp);
+		con3270_update_status(cp);
+		con3270_set_timer(cp, 1);
+	}
+	spin_unlock_irqrestore(&cp->view.lock, flags);
+
+	/* Start keyboard reset command. */
+	raw3270_request_reset(cp->kreset);
+	raw3270_request_set_cmd(cp->kreset, TC_WRITE);
+	raw3270_request_add_data(cp->kreset, &kreset_data, 1);
+	raw3270_start(&cp->view, cp->kreset);
+
+	if (deactivate)
+		raw3270_deactivate_view(&cp->view);
+
+	raw3270_request_reset(rrq);
+	xchg(&cp->read, rrq);
+	raw3270_put_view(&cp->view);
+}
+
+/*
+ * Read request completion callback.
+ */
+static void
+con3270_read_callback(struct raw3270_request *rq, void *data)
+{
+	raw3270_get_view(rq->view);
+	/* Schedule tasklet to pass input to tty. */
+	tasklet_schedule(&((struct con3270 *) rq->view)->readlet);
+}
+
+/*
+ * Issue a read request. Called only from interrupt function.
+ */
+static void
+con3270_issue_read(struct con3270 *cp)
+{
+	struct raw3270_request *rrq;
+	int rc;
+
+	rrq = xchg(&cp->read, 0);
+	if (!rrq)
+		/* Read already scheduled. */
+		return;
+	rrq->callback = con3270_read_callback;
+	rrq->callback_data = cp;
+	raw3270_request_set_cmd(rrq, TC_READMOD);
+	raw3270_request_set_data(rrq, cp->input->string, cp->input->len);
+	/* Issue the read modified request. */
+	rc = raw3270_start_irq(&cp->view, rrq);
+	if (rc)
+		raw3270_request_reset(rrq);
+}
+
+/*
+ * Switch to the console view.
+ */
+static int
+con3270_activate(struct raw3270_view *view)
+{
+	struct con3270 *cp;
+
+	cp = (struct con3270 *) view;
+	cp->update_flags = CON_UPDATE_ALL;
+	con3270_set_timer(cp, 1);
+	return 0;
+}
+
+static void
+con3270_deactivate(struct raw3270_view *view)
+{
+	struct con3270 *cp;
+
+	cp = (struct con3270 *) view;
+	del_timer(&cp->timer);
+}
+
+static int
+con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
+{
+	/* Handle ATTN. Schedule tasklet to read aid. */
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
+		con3270_issue_read(cp);
+
+	if (rq) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+			rq->rc = -EIO;
+		else
+			/* Normal end. Copy residual count. */
+			rq->rescnt = irb->scsw.cmd.count;
+	}
+	return RAW3270_IO_DONE;
+}
+
+/* Console view to a 3270 device. */
+static struct raw3270_fn con3270_fn = {
+	.activate = con3270_activate,
+	.deactivate = con3270_deactivate,
+	.intv = (void *) con3270_irq
+};
+
+static inline void
+con3270_cline_add(struct con3270 *cp)
+{
+	if (!list_empty(&cp->cline->list))
+		/* Already added. */
+		return;
+	list_add_tail(&cp->cline->list, &cp->lines);
+	cp->nr_lines++;
+	con3270_rebuild_update(cp);
+}
+
+static inline void
+con3270_cline_insert(struct con3270 *cp, unsigned char c)
+{
+	cp->cline->string[cp->cline->len++] = 
+		cp->view.ascebc[(c < ' ') ? ' ' : c];
+	if (list_empty(&cp->cline->update)) {
+		list_add_tail(&cp->cline->update, &cp->update);
+		cp->update_flags |= CON_UPDATE_LIST;
+	}
+}
+
+static inline void
+con3270_cline_end(struct con3270 *cp)
+{
+	struct string *s;
+	unsigned int size;
+
+	/* Copy cline. */
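+	/* short lines get 4 extra bytes for the trailing TO_RA order, its address and a zero byte */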
+	size = (cp->cline->len < cp->view.cols - 5) ?
+		cp->cline->len + 4 : cp->view.cols;
+	s = con3270_alloc_string(cp, size);
+	memcpy(s->string, cp->cline->string, cp->cline->len);
+	if (s->len < cp->view.cols - 5) {
+		s->string[s->len - 4] = TO_RA;
+		s->string[s->len - 1] = 0;
+	} else {
+		while (--size > cp->cline->len)
+			s->string[size] = cp->view.ascebc[' '];
+	}
+	/* Replace cline with allocated line s and reset cline. */
+	list_add(&s->list, &cp->cline->list);
+	list_del_init(&cp->cline->list);
+	if (!list_empty(&cp->cline->update)) {
+		list_add(&s->update, &cp->cline->update);
+		list_del_init(&cp->cline->update);
+	}
+	cp->cline->len = 0;
+}
+
+/*
+ * Write a string to the 3270 console
+ */
+static void
+con3270_write(struct console *co, const char *str, unsigned int count)
+{
+	struct con3270 *cp;
+	unsigned long flags;
+	unsigned char c;
+
+	cp = condev;
+	spin_lock_irqsave(&cp->view.lock, flags);
+	while (count-- > 0) {
+		c = *str++;
+		if (cp->cline->len == 0)
+			con3270_cline_add(cp);
+		if (c != '\n')
+			con3270_cline_insert(cp, c);
+		if (c == '\n' || cp->cline->len >= cp->view.cols)
+			con3270_cline_end(cp);
+	}
+	/* Setup timer to output current console buffer after 1/10 second */
+	cp->nr_up = 0;
+	if (cp->view.dev && !timer_pending(&cp->timer))
+		con3270_set_timer(cp, HZ/10);
+	spin_unlock_irqrestore(&cp->view.lock,flags);
+}
+
+static struct tty_driver *
+con3270_device(struct console *c, int *index)
+{
+	*index = c->index;
+	return tty3270_driver;
+}
+
+/*
+ * Wait for end of write request.
+ */
+static void
+con3270_wait_write(struct con3270 *cp)
+{
+	while (!cp->write) {
+		raw3270_wait_cons_dev(cp->view.dev);
+		barrier();
+	}
+}
+
+/*
+ * panic() calls con3270_flush through a panic_notifier
+ * before the system enters a disabled, endless loop.
+ */
+static void
+con3270_flush(void)
+{
+	struct con3270 *cp;
+	unsigned long flags;
+
+	cp = condev;
+	if (!cp->view.dev)
+		return;
+	raw3270_pm_unfreeze(&cp->view);
+	spin_lock_irqsave(&cp->view.lock, flags);
+	con3270_wait_write(cp);
+	cp->nr_up = 0;
+	con3270_rebuild_update(cp);
+	con3270_update_status(cp);
+	while (cp->update_flags != 0) {
+		spin_unlock_irqrestore(&cp->view.lock, flags);
+		con3270_update(cp);
+		spin_lock_irqsave(&cp->view.lock, flags);
+		con3270_wait_write(cp);
+	}
+	spin_unlock_irqrestore(&cp->view.lock, flags);
+}
+
+static int con3270_notify(struct notifier_block *self,
+			  unsigned long event, void *data)
+{
+	con3270_flush();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+	.notifier_call = con3270_notify,
+	.priority = 0,
+};
+
+static struct notifier_block on_reboot_nb = {
+	.notifier_call = con3270_notify,
+	.priority = 0,
+};
+
+/*
+ *  The console structure for the 3270 console
+ */
+static struct console con3270 = {
+	.name	 = "tty3270",
+	.write	 = con3270_write,
+	.device	 = con3270_device,
+	.flags	 = CON_PRINTBUFFER,
+};
+
+/*
+ * 3270 console initialization code called from console_init().
+ */
+static int __init
+con3270_init(void)
+{
+	struct ccw_device *cdev;
+	struct raw3270 *rp;
+	void *cbuf;
+	int i;
+
+	/* Check if 3270 is to be the console */
+	if (!CONSOLE_IS_3270)
+		return -ENODEV;
+
+	/* Set the console mode for VM */
+	if (MACHINE_IS_VM) {
+		cpcmd("TERM CONMODE 3270", NULL, 0, NULL);
+		cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
+	}
+
+	cdev = ccw_device_probe_console();
+	if (IS_ERR(cdev))
+		return -ENODEV;
+	rp = raw3270_setup_console(cdev);
+	if (IS_ERR(rp))
+		return PTR_ERR(rp);
+
+	condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA);
+	condev->view.dev = rp;
+
+	condev->read = raw3270_request_alloc(0);
+	condev->read->callback = con3270_read_callback;
+	condev->read->callback_data = condev;
+	condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE);
+	condev->kreset = raw3270_request_alloc(1);
+
+	INIT_LIST_HEAD(&condev->lines);
+	INIT_LIST_HEAD(&condev->update);
+	setup_timer(&condev->timer, (void (*)(unsigned long)) con3270_update,
+		    (unsigned long) condev);
+	tasklet_init(&condev->readlet, 
+		     (void (*)(unsigned long)) con3270_read_tasklet,
+		     (unsigned long) condev->read);
+
+	raw3270_add_view(&condev->view, &con3270_fn, 1);
+
+	INIT_LIST_HEAD(&condev->freemem);
+	for (i = 0; i < CON3270_STRING_PAGES; i++) {
+		cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
+	}
+	condev->cline = alloc_string(&condev->freemem, condev->view.cols);
+	condev->cline->len = 0;
+	con3270_create_status(condev);
+	condev->input = alloc_string(&condev->freemem, 80);
+	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+	register_reboot_notifier(&on_reboot_nb);
+	register_console(&con3270);
+	return 0;
+}
+
+console_initcall(con3270_init);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/ctrlchar.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/ctrlchar.c
new file mode 100644
index 0000000..0e9a309
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/ctrlchar.c
@@ -0,0 +1,73 @@
+/*
+ *  drivers/s390/char/ctrlchar.c
+ *  Unified handling of special chars.
+ *
+ *    Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
+ *
+ */
+
+#include <linux/stddef.h>
+#include <asm/errno.h>
+#include <linux/sysrq.h>
+#include <linux/ctype.h>
+
+#include "ctrlchar.h"
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static int ctrlchar_sysrq_key;
+
+static void
+ctrlchar_handle_sysrq(struct work_struct *work)
+{
+	handle_sysrq(ctrlchar_sysrq_key);
+}
+
+static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
+#endif
+
+
+/**
+ * Check for special chars at start of input.
+ *
+ * @param buf Console input buffer.
+ * @param len Length of valid data in buffer.
+ * @param tty The tty struct for this console.
+ * @return CTRLCHAR_NONE, if nothing matched,
+ *         CTRLCHAR_SYSRQ, if sysrq was encountered
+ *         otherwise the character to be inserted, logically
+ *         OR'ed with CTRLCHAR_CTRL
+ */
+unsigned int
+ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
+{
+	if ((len < 2) || (len > 3))
+		return CTRLCHAR_NONE;
+
+	/* hat is 0xb1 in codepage 037 (US etc.) and thus */
+	/* converted to 0x5e in ascii ('^') */
+	if ((buf[0] != '^') && (buf[0] != '\252'))
+		return CTRLCHAR_NONE;
+
+#ifdef CONFIG_MAGIC_SYSRQ
+	/* racy */
+	if (len == 3 && buf[1] == '-') {
+		ctrlchar_sysrq_key = buf[2];
+		schedule_work(&ctrlchar_work);
+		return CTRLCHAR_SYSRQ;
+	}
+#endif
+
+	if (len != 2)
+		return CTRLCHAR_NONE;
+
+	switch (tolower(buf[1])) {
+	case 'c':
+		return INTR_CHAR(tty) | CTRLCHAR_CTRL;
+	case 'd':
+		return EOF_CHAR(tty)  | CTRLCHAR_CTRL;
+	case 'z':
+		return SUSP_CHAR(tty) | CTRLCHAR_CTRL;
+	}
+	return CTRLCHAR_NONE;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/ctrlchar.h b/ap/os/linux/linux-3.4.x/drivers/s390/char/ctrlchar.h
new file mode 100644
index 0000000..935ffa0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/ctrlchar.h
@@ -0,0 +1,20 @@
+/*
+ *  drivers/s390/char/ctrlchar.c
+ *  Unified handling of special chars.
+ *
+ *    Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
+ *
+ */
+
+#include <linux/tty.h>
+
+extern unsigned int
+ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty);
+
+
+#define CTRLCHAR_NONE  (1 << 8)
+#define CTRLCHAR_CTRL  (2 << 8)
+#define CTRLCHAR_SYSRQ (3 << 8)
+
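+/* Mask that isolates the CTRLCHAR_* type bits from a ctrlchar_handle() result. */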
+#define CTRLCHAR_MASK (~0xffu)
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/defkeymap.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/defkeymap.c
new file mode 100644
index 0000000..07c7f31
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/defkeymap.c
@@ -0,0 +1,158 @@
+
+/* Do not edit this file! It was automatically generated by   */
+/*    loadkeys --mktable defkeymap.map > defkeymap.c          */
+
+#include <linux/types.h>
+#include <linux/keyboard.h>
+#include <linux/kd.h>
+#include <linux/kbd_kern.h>
+#include <linux/kbd_diacr.h>
+
+u_short plain_map[NR_KEYS] = {
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf020,	0xf000,	0xf0e2,	0xf0e4,	0xf0e0,	0xf0e1,	0xf0e3,	0xf0e5,
+	0xf0e7,	0xf0f1,	0xf0a2,	0xf02e,	0xf03c,	0xf028,	0xf02b,	0xf07c,
+	0xf026,	0xf0e9,	0xf0e2,	0xf0eb,	0xf0e8,	0xf0ed,	0xf0ee,	0xf0ef,
+	0xf0ec,	0xf0df,	0xf021,	0xf024,	0xf02a,	0xf029,	0xf03b,	0xf0ac,
+	0xf02d,	0xf02f,	0xf0c2,	0xf0c4,	0xf0c0,	0xf0c1,	0xf0c3,	0xf0c5,
+	0xf0c7,	0xf0d1,	0xf0a6,	0xf02c,	0xf025,	0xf05f,	0xf03e,	0xf03f,
+	0xf0f8,	0xf0c9,	0xf0ca,	0xf0cb,	0xf0c8,	0xf0cd,	0xf0ce,	0xf0cf,
+	0xf0cc,	0xf060,	0xf03a,	0xf023,	0xf040,	0xf027,	0xf03d,	0xf022,
+};
+
+static u_short shift_map[NR_KEYS] = {
+	0xf0d8,	0xf061,	0xf062,	0xf063,	0xf064,	0xf065,	0xf066,	0xf067,
+	0xf068,	0xf069,	0xf0ab,	0xf0bb,	0xf0f0,	0xf0fd,	0xf0fe,	0xf0b1,
+	0xf0b0,	0xf06a,	0xf06b,	0xf06c,	0xf06d,	0xf06e,	0xf06f,	0xf070,
+	0xf071,	0xf072,	0xf000,	0xf000,	0xf0e6,	0xf0b8,	0xf0c6,	0xf0a4,
+	0xf0b5,	0xf07e,	0xf073,	0xf074,	0xf075,	0xf076,	0xf077,	0xf078,
+	0xf079,	0xf07a,	0xf0a1,	0xf0bf,	0xf0d0,	0xf0dd,	0xf0de,	0xf0ae,
+	0xf402,	0xf0a3,	0xf0a5,	0xf0b7,	0xf0a9,	0xf0a7,	0xf0b6,	0xf0bc,
+	0xf0bd,	0xf0be,	0xf05b,	0xf05d,	0xf000,	0xf0a8,	0xf0b4,	0xf0d7,
+	0xf07b,	0xf041,	0xf042,	0xf043,	0xf044,	0xf045,	0xf046,	0xf047,
+	0xf048,	0xf049,	0xf000,	0xf0f4,	0xf0f6,	0xf0f2,	0xf0f3,	0xf0f5,
+	0xf07d,	0xf04a,	0xf04b,	0xf04c,	0xf04d,	0xf04e,	0xf04f,	0xf050,
+	0xf051,	0xf052,	0xf0b9,	0xf0fb,	0xf0fc,	0xf0f9,	0xf0fa,	0xf0ff,
+	0xf05c,	0xf0f7,	0xf053,	0xf054,	0xf055,	0xf056,	0xf057,	0xf058,
+	0xf059,	0xf05a,	0xf0b2,	0xf0d4,	0xf0d6,	0xf0d2,	0xf0d3,	0xf0d5,
+	0xf030,	0xf031,	0xf032,	0xf033,	0xf034,	0xf035,	0xf036,	0xf037,
+	0xf038,	0xf039,	0xf0b3,	0xf0db,	0xf0dc,	0xf0d9,	0xf0da,	0xf000,
+};
+
+static u_short ctrl_map[NR_KEYS] = {
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf11f,	0xf120,	0xf121,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf01a,	0xf003,	0xf212,	0xf004,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf109,	0xf10a,	0xf206,	0xf00a,	0xf200,	0xf200,
+};
+
+static u_short shift_ctrl_map[NR_KEYS] = {
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf10c,	0xf10d,	0xf10e,	0xf10f,	0xf110,	0xf111,	0xf112,
+	0xf113,	0xf11e,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf100,	0xf101,	0xf211,	0xf103,	0xf104,	0xf105,	0xf20b,
+	0xf20a,	0xf108,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+};
+
+ushort *key_maps[MAX_NR_KEYMAPS] = {
+	plain_map, shift_map, NULL, NULL,
+	ctrl_map, shift_ctrl_map, NULL,
+};
+
+unsigned int keymap_count = 4;
+
+
+/*
+ * Philosophy: most people do not define more strings, but those who do
+ * often want quite a lot of string space. So, we statically allocate
+ * the default and allocate dynamically in chunks of 512 bytes.
+ */
+
+char func_buf[] = {
+	'\033', '[', '[', 'A', 0, 
+	'\033', '[', '[', 'B', 0, 
+	'\033', '[', '[', 'C', 0, 
+	'\033', '[', '[', 'D', 0, 
+	'\033', '[', '[', 'E', 0, 
+	'\033', '[', '1', '7', '~', 0, 
+	'\033', '[', '1', '8', '~', 0, 
+	'\033', '[', '1', '9', '~', 0, 
+	'\033', '[', '2', '0', '~', 0, 
+	'\033', '[', '2', '1', '~', 0, 
+	'\033', '[', '2', '3', '~', 0, 
+	'\033', '[', '2', '4', '~', 0, 
+	'\033', '[', '2', '5', '~', 0, 
+	'\033', '[', '2', '6', '~', 0, 
+	'\033', '[', '2', '8', '~', 0, 
+	'\033', '[', '2', '9', '~', 0, 
+	'\033', '[', '3', '1', '~', 0, 
+	'\033', '[', '3', '2', '~', 0, 
+	'\033', '[', '3', '3', '~', 0, 
+	'\033', '[', '3', '4', '~', 0, 
+};
+
+
+char *funcbufptr = func_buf;
+int funcbufsize = sizeof(func_buf);
+int funcbufleft = 0;          /* space left */
+
+char *func_table[MAX_NR_FUNC] = {
+	func_buf + 0,
+	func_buf + 5,
+	func_buf + 10,
+	func_buf + 15,
+	func_buf + 20,
+	func_buf + 25,
+	func_buf + 31,
+	func_buf + 37,
+	func_buf + 43,
+	func_buf + 49,
+	func_buf + 55,
+	func_buf + 61,
+	func_buf + 67,
+	func_buf + 73,
+	func_buf + 79,
+	func_buf + 85,
+	func_buf + 91,
+	func_buf + 97,
+	func_buf + 103,
+	func_buf + 109,
+	NULL,
+};
+
+struct kbdiacruc accent_table[MAX_DIACR] = {
+	{'^', 'c', 0003},	{'^', 'd', 0004},
+	{'^', 'z', 0032},	{'^', 0012, 0000},
+};
+
+unsigned int accent_table_size = 4;
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/defkeymap.map b/ap/os/linux/linux-3.4.x/drivers/s390/char/defkeymap.map
new file mode 100644
index 0000000..353b3f2
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/defkeymap.map
@@ -0,0 +1,191 @@
+# Default keymap for 3270 (ebcdic codepage 037).
+keymaps 0-1,4-5
+
+keycode   0 = nul		Oslash
+keycode   1 = nul		a
+keycode   2 = nul		b
+keycode   3 = nul		c
+keycode   4 = nul		d
+keycode   5 = nul		e
+keycode   6 = nul		f
+keycode   7 = nul		g
+keycode   8 = nul		h
+keycode   9 = nul		i
+keycode  10 = nul		guillemotleft
+keycode  11 = nul		guillemotright
+keycode  12 = nul		eth
+keycode  13 = nul		yacute
+keycode  14 = nul		thorn
+keycode  15 = nul		plusminus
+keycode  16 = nul		degree
+keycode  17 = nul		j
+keycode  18 = nul		k
+keycode  19 = nul		l
+keycode  20 = nul		m
+keycode  21 = nul		n
+keycode  22 = nul		o
+keycode  23 = nul		p
+keycode  24 = nul		q
+keycode  25 = nul		r
+keycode  26 = nul		nul
+keycode  27 = nul		nul
+keycode  28 = nul		ae
+keycode  29 = nul		cedilla
+keycode  30 = nul		AE
+keycode  31 = nul		currency
+keycode  32 = nul		mu
+keycode  33 = nul		tilde
+keycode  34 = nul		s
+keycode  35 = nul		t
+keycode  36 = nul		u
+keycode  37 = nul		v
+keycode  38 = nul		w
+keycode  39 = nul		x
+keycode  40 = nul		y
+keycode  41 = nul		z
+keycode  42 = nul		exclamdown
+keycode  43 = nul		questiondown
+keycode  44 = nul		ETH
+keycode  45 = nul		Yacute
+keycode  46 = nul		THORN
+keycode  47 = nul		registered
+keycode  48 = nul		dead_circumflex
+keycode  49 = nul		sterling
+keycode  50 = nul		yen
+keycode  51 = nul		periodcentered
+keycode  52 = nul		copyright
+keycode  53 = nul		section
+keycode  54 = nul		paragraph
+keycode  55 = nul		onequarter
+keycode  56 = nul		onehalf
+keycode  57 = nul		threequarters
+keycode  58 = nul		bracketleft
+keycode  59 = nul		bracketright
+keycode  60 = nul		nul
+keycode  61 = nul		diaeresis
+keycode  62 = nul		acute
+keycode  63 = nul		multiply
+keycode  64 = space		braceleft
+keycode  65 = nul		A
+keycode  66 = acircumflex	B
+keycode  67 = adiaeresis	C
+keycode  68 = agrave		D
+keycode  69 = aacute		E
+keycode  70 = atilde		F
+keycode  71 = aring		G
+keycode  72 = ccedilla		H
+keycode  73 = ntilde		I
+keycode  74 = cent		nul
+keycode  75 = period		ocircumflex
+keycode  76 = less		odiaeresis
+keycode  77 = parenleft		ograve
+keycode  78 = plus		oacute
+keycode  79 = bar		otilde
+keycode  80 = ampersand		braceright
+keycode  81 = eacute		J
+keycode  82 = acircumflex	K
+keycode  83 = ediaeresis	L
+keycode  84 = egrave		M
+keycode  85 = iacute		N
+keycode  86 = icircumflex	O
+keycode  87 = idiaeresis	P
+keycode  88 = igrave		Q
+keycode  89 = ssharp		R
+keycode  90 = exclam		onesuperior
+keycode  91 = dollar		ucircumflex
+keycode  92 = asterisk		udiaeresis
+keycode  93 = parenright	ugrave
+keycode  94 = semicolon		uacute
+keycode  95 = notsign		ydiaeresis
+keycode  96 = minus		backslash
+keycode  97 = slash		division
+keycode  98 = Acircumflex	S
+keycode  99 = Adiaeresis	T
+keycode 100 = Agrave		U
+keycode 101 = Aacute		V
+keycode 102 = Atilde		W
+keycode 103 = Aring		X
+keycode 104 = Ccedilla		Y
+keycode 105 = Ntilde		Z
+keycode 106 = brokenbar		twosuperior
+keycode 107 = comma		Ocircumflex
+keycode 108 = percent		Odiaeresis
+keycode 109 = underscore	Ograve
+keycode 110 = greater		Oacute
+keycode 111 = question		Otilde
+keycode 112 = oslash		zero
+keycode 113 = Eacute		one
+keycode 114 = Ecircumflex	two
+keycode 115 = Ediaeresis	three
+keycode 116 = Egrave		four
+keycode 117 = Iacute		five
+keycode 118 = Icircumflex	six
+keycode 119 = Idiaeresis	seven
+keycode 120 = Igrave		eight
+keycode 121 = grave		nine
+keycode 122 = colon		threesuperior
+keycode 123 = numbersign	Ucircumflex
+keycode 124 = at		Udiaeresis
+keycode 125 = apostrophe	Ugrave
+keycode 126 = equal		Uacute
+keycode 127 = quotedbl		nul
+
+# AID keys
+control keycode  74 = F22
+control keycode  75 = F23
+control keycode  76 = F24
+control keycode 107 = Control_z		# PA3
+control keycode 108 = Control_c		# PA1
+control keycode 109 = KeyboardSignal	# Clear
+control keycode 110 = Control_d		# PA2
+control keycode 122 = F10
+control keycode 123 = F11		# F11
+control keycode 124 = Last_Console	# F12
+control keycode 125 = Linefeed
+shift control keycode  65 = F13
+shift control keycode  66 = F14
+shift control keycode  67 = F15
+shift control keycode  68 = F16
+shift control keycode  69 = F17
+shift control keycode  70 = F18
+shift control keycode  71 = F19
+shift control keycode  72 = F20
+shift control keycode  73 = F21
+shift control keycode 113 = F1
+shift control keycode 114 = F2
+shift control keycode 115 = Incr_Console
+shift control keycode 116 = F4
+shift control keycode 117 = F5
+shift control keycode 118 = F6
+shift control keycode 119 = Scroll_Backward
+shift control keycode 120 = Scroll_Forward
+shift control keycode 121 = F9
+
+string F1 = "\033[[A"
+string F2 = "\033[[B"
+string F3 = "\033[[C"
+string F4 = "\033[[D"
+string F5 = "\033[[E"
+string F6 = "\033[17~"
+string F7 = "\033[18~"
+string F8 = "\033[19~"
+string F9 = "\033[20~"
+string F10 = "\033[21~"
+string F11 = "\033[23~"
+string F12 = "\033[24~"
+string F13 = "\033[25~"
+string F14 = "\033[26~"
+string F15 = "\033[28~"
+string F16 = "\033[29~"
+string F17 = "\033[31~"
+string F18 = "\033[32~"
+string F19 = "\033[33~"
+string F20 = "\033[34~"
+# string F21 ??
+# string F22 ??
+# string F23 ??
+# string F24 ??
+compose '^' 'c' to Control_c
+compose '^' 'd' to Control_d
+compose '^' 'z' to Control_z
+compose '^' '\012' to nul
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/fs3270.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/fs3270.c
new file mode 100644
index 0000000..9117045
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/fs3270.c
@@ -0,0 +1,551 @@
+/*
+ * IBM/3270 Driver - fullscreen driver.
+ *
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
+ */
+
+#include <linux/bootmem.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/compat.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/ebcdic.h>
+#include <asm/idals.h>
+
+#include "raw3270.h"
+#include "ctrlchar.h"
+
+static struct raw3270_fn fs3270_fn;
+
+struct fs3270 {
+	struct raw3270_view view;
+	struct pid *fs_pid;		/* Pid of controlling program. */
+	int read_command;		/* ccw command to use for reads. */
+	int write_command;		/* ccw command to use for writes. */
+	int attention;			/* Got attention. */
+	int active;			/* Fullscreen view is active. */
+	struct raw3270_request *init;	/* single init request. */
+	wait_queue_head_t wait;		/* Init & attention wait queue. */
+	struct idal_buffer *rdbuf;	/* full-screen-deactivate buffer */
+	size_t rdbuf_size;		/* size of data returned by RDBUF */
+};
+
+static DEFINE_MUTEX(fs3270_mutex);
+
+static void
+fs3270_wake_up(struct raw3270_request *rq, void *data)
+{
+	wake_up((wait_queue_head_t *) data);
+}
+
+static inline int
+fs3270_working(struct fs3270 *fp)
+{
+	/*
+	 * The fullscreen view is in working order if the view
+	 * has been activated AND the initial request is finished.
+	 */
+	return fp->active && raw3270_request_final(fp->init);
+}
+
+static int
+fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
+{
+	struct fs3270 *fp;
+	int rc;
+
+	fp = (struct fs3270 *) view;
+	rq->callback = fs3270_wake_up;
+	rq->callback_data = &fp->wait;
+
+	do {
+		if (!fs3270_working(fp)) {
+			/* Fullscreen view isn't ready yet. */
+			rc = wait_event_interruptible(fp->wait,
+						      fs3270_working(fp));
+			if (rc != 0)
+				break;
+		}
+		rc = raw3270_start(view, rq);
+		if (rc == 0) {
+			/* Started successfully. Now wait for completion. */
+			wait_event(fp->wait, raw3270_request_final(rq));
+		}
+	} while (rc == -EACCES);
+	return rc;
+}
+
+/*
+ * Switch to the fullscreen view.
+ */
+static void
+fs3270_reset_callback(struct raw3270_request *rq, void *data)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) rq->view;
+	raw3270_request_reset(rq);
+	wake_up(&fp->wait);
+}
+
+static void
+fs3270_restore_callback(struct raw3270_request *rq, void *data)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) rq->view;
+	if (rq->rc != 0 || rq->rescnt != 0) {
+		if (fp->fs_pid)
+			kill_pid(fp->fs_pid, SIGHUP, 1);
+	}
+	fp->rdbuf_size = 0;
+	raw3270_request_reset(rq);
+	wake_up(&fp->wait);
+}
+
+static int
+fs3270_activate(struct raw3270_view *view)
+{
+	struct fs3270 *fp;
+	char *cp;
+	int rc;
+
+	fp = (struct fs3270 *) view;
+
+	/* If an old init command is still running just return. */
+	if (!raw3270_request_final(fp->init))
+		return 0;
+
+	if (fp->rdbuf_size == 0) {
+		/* No saved buffer. Just clear the screen. */
+		raw3270_request_set_cmd(fp->init, TC_EWRITEA);
+		fp->init->callback = fs3270_reset_callback;
+	} else {
+		/* Restore fullscreen buffer saved by fs3270_deactivate. */
+		raw3270_request_set_cmd(fp->init, TC_EWRITEA);
+		raw3270_request_set_idal(fp->init, fp->rdbuf);
+		fp->init->ccw.count = fp->rdbuf_size;
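+		/*
+		 * Prepend a restore header to the saved screen image: a
+		 * keyboard-restore write control character, a set-buffer-
+		 * address order pointing at the cursor position saved in
+		 * bytes 6/7 of the RDBUF data, and an insert-cursor order.
+		 * The bytes consumed for this are replaced by a second SBA
+		 * to buffer address 0 (0x40 0x40).
+		 */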
+		cp = fp->rdbuf->data[0];
+		cp[0] = TW_KR;
+		cp[1] = TO_SBA;
+		cp[2] = cp[6];
+		cp[3] = cp[7];
+		cp[4] = TO_IC;
+		cp[5] = TO_SBA;
+		cp[6] = 0x40;
+		cp[7] = 0x40;
+		fp->init->rescnt = 0;
+		fp->init->callback = fs3270_restore_callback;
+	}
+	rc = fp->init->rc = raw3270_start_locked(view, fp->init);
+	if (rc)
+		fp->init->callback(fp->init, NULL);
+	else
+		fp->active = 1;
+	return rc;
+}
+
+/*
+ * Shutdown fullscreen view.
+ */
+static void
+fs3270_save_callback(struct raw3270_request *rq, void *data)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) rq->view;
+
+	/* Correct idal buffer element 0 address. */
+	fp->rdbuf->data[0] -= 5;
+	fp->rdbuf->size += 5;
+
+	/*
+	 * If the rdbuf command failed or the idal buffer is
+	 * too small for the amount of data returned by the
+	 * rdbuf command, then we have no choice but to send
+	 * a SIGHUP to the application.
+	 */
+	if (rq->rc != 0 || rq->rescnt == 0) {
+		if (fp->fs_pid)
+			kill_pid(fp->fs_pid, SIGHUP, 1);
+		fp->rdbuf_size = 0;
+	} else
+		fp->rdbuf_size = fp->rdbuf->size - rq->rescnt;
+	raw3270_request_reset(rq);
+	wake_up(&fp->wait);
+}
+
+static void
+fs3270_deactivate(struct raw3270_view *view)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) view;
+	fp->active = 0;
+
+	/* If an old init command is still running just return. */
+	if (!raw3270_request_final(fp->init))
+		return;
+
+	/* Prepare read-buffer request. */
+	raw3270_request_set_cmd(fp->init, TC_RDBUF);
+	/*
+	 * Hackish: skip first 5 bytes of the idal buffer to make
+	 * room for the TW_KR/TO_SBA/<address>/<address>/TO_IC sequence
+	 * in the activation command.
+	 */
+	fp->rdbuf->data[0] += 5;
+	fp->rdbuf->size -= 5;
+	raw3270_request_set_idal(fp->init, fp->rdbuf);
+	fp->init->rescnt = 0;
+	fp->init->callback = fs3270_save_callback;
+
+	/* Start I/O to read in the 3270 buffer. */
+	fp->init->rc = raw3270_start_locked(view, fp->init);
+	if (fp->init->rc)
+		fp->init->callback(fp->init, NULL);
+}
+
+static int
+fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
+{
+	/* Handle ATTN. Set indication and wake waiters for attention. */
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		fp->attention = 1;
+		wake_up(&fp->wait);
+	}
+
+	if (rq) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+			rq->rc = -EIO;
+		else
+			/* Normal end. Copy residual count. */
+			rq->rescnt = irb->scsw.cmd.count;
+	}
+	return RAW3270_IO_DONE;
+}
+
+/*
+ * Process reads from fullscreen 3270.
+ */
+static ssize_t
+fs3270_read(struct file *filp, char __user *data, size_t count, loff_t *off)
+{
+	struct fs3270 *fp;
+	struct raw3270_request *rq;
+	struct idal_buffer *ib;
+	ssize_t rc;
+
+	if (count == 0 || count > 65535)
+		return -EINVAL;
+	fp = filp->private_data;
+	if (!fp)
+		return -ENODEV;
+	ib = idal_buffer_alloc(count, 0);
+	if (IS_ERR(ib))
+		return -ENOMEM;
+	rq = raw3270_request_alloc(0);
+	if (!IS_ERR(rq)) {
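+		/*
+		 * Default to a Read Modified (0x06) once a write command has
+		 * been configured via TUBOCMD, otherwise fall back to a Read
+		 * Buffer (0x02); these values presumably correspond to
+		 * TC_READMOD/TC_RDBUF in raw3270.h.
+		 */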
+		if (fp->read_command == 0 && fp->write_command != 0)
+			fp->read_command = 6;
+		raw3270_request_set_cmd(rq, fp->read_command ? : 2);
+		raw3270_request_set_idal(rq, ib);
+		rc = wait_event_interruptible(fp->wait, fp->attention);
+		fp->attention = 0;
+		if (rc == 0) {
+			rc = fs3270_do_io(&fp->view, rq);
+			if (rc == 0) {
+				count -= rq->rescnt;
+				if (idal_buffer_to_user(ib, data, count) != 0)
+					rc = -EFAULT;
+				else
+					rc = count;
+
+			}
+		}
+		raw3270_request_free(rq);
+	} else
+		rc = PTR_ERR(rq);
+	idal_buffer_free(ib);
+	return rc;
+}
+
+/*
+ * Process writes to fullscreen 3270.
+ */
+static ssize_t
+fs3270_write(struct file *filp, const char __user *data, size_t count, loff_t *off)
+{
+	struct fs3270 *fp;
+	struct raw3270_request *rq;
+	struct idal_buffer *ib;
+	int write_command;
+	ssize_t rc;
+
+	fp = filp->private_data;
+	if (!fp)
+		return -ENODEV;
+	ib = idal_buffer_alloc(count, 0);
+	if (IS_ERR(ib))
+		return -ENOMEM;
+	rq = raw3270_request_alloc(0);
+	if (!IS_ERR(rq)) {
+		if (idal_buffer_from_user(ib, data, count) == 0) {
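+			/*
+			 * Default to a plain Write (0x01); an Erase/Write
+			 * (0x05) configured via TUBOCMD is promoted to
+			 * Erase/Write Alternate (0x0d). These presumably
+			 * match TC_WRITE/TC_EWRITE/TC_EWRITEA in raw3270.h.
+			 */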
+			write_command = fp->write_command ? : 1;
+			if (write_command == 5)
+				write_command = 13;
+			raw3270_request_set_cmd(rq, write_command);
+			raw3270_request_set_idal(rq, ib);
+			rc = fs3270_do_io(&fp->view, rq);
+			if (rc == 0)
+				rc = count - rq->rescnt;
+		} else
+			rc = -EFAULT;
+		raw3270_request_free(rq);
+	} else
+		rc = PTR_ERR(rq);
+	idal_buffer_free(ib);
+	return rc;
+}
+
+/*
+ * Process ioctl commands for the tube driver.
+ */
+static long
+fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	char __user *argp;
+	struct fs3270 *fp;
+	struct raw3270_iocb iocb;
+	int rc;
+
+	fp = filp->private_data;
+	if (!fp)
+		return -ENODEV;
+	if (is_compat_task())
+		argp = compat_ptr(arg);
+	else
+		argp = (char __user *)arg;
+	rc = 0;
+	mutex_lock(&fs3270_mutex);
+	switch (cmd) {
+	case TUBICMD:
+		fp->read_command = arg;
+		break;
+	case TUBOCMD:
+		fp->write_command = arg;
+		break;
+	case TUBGETI:
+		rc = put_user(fp->read_command, argp);
+		break;
+	case TUBGETO:
+		rc = put_user(fp->write_command, argp);
+		break;
+	case TUBGETMOD:
+		iocb.model = fp->view.model;
+		iocb.line_cnt = fp->view.rows;
+		iocb.col_cnt = fp->view.cols;
+		iocb.pf_cnt = 24;
+		iocb.re_cnt = 20;
+		iocb.map = 0;
+		if (copy_to_user(argp, &iocb, sizeof(struct raw3270_iocb)))
+			rc = -EFAULT;
+		break;
+	}
+	mutex_unlock(&fs3270_mutex);
+	return rc;
+}
+
+/*
+ * Allocate fs3270 structure.
+ */
+static struct fs3270 *
+fs3270_alloc_view(void)
+{
+	struct fs3270 *fp;
+
+	fp = kzalloc(sizeof(struct fs3270), GFP_KERNEL);
+	if (!fp)
+		return ERR_PTR(-ENOMEM);
+	fp->init = raw3270_request_alloc(0);
+	if (IS_ERR(fp->init)) {
+		kfree(fp);
+		return ERR_PTR(-ENOMEM);
+	}
+	return fp;
+}
+
+/*
+ * Free fs3270 structure.
+ */
+static void
+fs3270_free_view(struct raw3270_view *view)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) view;
+	if (fp->rdbuf)
+		idal_buffer_free(fp->rdbuf);
+	raw3270_request_free(((struct fs3270 *) view)->init);
+	kfree(view);
+}
+
+/*
+ * Release the view: send SIGHUP to the controlling program, if any.
+ */
+static void
+fs3270_release(struct raw3270_view *view)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) view;
+	if (fp->fs_pid)
+		kill_pid(fp->fs_pid, SIGHUP, 1);
+}
+
+/* View to a 3270 device. Can be console, tty or fullscreen. */
+static struct raw3270_fn fs3270_fn = {
+	.activate = fs3270_activate,
+	.deactivate = fs3270_deactivate,
+	.intv = (void *) fs3270_irq,
+	.release = fs3270_release,
+	.free = fs3270_free_view
+};
+
+/*
+ * This routine is called whenever a 3270 fullscreen device is opened.
+ */
+static int
+fs3270_open(struct inode *inode, struct file *filp)
+{
+	struct fs3270 *fp;
+	struct idal_buffer *ib;
+	int minor, rc = 0;
+
+	if (imajor(filp->f_path.dentry->d_inode) != IBM_FS3270_MAJOR)
+		return -ENODEV;
+	minor = iminor(filp->f_path.dentry->d_inode);
+	/* Check for minor 0 multiplexer. */
+	if (minor == 0) {
+		struct tty_struct *tty = get_current_tty();
+		if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) {
+			tty_kref_put(tty);
+			return -ENODEV;
+		}
+		minor = tty->index + RAW3270_FIRSTMINOR;
+		tty_kref_put(tty);
+	}
+	mutex_lock(&fs3270_mutex);
+	/* Check if some other program is already using fullscreen mode. */
+	fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
+	if (!IS_ERR(fp)) {
+		raw3270_put_view(&fp->view);
+		rc = -EBUSY;
+		goto out;
+	}
+	/* Allocate fullscreen view structure. */
+	fp = fs3270_alloc_view();
+	if (IS_ERR(fp)) {
+		rc = PTR_ERR(fp);
+		goto out;
+	}
+
+	init_waitqueue_head(&fp->wait);
+	fp->fs_pid = get_pid(task_pid(current));
+	rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
+	if (rc) {
+		fs3270_free_view(&fp->view);
+		goto out;
+	}
+
+	/* Allocate idal-buffer. */
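+	/*
+	 * Twice the screen size presumably leaves room for attribute and
+	 * order bytes in the RDBUF output; the extra 5 bytes are the header
+	 * space juggled by fs3270_deactivate()/fs3270_activate().
+	 */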
+	ib = idal_buffer_alloc(2*fp->view.rows*fp->view.cols + 5, 0);
+	if (IS_ERR(ib)) {
+		raw3270_put_view(&fp->view);
+		raw3270_del_view(&fp->view);
+		rc = PTR_ERR(ib);
+		goto out;
+	}
+	fp->rdbuf = ib;
+
+	rc = raw3270_activate_view(&fp->view);
+	if (rc) {
+		raw3270_put_view(&fp->view);
+		raw3270_del_view(&fp->view);
+		goto out;
+	}
+	nonseekable_open(inode, filp);
+	filp->private_data = fp;
+out:
+	mutex_unlock(&fs3270_mutex);
+	return rc;
+}
+
+/*
+ * This routine is called when the 3270 fullscreen device is closed. We wait
+ * for the remaining request to be completed. Then we clean up.
+ */
+static int
+fs3270_close(struct inode *inode, struct file *filp)
+{
+	struct fs3270 *fp;
+
+	fp = filp->private_data;
+	filp->private_data = NULL;
+	if (fp) {
+		put_pid(fp->fs_pid);
+		fp->fs_pid = NULL;
+		raw3270_reset(&fp->view);
+		raw3270_put_view(&fp->view);
+		raw3270_del_view(&fp->view);
+	}
+	return 0;
+}
+
+static const struct file_operations fs3270_fops = {
+	.owner		 = THIS_MODULE,		/* owner */
+	.read		 = fs3270_read,		/* read */
+	.write		 = fs3270_write,	/* write */
+	.unlocked_ioctl	 = fs3270_ioctl,	/* ioctl */
+	.compat_ioctl	 = fs3270_ioctl,	/* ioctl */
+	.open		 = fs3270_open,		/* open */
+	.release	 = fs3270_close,	/* release */
+	.llseek		= no_llseek,
+};
+
+/*
+ * 3270 fullscreen driver initialization.
+ */
+static int __init
+fs3270_init(void)
+{
+	int rc;
+
+	rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops);
+	if (rc)
+		return rc;
+	return 0;
+}
+
+static void __exit
+fs3270_exit(void)
+{
+	unregister_chrdev(IBM_FS3270_MAJOR, "fs3270");
+}
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(IBM_FS3270_MAJOR);
+
+module_init(fs3270_init);
+module_exit(fs3270_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/keyboard.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/keyboard.c
new file mode 100644
index 0000000..8065881
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/keyboard.c
@@ -0,0 +1,556 @@
+/*
+ *  drivers/s390/char/keyboard.c
+ *    ebcdic keycode functions for s390 console drivers
+ *
+ *  S390 version
+ *    Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+
+#include <linux/consolemap.h>
+#include <linux/kbd_kern.h>
+#include <linux/kbd_diacr.h>
+#include <asm/uaccess.h>
+
+#include "keyboard.h"
+
+/*
+ * Handler Tables.
+ */
+#define K_HANDLERS\
+	k_self,		k_fn,		k_spec,		k_ignore,\
+	k_dead,		k_ignore,	k_ignore,	k_ignore,\
+	k_ignore,	k_ignore,	k_ignore,	k_ignore,\
+	k_ignore,	k_ignore,	k_ignore,	k_ignore
+
+typedef void (k_handler_fn)(struct kbd_data *, unsigned char);
+static k_handler_fn K_HANDLERS;
+static k_handler_fn *k_handler[16] = { K_HANDLERS };
+
+/* maximum values each key_handler can handle */
+static const int kbd_max_vals[] = {
+	255, ARRAY_SIZE(func_table) - 1, NR_FN_HANDLER - 1, 0,
+	NR_DEAD - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+static const int KBD_NR_TYPES = ARRAY_SIZE(kbd_max_vals);
+
+static unsigned char ret_diacr[NR_DEAD] = {
+	'`', '\'', '^', '~', '"', ','
+};
+
+/*
+ * Alloc/free of kbd_data structures.
+ */
+struct kbd_data *
+kbd_alloc(void) {
+	struct kbd_data *kbd;
+	int i;
+
+	kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL);
+	if (!kbd)
+		goto out;
+	kbd->key_maps = kzalloc(sizeof(key_maps), GFP_KERNEL);
+	if (!kbd->key_maps)
+		goto out_kbd;
+	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
+		if (key_maps[i]) {
+			kbd->key_maps[i] = kmemdup(key_maps[i],
+						   sizeof(u_short) * NR_KEYS,
+						   GFP_KERNEL);
+			if (!kbd->key_maps[i])
+				goto out_maps;
+		}
+	}
+	kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL);
+	if (!kbd->func_table)
+		goto out_maps;
+	for (i = 0; i < ARRAY_SIZE(func_table); i++) {
+		if (func_table[i]) {
+			kbd->func_table[i] = kstrdup(func_table[i],
+						     GFP_KERNEL);
+			if (!kbd->func_table[i])
+				goto out_func;
+		}
+	}
+	kbd->fn_handler =
+		kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
+	if (!kbd->fn_handler)
+		goto out_func;
+	kbd->accent_table = kmemdup(accent_table,
+				    sizeof(struct kbdiacruc) * MAX_DIACR,
+				    GFP_KERNEL);
+	if (!kbd->accent_table)
+		goto out_fn_handler;
+	kbd->accent_table_size = accent_table_size;
+	return kbd;
+
+out_fn_handler:
+	kfree(kbd->fn_handler);
+out_func:
+	for (i = 0; i < ARRAY_SIZE(func_table); i++)
+		kfree(kbd->func_table[i]);
+	kfree(kbd->func_table);
+out_maps:
+	for (i = 0; i < ARRAY_SIZE(key_maps); i++)
+		kfree(kbd->key_maps[i]);
+	kfree(kbd->key_maps);
+out_kbd:
+	kfree(kbd);
+out:
+	return NULL;
+}
+
+void
+kbd_free(struct kbd_data *kbd)
+{
+	int i;
+
+	kfree(kbd->accent_table);
+	kfree(kbd->fn_handler);
+	for (i = 0; i < ARRAY_SIZE(func_table); i++)
+		kfree(kbd->func_table[i]);
+	kfree(kbd->func_table);
+	for (i = 0; i < ARRAY_SIZE(key_maps); i++)
+		kfree(kbd->key_maps[i]);
+	kfree(kbd->key_maps);
+	kfree(kbd);
+}
+
+/*
+ * Generate ascii -> ebcdic translation table from kbd_data.
+ */
+void
+kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
+{
+	unsigned short *keymap, keysym;
+	int i, j, k;
+
+	memset(ascebc, 0x40, 256);
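+	/*
+	 * 0x40 is the EBCDIC blank, used for characters without a key.
+	 * The 3270 keymaps apparently encode the EBCDIC value in the keycode
+	 * itself, with odd (shifted) keymaps covering the upper half, hence
+	 * the (i & 1) << 7 below.
+	 */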
+	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
+		keymap = kbd->key_maps[i];
+		if (!keymap)
+			continue;
+		for (j = 0; j < NR_KEYS; j++) {
+			k = ((i & 1) << 7) + j;
+			keysym = keymap[j];
+			if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
+			    KTYP(keysym) == (KT_LETTER | 0xf0))
+				ascebc[KVAL(keysym)] = k;
+			else if (KTYP(keysym) == (KT_DEAD | 0xf0))
+				ascebc[ret_diacr[KVAL(keysym)]] = k;
+		}
+	}
+}
+
+#if 0
+/*
+ * Generate ebcdic -> ascii translation table from kbd_data.
+ */
+void
+kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
+{
+	unsigned short *keymap, keysym;
+	int i, j, k;
+
+	memset(ebcasc, ' ', 256);
+	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
+		keymap = kbd->key_maps[i];
+		if (!keymap)
+			continue;
+		for (j = 0; j < NR_KEYS; j++) {
+			keysym = keymap[j];
+			k = ((i & 1) << 7) + j;
+			if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
+			    KTYP(keysym) == (KT_LETTER | 0xf0))
+				ebcasc[k] = KVAL(keysym);
+			else if (KTYP(keysym) == (KT_DEAD | 0xf0))
+				ebcasc[k] = ret_diacr[KVAL(keysym)];
+		}
+	}
+}
+#endif
+
+/*
+ * We have a combining character DIACR here, followed by the character CH.
+ * If the combination occurs in the table, return the corresponding value.
+ * Otherwise, if CH is a space or equals DIACR, return DIACR.
+ * Otherwise, conclude that DIACR was not combining after all,
+ * queue it and return CH.
+ */
+static unsigned int
+handle_diacr(struct kbd_data *kbd, unsigned int ch)
+{
+	int i, d;
+
+	d = kbd->diacr;
+	kbd->diacr = 0;
+
+	for (i = 0; i < kbd->accent_table_size; i++) {
+		if (kbd->accent_table[i].diacr == d &&
+		    kbd->accent_table[i].base == ch)
+			return kbd->accent_table[i].result;
+	}
+
+	if (ch == ' ' || ch == d)
+		return d;
+
+	kbd_put_queue(kbd->tty, d);
+	return ch;
+}
+
+/*
+ * Handle dead key.
+ */
+static void
+k_dead(struct kbd_data *kbd, unsigned char value)
+{
+	value = ret_diacr[value];
+	kbd->diacr = (kbd->diacr ? handle_diacr(kbd, value) : value);
+}
+
+/*
+ * Normal character handler.
+ */
+static void
+k_self(struct kbd_data *kbd, unsigned char value)
+{
+	if (kbd->diacr)
+		value = handle_diacr(kbd, value);
+	kbd_put_queue(kbd->tty, value);
+}
+
+/*
+ * Special key handlers
+ */
+static void
+k_ignore(struct kbd_data *kbd, unsigned char value)
+{
+}
+
+/*
+ * Function key handler.
+ */
+static void
+k_fn(struct kbd_data *kbd, unsigned char value)
+{
+	if (kbd->func_table[value])
+		kbd_puts_queue(kbd->tty, kbd->func_table[value]);
+}
+
+static void
+k_spec(struct kbd_data *kbd, unsigned char value)
+{
+	if (value >= NR_FN_HANDLER)
+		return;
+	if (kbd->fn_handler[value])
+		kbd->fn_handler[value](kbd);
+}
+
+/*
+ * Put a UTF-8 character into the tty flip buffer.
+ * UTF-8 is defined for words of up to 31 bits,
+ * but we only need 16 bits here.
+ */
+static void
+to_utf8(struct tty_struct *tty, ushort c)
+{
+	if (c < 0x80)
+		/*  0******* */
+		kbd_put_queue(tty, c);
+	else if (c < 0x800) {
+		/* 110***** 10****** */
+		kbd_put_queue(tty, 0xc0 | (c >> 6));
+		kbd_put_queue(tty, 0x80 | (c & 0x3f));
+	} else {
+		/* 1110**** 10****** 10****** */
+		kbd_put_queue(tty, 0xe0 | (c >> 12));
+		kbd_put_queue(tty, 0x80 | ((c >> 6) & 0x3f));
+		kbd_put_queue(tty, 0x80 | (c & 0x3f));
+	}
+}
+
+/*
+ * Process keycode.
+ */
+void
+kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
+{
+	unsigned short keysym;
+	unsigned char type, value;
+
+	if (!kbd || !kbd->tty)
+		return;
+
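+	/*
+	 * Keycodes come in blocks of 128: 0-127 use the plain map, 128-255
+	 * the shift map, 256-383 keymap 4 and 384+ keymap 5 (in the usual
+	 * keymap numbering those would be the ctrl and ctrl+shift tables).
+	 */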
+	if (keycode >= 384)
+		keysym = kbd->key_maps[5][keycode - 384];
+	else if (keycode >= 256)
+		keysym = kbd->key_maps[4][keycode - 256];
+	else if (keycode >= 128)
+		keysym = kbd->key_maps[1][keycode - 128];
+	else
+		keysym = kbd->key_maps[0][keycode];
+
+	type = KTYP(keysym);
+	if (type >= 0xf0) {
+		type -= 0xf0;
+		if (type == KT_LETTER)
+			type = KT_LATIN;
+		value = KVAL(keysym);
+#ifdef CONFIG_MAGIC_SYSRQ	       /* Handle the SysRq Hack */
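+		/*
+		 * A SysRq is entered as the three-key sequence
+		 * '^' '-' <sysrq key>.
+		 */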
+		if (kbd->sysrq) {
+			if (kbd->sysrq == K(KT_LATIN, '-')) {
+				kbd->sysrq = 0;
+				handle_sysrq(value);
+				return;
+			}
+			if (value == '-') {
+				kbd->sysrq = K(KT_LATIN, '-');
+				return;
+			}
+			/* Incomplete sysrq sequence. */
+			(*k_handler[KTYP(kbd->sysrq)])(kbd, KVAL(kbd->sysrq));
+			kbd->sysrq = 0;
+		} else if ((type == KT_LATIN && value == '^') ||
+			   (type == KT_DEAD && ret_diacr[value] == '^')) {
+			kbd->sysrq = K(type, value);
+			return;
+		}
+#endif
+		(*k_handler[type])(kbd, value);
+	} else
+		to_utf8(kbd->tty, keysym);
+}
+
+/*
+ * Ioctl stuff.
+ */
+static int
+do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
+	      int cmd, int perm)
+{
+	struct kbentry tmp;
+	ushort *key_map, val, ov;
+
+	if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
+		return -EFAULT;
+#if NR_KEYS < 256
+	if (tmp.kb_index >= NR_KEYS)
+		return -EINVAL;
+#endif
+#if MAX_NR_KEYMAPS < 256
+	if (tmp.kb_table >= MAX_NR_KEYMAPS)
+		return -EINVAL;
+#endif
+
+	switch (cmd) {
+	case KDGKBENT:
+		key_map = kbd->key_maps[tmp.kb_table];
+		if (key_map) {
+		    val = U(key_map[tmp.kb_index]);
+		    if (KTYP(val) >= KBD_NR_TYPES)
+			val = K_HOLE;
+		} else
+		    val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP);
+		return put_user(val, &user_kbe->kb_value);
+	case KDSKBENT:
+		if (!perm)
+			return -EPERM;
+		if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) {
+			/* disallocate map */
+			key_map = kbd->key_maps[tmp.kb_table];
+			if (key_map) {
+			    kbd->key_maps[tmp.kb_table] = NULL;
+			    kfree(key_map);
+			}
+			break;
+		}
+
+		if (KTYP(tmp.kb_value) >= KBD_NR_TYPES)
+			return -EINVAL;
+		if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)])
+			return -EINVAL;
+
+		if (!(key_map = kbd->key_maps[tmp.kb_table])) {
+			int j;
+
+			key_map = kmalloc(sizeof(plain_map),
+						     GFP_KERNEL);
+			if (!key_map)
+				return -ENOMEM;
+			kbd->key_maps[tmp.kb_table] = key_map;
+			for (j = 0; j < NR_KEYS; j++)
+				key_map[j] = U(K_HOLE);
+		}
+		ov = U(key_map[tmp.kb_index]);
+		if (tmp.kb_value == ov)
+			break;	/* nothing to do */
+		/*
+		 * Attention Key.
+		 */
+		if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) &&
+		    !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		key_map[tmp.kb_index] = U(tmp.kb_value);
+		break;
+	}
+	return 0;
+}
+
+static int
+do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
+	       int cmd, int perm)
+{
+	unsigned char kb_func;
+	char *p;
+	int len;
+
+	/* Get u_kbs->kb_func. */
+	if (get_user(kb_func, &u_kbs->kb_func))
+		return -EFAULT;
+#if MAX_NR_FUNC < 256
+	if (kb_func >= MAX_NR_FUNC)
+		return -EINVAL;
+#endif
+
+	switch (cmd) {
+	case KDGKBSENT:
+		p = kbd->func_table[kb_func];
+		if (p) {
+			len = strlen(p);
+			if (len >= sizeof(u_kbs->kb_string))
+				len = sizeof(u_kbs->kb_string) - 1;
+			if (copy_to_user(u_kbs->kb_string, p, len))
+				return -EFAULT;
+		} else
+			len = 0;
+		if (put_user('\0', u_kbs->kb_string + len))
+			return -EFAULT;
+		break;
+	case KDSKBSENT:
+		if (!perm)
+			return -EPERM;
+		len = strnlen_user(u_kbs->kb_string,
+				   sizeof(u_kbs->kb_string) - 1);
+		if (!len)
+			return -EFAULT;
+		if (len > sizeof(u_kbs->kb_string) - 1)
+			return -EINVAL;
+		p = kmalloc(len + 1, GFP_KERNEL);
+		if (!p)
+			return -ENOMEM;
+		if (copy_from_user(p, u_kbs->kb_string, len)) {
+			kfree(p);
+			return -EFAULT;
+		}
+		p[len] = 0;
+		kfree(kbd->func_table[kb_func]);
+		kbd->func_table[kb_func] = p;
+		break;
+	}
+	return 0;
+}
+
+int kbd_ioctl(struct kbd_data *kbd, unsigned int cmd, unsigned long arg)
+{
+	void __user *argp;
+	unsigned int ct;
+	int perm;
+
+	argp = (void __user *)arg;
+
+	/*
+	 * To have permissions to do most of the vt ioctls, we either have
+	 * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
+	 */
+	perm = current->signal->tty == kbd->tty || capable(CAP_SYS_TTY_CONFIG);
+	switch (cmd) {
+	case KDGKBTYPE:
+		return put_user(KB_101, (char __user *)argp);
+	case KDGKBENT:
+	case KDSKBENT:
+		return do_kdsk_ioctl(kbd, argp, cmd, perm);
+	case KDGKBSENT:
+	case KDSKBSENT:
+		return do_kdgkb_ioctl(kbd, argp, cmd, perm);
+	case KDGKBDIACR:
+	{
+		struct kbdiacrs __user *a = argp;
+		struct kbdiacr diacr;
+		int i;
+
+		if (put_user(kbd->accent_table_size, &a->kb_cnt))
+			return -EFAULT;
+		for (i = 0; i < kbd->accent_table_size; i++) {
+			diacr.diacr = kbd->accent_table[i].diacr;
+			diacr.base = kbd->accent_table[i].base;
+			diacr.result = kbd->accent_table[i].result;
+			if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr)))
+			return -EFAULT;
+		}
+		return 0;
+	}
+	case KDGKBDIACRUC:
+	{
+		struct kbdiacrsuc __user *a = argp;
+
+		ct = kbd->accent_table_size;
+		if (put_user(ct, &a->kb_cnt))
+			return -EFAULT;
+		if (copy_to_user(a->kbdiacruc, kbd->accent_table,
+				 ct * sizeof(struct kbdiacruc)))
+			return -EFAULT;
+		return 0;
+	}
+	case KDSKBDIACR:
+	{
+		struct kbdiacrs __user *a = argp;
+		struct kbdiacr diacr;
+		int i;
+
+		if (!perm)
+			return -EPERM;
+		if (get_user(ct, &a->kb_cnt))
+			return -EFAULT;
+		if (ct >= MAX_DIACR)
+			return -EINVAL;
+		kbd->accent_table_size = ct;
+		for (i = 0; i < ct; i++) {
+			if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr)))
+				return -EFAULT;
+			kbd->accent_table[i].diacr = diacr.diacr;
+			kbd->accent_table[i].base = diacr.base;
+			kbd->accent_table[i].result = diacr.result;
+		}
+		return 0;
+	}
+	case KDSKBDIACRUC:
+	{
+		struct kbdiacrsuc __user *a = argp;
+
+		if (!perm)
+			return -EPERM;
+		if (get_user(ct, &a->kb_cnt))
+			return -EFAULT;
+		if (ct >= MAX_DIACR)
+			return -EINVAL;
+		kbd->accent_table_size = ct;
+		if (copy_from_user(kbd->accent_table, a->kbdiacruc,
+				   ct * sizeof(struct kbdiacruc)))
+			return -EFAULT;
+		return 0;
+	}
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+EXPORT_SYMBOL(kbd_ioctl);
+EXPORT_SYMBOL(kbd_ascebc);
+EXPORT_SYMBOL(kbd_free);
+EXPORT_SYMBOL(kbd_alloc);
+EXPORT_SYMBOL(kbd_keycode);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/keyboard.h b/ap/os/linux/linux-3.4.x/drivers/s390/char/keyboard.h
new file mode 100644
index 0000000..7e736aa
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/keyboard.h
@@ -0,0 +1,57 @@
+/*
+ *  drivers/s390/char/keyboard.h
+ *    ebcdic keycode functions for s390 console drivers
+ *
+ *    Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/keyboard.h>
+
+#define NR_FN_HANDLER	20
+
+struct kbd_data;
+
+typedef void (fn_handler_fn)(struct kbd_data *);
+
+/*
+ * FIXME: explain key_maps tricks.
+ */
+
+struct kbd_data {
+	struct tty_struct *tty;
+	unsigned short **key_maps;
+	char **func_table;
+	fn_handler_fn **fn_handler;
+	struct kbdiacruc *accent_table;
+	unsigned int accent_table_size;
+	unsigned int diacr;
+	unsigned short sysrq;
+};
+
+struct kbd_data *kbd_alloc(void);
+void kbd_free(struct kbd_data *);
+void kbd_ascebc(struct kbd_data *, unsigned char *);
+
+void kbd_keycode(struct kbd_data *, unsigned int);
+int kbd_ioctl(struct kbd_data *, unsigned int, unsigned long);
+
+/*
+ * Helper Functions.
+ */
+static inline void
+kbd_put_queue(struct tty_struct *tty, int ch)
+{
+	tty_insert_flip_char(tty, ch, 0);
+	tty_schedule_flip(tty);
+}
+
+static inline void
+kbd_puts_queue(struct tty_struct *tty, char *cp)
+{
+	while (*cp)
+		tty_insert_flip_char(tty, *cp++, 0);
+	tty_schedule_flip(tty);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/monreader.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/monreader.c
new file mode 100644
index 0000000..5b8b859
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/monreader.c
@@ -0,0 +1,649 @@
+/*
+ * Character device driver for reading z/VM *MONITOR service records.
+ *
+ * Copyright IBM Corp. 2004, 2009
+ *
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "monreader"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <net/iucv/iucv.h>
+#include <asm/uaccess.h>
+#include <asm/ebcdic.h>
+#include <asm/extmem.h>
+
+
+#define MON_COLLECT_SAMPLE 0x80
+#define MON_COLLECT_EVENT  0x40
+#define MON_SERVICE	   "*MONITOR"
+#define MON_IN_USE	   0x01
+#define MON_MSGLIM	   255
+
+static char mon_dcss_name[9] = "MONDCSS\0";
+
+struct mon_msg {
+	u32 pos;
+	u32 mca_offset;
+	struct iucv_message msg;
+	char msglim_reached;
+	char replied_msglim;
+};
+
+struct mon_private {
+	struct iucv_path *path;
+	struct mon_msg *msg_array[MON_MSGLIM];
+	unsigned int   write_index;
+	unsigned int   read_index;
+	atomic_t msglim_count;
+	atomic_t read_ready;
+	atomic_t iucv_connected;
+	atomic_t iucv_severed;
+};
+
+static unsigned long mon_in_use = 0;
+
+static unsigned long mon_dcss_start;
+static unsigned long mon_dcss_end;
+
+static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue);
+static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue);
+
+static u8 user_data_connect[16] = {
+	/* Version code, must be 0x01 for shared mode */
+	0x01,
+	/* what to collect */
+	MON_COLLECT_SAMPLE | MON_COLLECT_EVENT,
+	/* DCSS name in EBCDIC, 8 bytes padded with blanks */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+};
+
+static u8 user_data_sever[16] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+};
+
+static struct device *monreader_device;
+
+/******************************************************************************
+ *                             helper functions                               *
+ *****************************************************************************/
+/*
+ * Create the 8-byte EBCDIC DCSS segment name from
+ * an ASCII name, including blank padding.
+ */
+static void dcss_mkname(char *ascii_name, char *ebcdic_name)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		if (ascii_name[i] == '\0')
+			break;
+		ebcdic_name[i] = toupper(ascii_name[i]);
+	}
+	for (; i < 8; i++)
+		ebcdic_name[i] = ' ';
+	ASCEBC(ebcdic_name, 8);
+}
+
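+/*
+ * The 8 bytes of in-line IUCV message data (rmmsg) carry the start and end
+ * address of the monitor control area (MCA) inside the DCSS; each 12-byte
+ * MCA entry holds type bytes and the start/end address of the associated
+ * monitor record (see mon_check_mca() for the sanity checks applied).
+ */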
+static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
+{
+	return *(u32 *) &monmsg->msg.rmmsg;
+}
+
+static inline unsigned long mon_mca_end(struct mon_msg *monmsg)
+{
+	return *(u32 *) &monmsg->msg.rmmsg[4];
+}
+
+static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index)
+{
+	return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index);
+}
+
+static inline u32 mon_mca_size(struct mon_msg *monmsg)
+{
+	return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1;
+}
+
+static inline u32 mon_rec_start(struct mon_msg *monmsg)
+{
+	return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4));
+}
+
+static inline u32 mon_rec_end(struct mon_msg *monmsg)
+{
+	return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
+}
+
+static int mon_check_mca(struct mon_msg *monmsg)
+{
+	if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
+	    (mon_rec_start(monmsg) < mon_dcss_start) ||
+	    (mon_rec_end(monmsg) > mon_dcss_end) ||
+	    (mon_mca_type(monmsg, 0) == 0) ||
+	    (mon_mca_size(monmsg) % 12 != 0) ||
+	    (mon_mca_end(monmsg) <= mon_mca_start(monmsg)) ||
+	    (mon_mca_end(monmsg) > mon_dcss_end) ||
+	    (mon_mca_start(monmsg) < mon_dcss_start) ||
+	    ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
+		return -EINVAL;
+	return 0;
+}
+
+static int mon_send_reply(struct mon_msg *monmsg,
+			  struct mon_private *monpriv)
+{
+	int rc;
+
+	rc = iucv_message_reply(monpriv->path, &monmsg->msg,
+				IUCV_IPRMDATA, NULL, 0);
+	atomic_dec(&monpriv->msglim_count);
+	if (likely(!monmsg->msglim_reached)) {
+		monmsg->pos = 0;
+		monmsg->mca_offset = 0;
+		monpriv->read_index = (monpriv->read_index + 1) %
+				      MON_MSGLIM;
+		atomic_dec(&monpriv->read_ready);
+	} else
+		monmsg->replied_msglim = 1;
+	if (rc) {
+		pr_err("Reading monitor data failed with rc=%i\n", rc);
+		return -EIO;
+	}
+	return 0;
+}
+
+static void mon_free_mem(struct mon_private *monpriv)
+{
+	int i;
+
+	for (i = 0; i < MON_MSGLIM; i++)
+		if (monpriv->msg_array[i])
+			kfree(monpriv->msg_array[i]);
+	kfree(monpriv);
+}
+
+static struct mon_private *mon_alloc_mem(void)
+{
+	int i;
+	struct mon_private *monpriv;
+
+	monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
+	if (!monpriv)
+		return NULL;
+	for (i = 0; i < MON_MSGLIM; i++) {
+		monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg),
+						    GFP_KERNEL);
+		if (!monpriv->msg_array[i]) {
+			mon_free_mem(monpriv);
+			return NULL;
+		}
+	}
+	return monpriv;
+}
+
+static inline void mon_next_mca(struct mon_msg *monmsg)
+{
+	if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
+		return;
+	monmsg->mca_offset += 12;
+	monmsg->pos = 0;
+}
+
+static struct mon_msg *mon_next_message(struct mon_private *monpriv)
+{
+	struct mon_msg *monmsg;
+
+	if (!atomic_read(&monpriv->read_ready))
+		return NULL;
+	monmsg = monpriv->msg_array[monpriv->read_index];
+	if (unlikely(monmsg->replied_msglim)) {
+		monmsg->replied_msglim = 0;
+		monmsg->msglim_reached = 0;
+		monmsg->pos = 0;
+		monmsg->mca_offset = 0;
+		monpriv->read_index = (monpriv->read_index + 1) %
+				      MON_MSGLIM;
+		atomic_dec(&monpriv->read_ready);
+		return ERR_PTR(-EOVERFLOW);
+	}
+	return monmsg;
+}
+
+
+/******************************************************************************
+ *                               IUCV handler                                 *
+ *****************************************************************************/
+static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
+{
+	struct mon_private *monpriv = path->private;
+
+	atomic_set(&monpriv->iucv_connected, 1);
+	wake_up(&mon_conn_wait_queue);
+}
+
+static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
+{
+	struct mon_private *monpriv = path->private;
+
+	pr_err("z/VM *MONITOR system service disconnected with rc=%i\n",
+	       ipuser[0]);
+	iucv_path_sever(path, NULL);
+	atomic_set(&monpriv->iucv_severed, 1);
+	wake_up(&mon_conn_wait_queue);
+	wake_up_interruptible(&mon_read_wait_queue);
+}
+
+static void mon_iucv_message_pending(struct iucv_path *path,
+				     struct iucv_message *msg)
+{
+	struct mon_private *monpriv = path->private;
+
+	memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
+	       msg, sizeof(*msg));
+	if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
+		pr_warning("The read queue for monitor data is full\n");
+		monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
+	}
+	monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
+	atomic_inc(&monpriv->read_ready);
+	wake_up_interruptible(&mon_read_wait_queue);
+}
+
+static struct iucv_handler monreader_iucv_handler = {
+	.path_complete	 = mon_iucv_path_complete,
+	.path_severed	 = mon_iucv_path_severed,
+	.message_pending = mon_iucv_message_pending,
+};
+
+/******************************************************************************
+ *                               file operations                              *
+ *****************************************************************************/
+static int mon_open(struct inode *inode, struct file *filp)
+{
+	struct mon_private *monpriv;
+	int rc;
+
+	/*
+	 * only one user allowed
+	 */
+	rc = -EBUSY;
+	if (test_and_set_bit(MON_IN_USE, &mon_in_use))
+		goto out;
+
+	rc = -ENOMEM;
+	monpriv = mon_alloc_mem();
+	if (!monpriv)
+		goto out_use;
+
+	/*
+	 * Connect to *MONITOR service
+	 */
+	monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
+	if (!monpriv->path)
+		goto out_priv;
+	rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
+			       MON_SERVICE, NULL, user_data_connect, monpriv);
+	if (rc) {
+		pr_err("Connecting to the z/VM *MONITOR system service "
+		       "failed with rc=%i\n", rc);
+		rc = -EIO;
+		goto out_path;
+	}
+	/*
+	 * Wait for connection confirmation
+	 */
+	wait_event(mon_conn_wait_queue,
+		   atomic_read(&monpriv->iucv_connected) ||
+		   atomic_read(&monpriv->iucv_severed));
+	if (atomic_read(&monpriv->iucv_severed)) {
+		atomic_set(&monpriv->iucv_severed, 0);
+		atomic_set(&monpriv->iucv_connected, 0);
+		rc = -EIO;
+		goto out_path;
+	}
+	filp->private_data = monpriv;
+	dev_set_drvdata(monreader_device, monpriv);
+	return nonseekable_open(inode, filp);
+
+out_path:
+	iucv_path_free(monpriv->path);
+out_priv:
+	mon_free_mem(monpriv);
+out_use:
+	clear_bit(MON_IN_USE, &mon_in_use);
+out:
+	return rc;
+}
+
+static int mon_close(struct inode *inode, struct file *filp)
+{
+	int rc, i;
+	struct mon_private *monpriv = filp->private_data;
+
+	/*
+	 * Close IUCV connection and unregister
+	 */
+	if (monpriv->path) {
+		rc = iucv_path_sever(monpriv->path, user_data_sever);
+		if (rc)
+			pr_warning("Disconnecting the z/VM *MONITOR system "
+				   "service failed with rc=%i\n", rc);
+		iucv_path_free(monpriv->path);
+	}
+
+	atomic_set(&monpriv->iucv_severed, 0);
+	atomic_set(&monpriv->iucv_connected, 0);
+	atomic_set(&monpriv->read_ready, 0);
+	atomic_set(&monpriv->msglim_count, 0);
+	monpriv->write_index  = 0;
+	monpriv->read_index   = 0;
+	dev_set_drvdata(monreader_device, NULL);
+
+	for (i = 0; i < MON_MSGLIM; i++)
+		kfree(monpriv->msg_array[i]);
+	kfree(monpriv);
+	clear_bit(MON_IN_USE, &mon_in_use);
+	return 0;
+}
+
+static ssize_t mon_read(struct file *filp, char __user *data,
+			size_t count, loff_t *ppos)
+{
+	struct mon_private *monpriv = filp->private_data;
+	struct mon_msg *monmsg;
+	int ret;
+	u32 mce_start;
+
+	monmsg = mon_next_message(monpriv);
+	if (IS_ERR(monmsg))
+		return PTR_ERR(monmsg);
+
+	if (!monmsg) {
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+		ret = wait_event_interruptible(mon_read_wait_queue,
+					atomic_read(&monpriv->read_ready) ||
+					atomic_read(&monpriv->iucv_severed));
+		if (ret)
+			return ret;
+		if (unlikely(atomic_read(&monpriv->iucv_severed)))
+			return -EIO;
+		monmsg = monpriv->msg_array[monpriv->read_index];
+	}
+
+	if (!monmsg->pos)
+		monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
+	if (mon_check_mca(monmsg))
+		goto reply;
+
+	/* read monitor control element (12 bytes) first */
+	mce_start = mon_mca_start(monmsg) + monmsg->mca_offset;
+	if ((monmsg->pos >= mce_start) && (monmsg->pos < mce_start + 12)) {
+		count = min(count, (size_t) mce_start + 12 - monmsg->pos);
+		ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
+				   count);
+		if (ret)
+			return -EFAULT;
+		monmsg->pos += count;
+		if (monmsg->pos == mce_start + 12)
+			monmsg->pos = mon_rec_start(monmsg);
+		goto out_copy;
+	}
+
+	/* read records */
+	if (monmsg->pos <= mon_rec_end(monmsg)) {
+		count = min(count, (size_t) mon_rec_end(monmsg) - monmsg->pos
+					    + 1);
+		ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
+				   count);
+		if (ret)
+			return -EFAULT;
+		monmsg->pos += count;
+		if (monmsg->pos > mon_rec_end(monmsg))
+			mon_next_mca(monmsg);
+		goto out_copy;
+	}
+reply:
+	ret = mon_send_reply(monmsg, monpriv);
+	return ret;
+
+out_copy:
+	*ppos += count;
+	return count;
+}
+
+static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p)
+{
+	struct mon_private *monpriv = filp->private_data;
+
+	poll_wait(filp, &mon_read_wait_queue, p);
+	if (unlikely(atomic_read(&monpriv->iucv_severed)))
+		return POLLERR;
+	if (atomic_read(&monpriv->read_ready))
+		return POLLIN | POLLRDNORM;
+	return 0;
+}
+
+static const struct file_operations mon_fops = {
+	.owner   = THIS_MODULE,
+	.open    = &mon_open,
+	.release = &mon_close,
+	.read    = &mon_read,
+	.poll    = &mon_poll,
+	.llseek  = noop_llseek,
+};
+
+static struct miscdevice mon_dev = {
+	.name       = "monreader",
+	.fops       = &mon_fops,
+	.minor      = MISC_DYNAMIC_MINOR,
+};
+
+
+/******************************************************************************
+ *				suspend / resume			      *
+ *****************************************************************************/
+static int monreader_freeze(struct device *dev)
+{
+	struct mon_private *monpriv = dev_get_drvdata(dev);
+	int rc;
+
+	if (!monpriv)
+		return 0;
+	if (monpriv->path) {
+		rc = iucv_path_sever(monpriv->path, user_data_sever);
+		if (rc)
+			pr_warning("Disconnecting the z/VM *MONITOR system "
+				   "service failed with rc=%i\n", rc);
+		iucv_path_free(monpriv->path);
+	}
+	atomic_set(&monpriv->iucv_severed, 0);
+	atomic_set(&monpriv->iucv_connected, 0);
+	atomic_set(&monpriv->read_ready, 0);
+	atomic_set(&monpriv->msglim_count, 0);
+	monpriv->write_index  = 0;
+	monpriv->read_index   = 0;
+	monpriv->path = NULL;
+	return 0;
+}
+
+static int monreader_thaw(struct device *dev)
+{
+	struct mon_private *monpriv = dev_get_drvdata(dev);
+	int rc;
+
+	if (!monpriv)
+		return 0;
+	rc = -ENOMEM;
+	monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
+	if (!monpriv->path)
+		goto out;
+	rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
+			       MON_SERVICE, NULL, user_data_connect, monpriv);
+	if (rc) {
+		pr_err("Connecting to the z/VM *MONITOR system service "
+		       "failed with rc=%i\n", rc);
+		goto out_path;
+	}
+	wait_event(mon_conn_wait_queue,
+		   atomic_read(&monpriv->iucv_connected) ||
+		   atomic_read(&monpriv->iucv_severed));
+	if (atomic_read(&monpriv->iucv_severed))
+		goto out_path;
+	return 0;
+out_path:
+	rc = -EIO;
+	iucv_path_free(monpriv->path);
+	monpriv->path = NULL;
+out:
+	atomic_set(&monpriv->iucv_severed, 1);
+	return rc;
+}
+
+static int monreader_restore(struct device *dev)
+{
+	int rc;
+
+	segment_unload(mon_dcss_name);
+	rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
+			  &mon_dcss_start, &mon_dcss_end);
+	if (rc < 0) {
+		segment_warning(rc, mon_dcss_name);
+		panic("fatal monreader resume error: no monitor dcss\n");
+	}
+	return monreader_thaw(dev);
+}
+
+static const struct dev_pm_ops monreader_pm_ops = {
+	.freeze  = monreader_freeze,
+	.thaw	 = monreader_thaw,
+	.restore = monreader_restore,
+};
+
+static struct device_driver monreader_driver = {
+	.name = "monreader",
+	.bus  = &iucv_bus,
+	.pm   = &monreader_pm_ops,
+};
+
+
+/******************************************************************************
+ *                              module init/exit                              *
+ *****************************************************************************/
+static int __init mon_init(void)
+{
+	int rc;
+
+	if (!MACHINE_IS_VM) {
+		pr_err("The z/VM *MONITOR record device driver cannot be "
+		       "loaded without z/VM\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Register with IUCV and connect to *MONITOR service
+	 */
+	rc = iucv_register(&monreader_iucv_handler, 1);
+	if (rc) {
+		pr_err("The z/VM *MONITOR record device driver failed to "
+		       "register with IUCV\n");
+		return rc;
+	}
+
+	rc = driver_register(&monreader_driver);
+	if (rc)
+		goto out_iucv;
+	monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!monreader_device)
+		goto out_driver;
+	dev_set_name(monreader_device, "monreader-dev");
+	monreader_device->bus = &iucv_bus;
+	monreader_device->parent = iucv_root;
+	monreader_device->driver = &monreader_driver;
+	monreader_device->release = (void (*)(struct device *))kfree;
+	rc = device_register(monreader_device);
+	if (rc) {
+		put_device(monreader_device);
+		goto out_driver;
+	}
+
+	rc = segment_type(mon_dcss_name);
+	if (rc < 0) {
+		segment_warning(rc, mon_dcss_name);
+		goto out_device;
+	}
+	if (rc != SEG_TYPE_SC) {
+		pr_err("The specified *MONITOR DCSS %s does not have the "
+		       "required type SC\n", mon_dcss_name);
+		rc = -EINVAL;
+		goto out_device;
+	}
+
+	rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
+			  &mon_dcss_start, &mon_dcss_end);
+	if (rc < 0) {
+		segment_warning(rc, mon_dcss_name);
+		rc = -EINVAL;
+		goto out_device;
+	}
+	dcss_mkname(mon_dcss_name, &user_data_connect[8]);
+
+	/*
+	 * misc_register() has to be the last action in module_init(), because
+	 * file operations will be available right after this.
+	 */
+	rc = misc_register(&mon_dev);
+	if (rc < 0)
+		goto out;
+	return 0;
+
+out:
+	segment_unload(mon_dcss_name);
+out_device:
+	device_unregister(monreader_device);
+out_driver:
+	driver_unregister(&monreader_driver);
+out_iucv:
+	iucv_unregister(&monreader_iucv_handler, 1);
+	return rc;
+}
+
+static void __exit mon_exit(void)
+{
+	segment_unload(mon_dcss_name);
+	misc_deregister(&mon_dev);
+	device_unregister(monreader_device);
+	driver_unregister(&monreader_driver);
+	iucv_unregister(&monreader_iucv_handler, 1);
+	return;
+}
+
+
+module_init(mon_init);
+module_exit(mon_exit);
+
+module_param_string(mondcss, mon_dcss_name, 9, 0444);
+MODULE_PARM_DESC(mondcss, "Name of DCSS segment to be used for *MONITOR "
+		 "service, max. 8 chars. Default is MONDCSS");
+
+MODULE_AUTHOR("Gerald Schaefer <geraldsc@de.ibm.com>");
+MODULE_DESCRIPTION("Character device driver for reading z/VM "
+		   "monitor service records.");
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/monwriter.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/monwriter.c
new file mode 100644
index 0000000..4600aa1
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/monwriter.c
@@ -0,0 +1,399 @@
+/*
+ * Character device driver for writing z/VM *MONITOR service records.
+ *
+ * Copyright IBM Corp. 2006, 2009
+ *
+ * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
+ */
+
+#define KMSG_COMPONENT "monwriter"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/ctype.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/appldata.h>
+#include <asm/monwriter.h>
+
+#define MONWRITE_MAX_DATALEN	4010
+
+static int mon_max_bufs = 255;
+static int mon_buf_count;
+
+struct mon_buf {
+	struct list_head list;
+	struct monwrite_hdr hdr;
+	int diag_done;
+	char *data;
+};
+
+static LIST_HEAD(mon_priv_list);
+
+struct mon_private {
+	struct list_head priv_list;
+	struct list_head list;
+	struct monwrite_hdr hdr;
+	size_t hdr_to_read;
+	size_t data_to_read;
+	struct mon_buf *current_buf;
+	struct mutex thread_mutex;
+};
+
+/*
+ * helper functions
+ */
+
+static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
+{
+	struct appldata_product_id id;
+	int rc;
+
+	strcpy(id.prod_nr, "LNXAPPL");
+	id.prod_fn = myhdr->applid;
+	id.record_nr = myhdr->record_num;
+	id.version_nr = myhdr->version;
+	id.release_nr = myhdr->release;
+	id.mod_lvl = myhdr->mod_level;
+	rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
+	if (rc <= 0)
+		return rc;
+	pr_err("Writing monitor data failed with rc=%i\n", rc);
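+	/*
+	 * A return code of 5 from the APPLDATA diagnose apparently means the
+	 * guest is not authorized to write monitor data, hence -EPERM.
+	 */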
+	if (rc == 5)
+		return -EPERM;
+	return -EINVAL;
+}
+
+static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
+					 struct monwrite_hdr *monhdr)
+{
+	struct mon_buf *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &monpriv->list, list)
+		if ((entry->hdr.mon_function == monhdr->mon_function ||
+		     monhdr->mon_function == MONWRITE_STOP_INTERVAL) &&
+		    entry->hdr.applid == monhdr->applid &&
+		    entry->hdr.record_num == monhdr->record_num &&
+		    entry->hdr.version == monhdr->version &&
+		    entry->hdr.release == monhdr->release &&
+		    entry->hdr.mod_level == monhdr->mod_level)
+			return entry;
+
+	return NULL;
+}
+
+static int monwrite_new_hdr(struct mon_private *monpriv)
+{
+	struct monwrite_hdr *monhdr = &monpriv->hdr;
+	struct mon_buf *monbuf;
+	int rc = 0;
+
+	if (monhdr->datalen > MONWRITE_MAX_DATALEN ||
+	    monhdr->mon_function > MONWRITE_START_CONFIG ||
+	    monhdr->hdrlen != sizeof(struct monwrite_hdr))
+		return -EINVAL;
+	monbuf = NULL;
+	if (monhdr->mon_function != MONWRITE_GEN_EVENT)
+		monbuf = monwrite_find_hdr(monpriv, monhdr);
+	if (monbuf) {
+		if (monhdr->mon_function == MONWRITE_STOP_INTERVAL) {
+			monhdr->datalen = monbuf->hdr.datalen;
+			rc = monwrite_diag(monhdr, monbuf->data,
+					   APPLDATA_STOP_REC);
+			list_del(&monbuf->list);
+			mon_buf_count--;
+			kfree(monbuf->data);
+			kfree(monbuf);
+			monbuf = NULL;
+		}
+	} else if (monhdr->mon_function != MONWRITE_STOP_INTERVAL) {
+		if (mon_buf_count >= mon_max_bufs)
+			return -ENOSPC;
+		monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
+		if (!monbuf)
+			return -ENOMEM;
+		monbuf->data = kzalloc(monhdr->datalen,
+				       GFP_KERNEL | GFP_DMA);
+		if (!monbuf->data) {
+			kfree(monbuf);
+			return -ENOMEM;
+		}
+		monbuf->hdr = *monhdr;
+		list_add_tail(&monbuf->list, &monpriv->list);
+		if (monhdr->mon_function != MONWRITE_GEN_EVENT)
+			mon_buf_count++;
+	}
+	monpriv->current_buf = monbuf;
+	return rc;
+}
+
+static int monwrite_new_data(struct mon_private *monpriv)
+{
+	struct monwrite_hdr *monhdr = &monpriv->hdr;
+	struct mon_buf *monbuf = monpriv->current_buf;
+	int rc = 0;
+
+	switch (monhdr->mon_function) {
+	case MONWRITE_START_INTERVAL:
+		if (!monbuf->diag_done) {
+			rc = monwrite_diag(monhdr, monbuf->data,
+					   APPLDATA_START_INTERVAL_REC);
+			monbuf->diag_done = 1;
+		}
+		break;
+	case MONWRITE_START_CONFIG:
+		if (!monbuf->diag_done) {
+			rc = monwrite_diag(monhdr, monbuf->data,
+					   APPLDATA_START_CONFIG_REC);
+			monbuf->diag_done = 1;
+		}
+		break;
+	case MONWRITE_GEN_EVENT:
+		rc = monwrite_diag(monhdr, monbuf->data,
+				   APPLDATA_GEN_EVENT_REC);
+		list_del(&monpriv->current_buf->list);
+		kfree(monpriv->current_buf->data);
+		kfree(monpriv->current_buf);
+		monpriv->current_buf = NULL;
+		break;
+	default:
+		/* monhdr->mon_function is checked in monwrite_new_hdr */
+		BUG();
+	}
+	return rc;
+}
+
+/*
+ * file operations
+ */
+
+static int monwrite_open(struct inode *inode, struct file *filp)
+{
+	struct mon_private *monpriv;
+
+	monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
+	if (!monpriv)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&monpriv->list);
+	monpriv->hdr_to_read = sizeof(monpriv->hdr);
+	mutex_init(&monpriv->thread_mutex);
+	filp->private_data = monpriv;
+	list_add_tail(&monpriv->priv_list, &mon_priv_list);
+	return nonseekable_open(inode, filp);
+}
+
+static int monwrite_close(struct inode *inode, struct file *filp)
+{
+	struct mon_private *monpriv = filp->private_data;
+	struct mon_buf *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &monpriv->list, list) {
+		if (entry->hdr.mon_function != MONWRITE_GEN_EVENT)
+			monwrite_diag(&entry->hdr, entry->data,
+				      APPLDATA_STOP_REC);
+		mon_buf_count--;
+		list_del(&entry->list);
+		kfree(entry->data);
+		kfree(entry);
+	}
+	list_del(&monpriv->priv_list);
+	kfree(monpriv);
+	return 0;
+}
+
+static ssize_t monwrite_write(struct file *filp, const char __user *data,
+			      size_t count, loff_t *ppos)
+{
+	struct mon_private *monpriv = filp->private_data;
+	size_t len, written;
+	void *to;
+	int rc;
+
+	mutex_lock(&monpriv->thread_mutex);
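+	/*
+	 * User space writes a stream of monwrite_hdr structures, each
+	 * followed by hdr.datalen bytes of record data. A record may be
+	 * split across write() calls, so the bytes still missing from the
+	 * current header/data portion are tracked in monpriv.
+	 */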
+	for (written = 0; written < count; ) {
+		if (monpriv->hdr_to_read) {
+			len = min(count - written, monpriv->hdr_to_read);
+			to = (char *) &monpriv->hdr +
+				sizeof(monpriv->hdr) - monpriv->hdr_to_read;
+			if (copy_from_user(to, data + written, len)) {
+				rc = -EFAULT;
+				goto out_error;
+			}
+			monpriv->hdr_to_read -= len;
+			written += len;
+			if (monpriv->hdr_to_read > 0)
+				continue;
+			rc = monwrite_new_hdr(monpriv);
+			if (rc)
+				goto out_error;
+			monpriv->data_to_read = monpriv->current_buf ?
+				monpriv->current_buf->hdr.datalen : 0;
+		}
+
+		if (monpriv->data_to_read) {
+			len = min(count - written, monpriv->data_to_read);
+			to = monpriv->current_buf->data +
+				monpriv->hdr.datalen - monpriv->data_to_read;
+			if (copy_from_user(to, data + written, len)) {
+				rc = -EFAULT;
+				goto out_error;
+			}
+			monpriv->data_to_read -= len;
+			written += len;
+			if (monpriv->data_to_read > 0)
+				continue;
+			rc = monwrite_new_data(monpriv);
+			if (rc)
+				goto out_error;
+		}
+		monpriv->hdr_to_read = sizeof(monpriv->hdr);
+	}
+	mutex_unlock(&monpriv->thread_mutex);
+	return written;
+
+out_error:
+	monpriv->data_to_read = 0;
+	monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
+	mutex_unlock(&monpriv->thread_mutex);
+	return rc;
+}
+
+static const struct file_operations monwrite_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = &monwrite_open,
+	.release = &monwrite_close,
+	.write	 = &monwrite_write,
+	.llseek  = noop_llseek,
+};
+
+static struct miscdevice mon_dev = {
+	.name	= "monwriter",
+	.fops	= &monwrite_fops,
+	.minor	= MISC_DYNAMIC_MINOR,
+};
+
+/*
+ * suspend/resume
+ */
+
+static int monwriter_freeze(struct device *dev)
+{
+	struct mon_private *monpriv;
+	struct mon_buf *monbuf;
+
+	list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
+		list_for_each_entry(monbuf, &monpriv->list, list) {
+			if (monbuf->hdr.mon_function != MONWRITE_GEN_EVENT)
+				monwrite_diag(&monbuf->hdr, monbuf->data,
+					      APPLDATA_STOP_REC);
+		}
+	}
+	return 0;
+}
+
+static int monwriter_restore(struct device *dev)
+{
+	struct mon_private *monpriv;
+	struct mon_buf *monbuf;
+
+	list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
+		list_for_each_entry(monbuf, &monpriv->list, list) {
+			if (monbuf->hdr.mon_function == MONWRITE_START_INTERVAL)
+				monwrite_diag(&monbuf->hdr, monbuf->data,
+					      APPLDATA_START_INTERVAL_REC);
+			if (monbuf->hdr.mon_function == MONWRITE_START_CONFIG)
+				monwrite_diag(&monbuf->hdr, monbuf->data,
+					      APPLDATA_START_CONFIG_REC);
+		}
+	}
+	return 0;
+}
+
+static int monwriter_thaw(struct device *dev)
+{
+	return monwriter_restore(dev);
+}
+
+static const struct dev_pm_ops monwriter_pm_ops = {
+	.freeze		= monwriter_freeze,
+	.thaw		= monwriter_thaw,
+	.restore	= monwriter_restore,
+};
+
+static struct platform_driver monwriter_pdrv = {
+	.driver = {
+		.name	= "monwriter",
+		.owner	= THIS_MODULE,
+		.pm	= &monwriter_pm_ops,
+	},
+};
+
+static struct platform_device *monwriter_pdev;
+
+/*
+ * module init/exit
+ */
+
+static int __init mon_init(void)
+{
+	int rc;
+
+	if (!MACHINE_IS_VM)
+		return -ENODEV;
+
+	rc = platform_driver_register(&monwriter_pdrv);
+	if (rc)
+		return rc;
+
+	monwriter_pdev = platform_device_register_simple("monwriter", -1, NULL,
+							0);
+	if (IS_ERR(monwriter_pdev)) {
+		rc = PTR_ERR(monwriter_pdev);
+		goto out_driver;
+	}
+
+	/*
+	 * misc_register() has to be the last action in module_init(), because
+	 * file operations will be available right after this.
+	 */
+	rc = misc_register(&mon_dev);
+	if (rc)
+		goto out_device;
+	return 0;
+
+out_device:
+	platform_device_unregister(monwriter_pdev);
+out_driver:
+	platform_driver_unregister(&monwriter_pdrv);
+	return rc;
+}
+
+static void __exit mon_exit(void)
+{
+	misc_deregister(&mon_dev);
+	platform_device_unregister(monwriter_pdev);
+	platform_driver_unregister(&monwriter_pdrv);
+}
+
+module_init(mon_init);
+module_exit(mon_exit);
+
+module_param_named(max_bufs, mon_max_bufs, int, 0644);
+MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers "
+		 "that can be active at one time");
+
+MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>");
+MODULE_DESCRIPTION("Character device driver for writing z/VM "
+		   "APPLDATA monitor records.");
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/raw3270.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/raw3270.c
new file mode 100644
index 0000000..f3b8bb8
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/raw3270.c
@@ -0,0 +1,1456 @@
+/*
+ * IBM/3270 Driver - core functions.
+ *
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/ebcdic.h>
+#include <asm/diag.h>
+
+#include "raw3270.h"
+
+#include <linux/major.h>
+#include <linux/kdev_t.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+static struct class *class3270;
+
+/* The main 3270 data structure. */
+struct raw3270 {
+	struct list_head list;
+	struct ccw_device *cdev;
+	int minor;
+
+	short model, rows, cols;
+	unsigned long flags;
+
+	struct list_head req_queue;	/* Request queue. */
+	struct list_head view_list;	/* List of available views. */
+	struct raw3270_view *view;	/* Active view. */
+
+	struct timer_list timer;	/* Device timer. */
+
+	unsigned char *ascebc;		/* ascii -> ebcdic table */
+	struct device *clttydev;	/* 3270-class tty device ptr */
+	struct device *cltubdev;	/* 3270-class tub device ptr */
+
+	struct raw3270_request init_request;
+	unsigned char init_data[256];
+};
+
+/* raw3270->flags */
+#define RAW3270_FLAGS_14BITADDR	0	/* 14-bit buffer addresses */
+#define RAW3270_FLAGS_BUSY	1	/* Device busy, leave it alone */
+#define RAW3270_FLAGS_ATTN	2	/* Device sent an ATTN interrupt */
+#define RAW3270_FLAGS_READY	4	/* Device is usable by views */
+#define RAW3270_FLAGS_CONSOLE	8	/* Device is the console. */
+#define RAW3270_FLAGS_FROZEN	16	/* set if 3270 is frozen for suspend */
+
+/* Semaphore to protect global data of raw3270 (devices, views, etc). */
+static DEFINE_MUTEX(raw3270_mutex);
+
+/* List of 3270 devices. */
+static LIST_HEAD(raw3270_devices);
+
+/*
+ * Flag to indicate if the driver has been registered. Some operations
+ * like waiting for the end of i/o need to be done differently as long
+ * as the kernel is still starting up (console support).
+ */
+static int raw3270_registered;
+
+/* Module parameters */
+static bool tubxcorrect = 0;
+module_param(tubxcorrect, bool, 0);
+
+/*
+ * Wait queue for device init/delete, view delete.
+ */
+DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
+
+/*
+ * Encode array for 12 bit 3270 addresses.
+ */
+static unsigned char raw3270_ebcgraf[64] =	{
+	0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+	0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+	0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+	0xd8, 0xd9, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+	0x60, 0x61, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
+	0xe8, 0xe9, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+	0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f
+};
+
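+/*
+ * Encode a 3270 buffer address: devices with 14-bit addressing take the
+ * address as plain binary, otherwise the two 6-bit halves are translated
+ * through the raw3270_ebcgraf table above (classic 12-bit addressing).
+ */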
+void
+raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr)
+{
+	if (test_bit(RAW3270_FLAGS_14BITADDR, &rp->flags)) {
+		cp[0] = (addr >> 8) & 0x3f;
+		cp[1] = addr & 0xff;
+	} else {
+		cp[0] = raw3270_ebcgraf[(addr >> 6) & 0x3f];
+		cp[1] = raw3270_ebcgraf[addr & 0x3f];
+	}
+}
+
+/*
+ * Allocate a new 3270 ccw request
+ */
+struct raw3270_request *
+raw3270_request_alloc(size_t size)
+{
+	struct raw3270_request *rq;
+
+	/* Allocate request structure */
+	rq = kzalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA);
+	if (!rq)
+		return ERR_PTR(-ENOMEM);
+
+	/* alloc output buffer. */
+	if (size > 0) {
+		rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
+		if (!rq->buffer) {
+			kfree(rq);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+	rq->size = size;
+	INIT_LIST_HEAD(&rq->list);
+
+	/*
+	 * Setup ccw.
+	 */
+	rq->ccw.cda = __pa(rq->buffer);
+	rq->ccw.flags = CCW_FLAG_SLI;
+
+	return rq;
+}
+
+/*
+ * Free 3270 ccw request
+ */
+void
+raw3270_request_free (struct raw3270_request *rq)
+{
+	kfree(rq->buffer);
+	kfree(rq);
+}
+
+/*
+ * Reset request to initial state.
+ */
+void
+raw3270_request_reset(struct raw3270_request *rq)
+{
+	BUG_ON(!list_empty(&rq->list));
+	rq->ccw.cmd_code = 0;
+	rq->ccw.count = 0;
+	rq->ccw.cda = __pa(rq->buffer);
+	rq->ccw.flags = CCW_FLAG_SLI;
+	rq->rescnt = 0;
+	rq->rc = 0;
+}
+
+/*
+ * Set command code to ccw of a request.
+ */
+void
+raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd)
+{
+	rq->ccw.cmd_code = cmd;
+}
+
+/*
+ * Add data fragment to output buffer.
+ */
+int
+raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size)
+{
+	if (size + rq->ccw.count > rq->size)
+		return -E2BIG;
+	memcpy(rq->buffer + rq->ccw.count, data, size);
+	rq->ccw.count += size;
+	return 0;
+}
+
+/*
+ * Set address/length pair to ccw of a request.
+ */
+void
+raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
+{
+	rq->ccw.cda = __pa(data);
+	rq->ccw.count = size;
+}
+
+/*
+ * Set idal buffer to ccw of a request.
+ */
+void
+raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
+{
+	rq->ccw.cda = __pa(ib->data);
+	rq->ccw.count = ib->size;
+	rq->ccw.flags |= CCW_FLAG_IDA;
+}
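+
+/*
+ * Illustrative request lifecycle (hypothetical callback and data names, not
+ * part of the driver): a view typically allocates a request once, fills it,
+ * starts it and reuses it after a reset.
+ *
+ *	rq = raw3270_request_alloc(512);
+ *	if (IS_ERR(rq))
+ *		return PTR_ERR(rq);
+ *	raw3270_request_set_cmd(rq, TC_WRITE);
+ *	rc = raw3270_request_add_data(rq, my_orders, my_len);
+ *	rq->callback = my_write_callback;	 (hypothetical completion hook)
+ *	rq->callback_data = my_view_data;
+ *	if (rc == 0)
+ *		rc = raw3270_start(view, rq);
+ *	...
+ *	raw3270_request_reset(rq);		 (before reusing the request)
+ *	raw3270_request_free(rq);		 (when the view goes away)
+ */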
+
+/*
+ * Stop running ccw.
+ */
+static int
+raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq)
+{
+	int retries;
+	int rc;
+
+	if (raw3270_request_final(rq))
+		return 0;
+	/* Check if interrupt has already been processed */
+	for (retries = 0; retries < 5; retries++) {
+		if (retries < 2)
+			rc = ccw_device_halt(rp->cdev, (long) rq);
+		else
+			rc = ccw_device_clear(rp->cdev, (long) rq);
+		if (rc == 0)
+			break;		/* termination successful */
+	}
+	return rc;
+}
+
+static int
+raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	rc = raw3270_halt_io_nolock(rp, rq);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return rc;
+}
+
+/*
+ * Add the request to the request queue, try to start it if the
+ * 3270 device is idle. Return without waiting for end of i/o.
+ */
+static int
+__raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
+		struct raw3270_request *rq)
+{
+	rq->view = view;
+	raw3270_get_view(view);
+	if (list_empty(&rp->req_queue) &&
+	    !test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
+		/* No other requests are on the queue. Start this one. */
+		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
+					       (unsigned long) rq, 0, 0);
+		if (rq->rc) {
+			raw3270_put_view(view);
+			return rq->rc;
+		}
+	}
+	list_add_tail(&rq->list, &rp->req_queue);
+	return 0;
+}
+
+int
+raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
+{
+	unsigned long flags;
+	struct raw3270 *rp;
+	int rc;
+
+	spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
+	rp = view->dev;
+	if (!rp || rp->view != view ||
+	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		rc = -EACCES;
+	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
+		rc = -ENODEV;
+	else
+		rc =  __raw3270_start(rp, view, rq);
+	spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
+	return rc;
+}
+
+int
+raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
+{
+	struct raw3270 *rp;
+	int rc;
+
+	rp = view->dev;
+	if (!rp || rp->view != view ||
+	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		rc = -EACCES;
+	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
+		rc = -ENODEV;
+	else
+		rc =  __raw3270_start(rp, view, rq);
+	return rc;
+}
+
+int
+raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)
+{
+	struct raw3270 *rp;
+
+	rp = view->dev;
+	rq->view = view;
+	raw3270_get_view(view);
+	list_add_tail(&rq->list, &rp->req_queue);
+	return 0;
+}
+
+/*
+ * 3270 interrupt routine, called from the ccw_device layer
+ */
+static void
+raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+{
+	struct raw3270 *rp;
+	struct raw3270_view *view;
+	struct raw3270_request *rq;
+	int rc;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	if (!rp)
+		return;
+	rq = (struct raw3270_request *) intparm;
+	view = rq ? rq->view : rp->view;
+
+	if (IS_ERR(irb))
+		rc = RAW3270_IO_RETRY;
+	else if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
+		rq->rc = -EIO;
+		rc = RAW3270_IO_DONE;
+	} else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
+					   DEV_STAT_UNIT_EXCEP)) {
+		/* Handle CE-DE-UE and subsequent UDE */
+		set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
+		rc = RAW3270_IO_BUSY;
+	} else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
+		/* Wait for UDE if busy flag is set. */
+		if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+			clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
+			/* Got it, now retry. */
+			rc = RAW3270_IO_RETRY;
+		} else
+			rc = RAW3270_IO_BUSY;
+	} else if (view)
+		rc = view->fn->intv(view, rq, irb);
+	else
+		rc = RAW3270_IO_DONE;
+
+	switch (rc) {
+	case RAW3270_IO_DONE:
+		break;
+	case RAW3270_IO_BUSY:
+		/* 
+		 * Intervention required by the operator. We have to wait
+		 * for unsolicited device end.
+		 */
+		return;
+	case RAW3270_IO_RETRY:
+		if (!rq)
+			break;
+		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
+					  (unsigned long) rq, 0, 0);
+		if (rq->rc == 0)
+			return;	/* Successfully restarted. */
+		break;
+	case RAW3270_IO_STOP:
+		if (!rq)
+			break;
+		raw3270_halt_io_nolock(rp, rq);
+		rq->rc = -EIO;
+		break;
+	default:
+		BUG();
+	}
+	if (rq) {
+		BUG_ON(list_empty(&rq->list));
+		/* The request completed, remove from queue and do callback. */
+		list_del_init(&rq->list);
+		if (rq->callback)
+			rq->callback(rq, rq->callback_data);
+		/* Do put_device for get_device in raw3270_start. */
+		raw3270_put_view(view);
+	}
+	/*
+	 * Try to start each request on request queue until one is
+	 * started successful.
+	 */
+	while (!list_empty(&rp->req_queue)) {
+		rq = list_entry(rp->req_queue.next, struct raw3270_request, list);
+		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
+					  (unsigned long) rq, 0, 0);
+		if (rq->rc == 0)
+			break;
+		/* Start failed. Remove request and do callback. */
+		list_del_init(&rq->list);
+		if (rq->callback)
+			rq->callback(rq, rq->callback_data);
+		/* Do put_device for get_device in raw3270_start. */
+		raw3270_put_view(view);
+	}
+}
+
+/*
+ * Size sensing.
+ */
+
+struct raw3270_ua {	/* Query Reply structure for Usable Area */
+	struct {	/* Usable Area Query Reply Base */
+		short l;	/* Length of this structured field */
+		char  sfid;	/* 0x81 if Query Reply */
+		char  qcode;	/* 0x81 if Usable Area */
+		char  flags0;
+		char  flags1;
+		short w;	/* Width of usable area */
+		short h;	/* Height of usable area */
+		char  units;	/* 0x00:in; 0x01:mm */
+		int   xr;
+		int   yr;
+		char  aw;
+		char  ah;
+		short buffsz;	/* Character buffer size, bytes */
+		char  xmin;
+		char  ymin;
+		char  xmax;
+		char  ymax;
+	} __attribute__ ((packed)) uab;
+	struct {	/* Alternate Usable Area Self-Defining Parameter */
+		char  l;	/* Length of this Self-Defining Parm */
+		char  sdpid;	/* 0x02 if Alternate Usable Area */
+		char  res;
+		char  auaid;	/* 0x01 is the Id of the Alternate Usable Area */
+		short wauai;	/* Width of AUAi */
+		short hauai;	/* Height of AUAi */
+		char  auaunits;	/* 0x00:in, 0x01:mm */
+		int   auaxr;
+		int   auayr;
+		char  awauai;
+		char  ahauai;
+	} __attribute__ ((packed)) aua;
+} __attribute__ ((packed));
+
+static struct diag210 raw3270_init_diag210;
+static DEFINE_MUTEX(raw3270_init_mutex);
+
+static int
+raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
+		 struct irb *irb)
+{
+	/*
+	 * Unit-Check Processing:
+	 * Expect Command Reject or Intervention Required.
+	 */
+	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+		/* Request finished abnormally. */
+		if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
+			set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
+			return RAW3270_IO_BUSY;
+		}
+	}
+	if (rq) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+			if (irb->ecw[0] & SNS0_CMD_REJECT)
+				rq->rc = -EOPNOTSUPP;
+			else
+				rq->rc = -EIO;
+		} else
+			/* Request finished normally. Copy residual count. */
+			rq->rescnt = irb->scsw.cmd.count;
+	}
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
+		wake_up(&raw3270_wait_queue);
+	}
+	return RAW3270_IO_DONE;
+}
+
+static struct raw3270_fn raw3270_init_fn = {
+	.intv = raw3270_init_irq
+};
+
+static struct raw3270_view raw3270_init_view = {
+	.fn = &raw3270_init_fn
+};
+
+/*
+ * raw3270_wait/raw3270_wait_interruptible/__raw3270_wakeup
+ * Wait for end of request. The request must have been started
+ * with raw3270_start, rc = 0. The device lock may NOT have been
+ * released between calling raw3270_start and raw3270_wait.
+ */
+static void
+raw3270_wake_init(struct raw3270_request *rq, void *data)
+{
+	wake_up((wait_queue_head_t *) data);
+}
+
+/*
+ * Special wait function that can cope with console initialization.
+ */
+static int
+raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
+		   struct raw3270_request *rq)
+{
+	unsigned long flags;
+	int rc;
+
+#ifdef CONFIG_TN3270_CONSOLE
+	if (raw3270_registered == 0) {
+		spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
+		rq->callback = NULL;
+		rc = __raw3270_start(rp, view, rq);
+		if (rc == 0)
+			while (!raw3270_request_final(rq)) {
+				wait_cons_dev();
+				barrier();
+			}
+		spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
+		return rq->rc;
+	}
+#endif
+	rq->callback = raw3270_wake_init;
+	rq->callback_data = &raw3270_wait_queue;
+	spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
+	rc = __raw3270_start(rp, view, rq);
+	spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
+	if (rc)
+		return rc;
+	/* Now wait for the completion. */
+	rc = wait_event_interruptible(raw3270_wait_queue,
+				      raw3270_request_final(rq));
+	if (rc == -ERESTARTSYS) {	/* Interrupted by a signal. */
+		raw3270_halt_io(view->dev, rq);
+		/* Now wait for the halt to complete. */
+		wait_event(raw3270_wait_queue, raw3270_request_final(rq));
+		return -ERESTARTSYS;
+	}
+	return rq->rc;
+}
+
+static int
+__raw3270_size_device_vm(struct raw3270 *rp)
+{
+	int rc, model;
+	struct ccw_dev_id dev_id;
+
+	ccw_device_get_id(rp->cdev, &dev_id);
+	raw3270_init_diag210.vrdcdvno = dev_id.devno;
+	raw3270_init_diag210.vrdclen = sizeof(struct diag210);
+	rc = diag210(&raw3270_init_diag210);
+	if (rc)
+		return rc;
+	model = raw3270_init_diag210.vrdccrmd;
+	switch (model) {
+	case 2:
+		rp->model = model;
+		rp->rows = 24;
+		rp->cols = 80;
+		break;
+	case 3:
+		rp->model = model;
+		rp->rows = 32;
+		rp->cols = 80;
+		break;
+	case 4:
+		rp->model = model;
+		rp->rows = 43;
+		rp->cols = 80;
+		break;
+	case 5:
+		rp->model = model;
+		rp->rows = 27;
+		rp->cols = 132;
+		break;
+	default:
+		rc = -EOPNOTSUPP;
+		break;
+	}
+	return rc;
+}
+
+static int
+__raw3270_size_device(struct raw3270 *rp)
+{
+	static const unsigned char wbuf[] =
+		{ 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
+	struct raw3270_ua *uap;
+	int rc;
+
+	/*
+	 * To determine the size of the 3270 device we need to do:
+	 * 1) send a 'read partition' data stream to the device
+	 * 2) wait for the attn interrupt that precedes the query reply
+	 * 3) do a read modified to get the query reply
+	 * To make things worse we have to cope with intervention
+	 * required (3270 device switched to 'stand-by') and command
+	 * rejects (old devices that can't do 'read partition').
+	 */
+	memset(&rp->init_request, 0, sizeof(rp->init_request));
+	memset(&rp->init_data, 0, 256);
+	/* Store 'read partition' data stream to init_data */
+	memcpy(&rp->init_data, wbuf, sizeof(wbuf));
+	INIT_LIST_HEAD(&rp->init_request.list);
+	rp->init_request.ccw.cmd_code = TC_WRITESF;
+	rp->init_request.ccw.flags = CCW_FLAG_SLI;
+	rp->init_request.ccw.count = sizeof(wbuf);
+	rp->init_request.ccw.cda = (__u32) __pa(&rp->init_data);
+
+	rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
+	if (rc)
+		/* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */
+		return rc;
+
+	/* Wait for attention interrupt. */
+#ifdef CONFIG_TN3270_CONSOLE
+	if (raw3270_registered == 0) {
+		unsigned long flags;
+
+		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+		while (!test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags))
+			wait_cons_dev();
+		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	} else
+#endif
+		rc = wait_event_interruptible(raw3270_wait_queue,
+			test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags));
+	if (rc)
+		return rc;
+
+	/*
+	 * The device accepted the 'read partition' command. Now
+	 * set up a read ccw and issue it.
+	 */
+	rp->init_request.ccw.cmd_code = TC_READMOD;
+	rp->init_request.ccw.flags = CCW_FLAG_SLI;
+	rp->init_request.ccw.count = sizeof(rp->init_data);
+	rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
+	rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
+	if (rc)
+		return rc;
+	/* Got a Query Reply */
+	uap = (struct raw3270_ua *) (rp->init_data + 1);
+	/* Paranoia check. */
+	if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81)
+		return -EOPNOTSUPP;
+	/* Copy rows/columns of default Usable Area */
+	rp->rows = uap->uab.h;
+	rp->cols = uap->uab.w;
+	/* Check for 14 bit addressing */
+	if ((uap->uab.flags0 & 0x0d) == 0x01)
+		set_bit(RAW3270_FLAGS_14BITADDR, &rp->flags);
+	/* Check for Alternate Usable Area */
+	if (uap->uab.l == sizeof(struct raw3270_ua) &&
+	    uap->aua.sdpid == 0x02) {
+		rp->rows = uap->aua.hauai;
+		rp->cols = uap->aua.wauai;
+	}
+	return 0;
+}
+
+static int
+raw3270_size_device(struct raw3270 *rp)
+{
+	int rc;
+
+	mutex_lock(&raw3270_init_mutex);
+	rp->view = &raw3270_init_view;
+	raw3270_init_view.dev = rp;
+	if (MACHINE_IS_VM)
+		rc = __raw3270_size_device_vm(rp);
+	else
+		rc = __raw3270_size_device(rp);
+	raw3270_init_view.dev = NULL;
+	rp->view = NULL;
+	mutex_unlock(&raw3270_init_mutex);
+	if (rc == 0) {	/* Found something. */
+		/* Try to find a model. */
+		rp->model = 0;
+		if (rp->rows == 24 && rp->cols == 80)
+			rp->model = 2;
+		if (rp->rows == 32 && rp->cols == 80)
+			rp->model = 3;
+		if (rp->rows == 43 && rp->cols == 80)
+			rp->model = 4;
+		if (rp->rows == 27 && rp->cols == 132)
+			rp->model = 5;
+	} else {
+		/* Couldn't detect size. Use default model 2. */
+		rp->model = 2;
+		rp->rows = 24;
+		rp->cols = 80;
+		return 0;
+	}
+	return rc;
+}
+
+static int
+raw3270_reset_device(struct raw3270 *rp)
+{
+	int rc;
+
+	mutex_lock(&raw3270_init_mutex);
+	memset(&rp->init_request, 0, sizeof(rp->init_request));
+	memset(&rp->init_data, 0, sizeof(rp->init_data));
+	/* Store reset data stream to init_data/init_request */
+	rp->init_data[0] = TW_KR;
+	INIT_LIST_HEAD(&rp->init_request.list);
+	rp->init_request.ccw.cmd_code = TC_EWRITEA;
+	rp->init_request.ccw.flags = CCW_FLAG_SLI;
+	rp->init_request.ccw.count = 1;
+	rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
+	rp->view = &raw3270_init_view;
+	raw3270_init_view.dev = rp;
+	rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
+	raw3270_init_view.dev = NULL;
+	rp->view = NULL;
+	mutex_unlock(&raw3270_init_mutex);
+	return rc;
+}
+
+int
+raw3270_reset(struct raw3270_view *view)
+{
+	struct raw3270 *rp;
+	int rc;
+
+	rp = view->dev;
+	if (!rp || rp->view != view ||
+	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		rc = -EACCES;
+	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
+		rc = -ENODEV;
+	else
+		rc = raw3270_reset_device(view->dev);
+	return rc;
+}
+
+/*
+ * Setup new 3270 device.
+ */
+static int
+raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
+{
+	struct list_head *l;
+	struct raw3270 *tmp;
+	int minor;
+
+	memset(rp, 0, sizeof(struct raw3270));
+	/* Copy ascii -> ebcdic translation table. */
+	memcpy(ascebc, _ascebc, 256);
+	if (tubxcorrect) {
+		/* correct brackets and circumflex */
+		ascebc['['] = 0xad;
+		ascebc[']'] = 0xbd;
+		ascebc['^'] = 0xb0;
+	}
+	rp->ascebc = ascebc;
+
+	/* Set defaults. */
+	rp->rows = 24;
+	rp->cols = 80;
+
+	INIT_LIST_HEAD(&rp->req_queue);
+	INIT_LIST_HEAD(&rp->view_list);
+
+	/*
+	 * Add device to list and find the smallest unused minor
+	 * number for it. Note: there is no device with minor 0,
+	 * see special case for fs3270.c:fs3270_open().
+	 */
+	mutex_lock(&raw3270_mutex);
+	/* Keep the list sorted. */
+	minor = RAW3270_FIRSTMINOR;
+	rp->minor = -1;
+	list_for_each(l, &raw3270_devices) {
+		tmp = list_entry(l, struct raw3270, list);
+		if (tmp->minor > minor) {
+			rp->minor = minor;
+			__list_add(&rp->list, l->prev, l);
+			break;
+		}
+		minor++;
+	}
+	if (rp->minor == -1 && minor < RAW3270_MAXDEVS + RAW3270_FIRSTMINOR) {
+		rp->minor = minor;
+		list_add_tail(&rp->list, &raw3270_devices);
+	}
+	mutex_unlock(&raw3270_mutex);
+	/* No free minor number? Then give up. */
+	if (rp->minor == -1)
+		return -EUSERS;
+	rp->cdev = cdev;
+	dev_set_drvdata(&cdev->dev, rp);
+	cdev->handler = raw3270_irq;
+	return 0;
+}
+
+#ifdef CONFIG_TN3270_CONSOLE
+/*
+ * Setup 3270 device configured as console.
+ */
+struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+	char *ascebc;
+	int rc;
+
+	rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
+	ascebc = kzalloc(256, GFP_KERNEL);
+	rc = raw3270_setup_device(cdev, rp, ascebc);
+	if (rc)
+		return ERR_PTR(rc);
+	set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
+	rc = raw3270_reset_device(rp);
+	if (rc)
+		return ERR_PTR(rc);
+	rc = raw3270_size_device(rp);
+	if (rc)
+		return ERR_PTR(rc);
+	rc = raw3270_reset_device(rp);
+	if (rc)
+		return ERR_PTR(rc);
+	set_bit(RAW3270_FLAGS_READY, &rp->flags);
+	return rp;
+}
+
+void
+raw3270_wait_cons_dev(struct raw3270 *rp)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	wait_cons_dev();
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+}
+
+#endif
+
+/*
+ * Create a 3270 device structure.
+ */
+static struct raw3270 *
+raw3270_create_device(struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+	char *ascebc;
+	int rc;
+
+	rp = kmalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
+	if (!rp)
+		return ERR_PTR(-ENOMEM);
+	ascebc = kmalloc(256, GFP_KERNEL);
+	if (!ascebc) {
+		kfree(rp);
+		return ERR_PTR(-ENOMEM);
+	}
+	rc = raw3270_setup_device(cdev, rp, ascebc);
+	if (rc) {
+		kfree(rp->ascebc);
+		kfree(rp);
+		rp = ERR_PTR(rc);
+	}
+	/* Get reference to ccw_device structure. */
+	get_device(&cdev->dev);
+	return rp;
+}
+
+/*
+ * Activate a view.
+ */
+int
+raw3270_activate_view(struct raw3270_view *view)
+{
+	struct raw3270 *rp;
+	struct raw3270_view *oldview, *nv;
+	unsigned long flags;
+	int rc;
+
+	rp = view->dev;
+	if (!rp)
+		return -ENODEV;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	if (rp->view == view)
+		rc = 0;
+	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
+		rc = -ENODEV;
+	else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		rc = -EACCES;
+	else {
+		oldview = NULL;
+		if (rp->view) {
+			oldview = rp->view;
+			oldview->fn->deactivate(oldview);
+		}
+		rp->view = view;
+		rc = view->fn->activate(view);
+		if (rc) {
+			/* Didn't work. Try to reactivate the old view. */
+			rp->view = oldview;
+			if (!oldview || oldview->fn->activate(oldview) != 0) {
+				/* Didn't work as well. Try any other view. */
+				list_for_each_entry(nv, &rp->view_list, list)
+					if (nv != view && nv != oldview) {
+						rp->view = nv;
+						if (nv->fn->activate(nv) == 0)
+							break;
+						rp->view = NULL;
+					}
+			}
+		}
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return rc;
+}
+
+/*
+ * Deactivate current view.
+ */
+void
+raw3270_deactivate_view(struct raw3270_view *view)
+{
+	unsigned long flags;
+	struct raw3270 *rp;
+
+	rp = view->dev;
+	if (!rp)
+		return;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	if (rp->view == view) {
+		view->fn->deactivate(view);
+		rp->view = NULL;
+		/* Move deactivated view to end of list. */
+		list_del_init(&view->list);
+		list_add_tail(&view->list, &rp->view_list);
+		/* Try to activate another view. */
+		if (test_bit(RAW3270_FLAGS_READY, &rp->flags) &&
+		    !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
+			list_for_each_entry(view, &rp->view_list, list) {
+				rp->view = view;
+				if (view->fn->activate(view) == 0)
+					break;
+				rp->view = NULL;
+			}
+		}
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+}
+
+/*
+ * Add view to device with minor "minor".
+ */
+int
+raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
+{
+	unsigned long flags;
+	struct raw3270 *rp;
+	int rc;
+
+	if (minor <= 0)
+		return -ENODEV;
+	mutex_lock(&raw3270_mutex);
+	rc = -ENODEV;
+	list_for_each_entry(rp, &raw3270_devices, list) {
+		if (rp->minor != minor)
+			continue;
+		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+		if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
+			atomic_set(&view->ref_count, 2);
+			view->dev = rp;
+			view->fn = fn;
+			view->model = rp->model;
+			view->rows = rp->rows;
+			view->cols = rp->cols;
+			view->ascebc = rp->ascebc;
+			spin_lock_init(&view->lock);
+			list_add(&view->list, &rp->view_list);
+			rc = 0;
+		}
+		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+		break;
+	}
+	mutex_unlock(&raw3270_mutex);
+	return rc;
+}
+
+/*
+ * Find specific view of device with minor "minor".
+ */
+struct raw3270_view *
+raw3270_find_view(struct raw3270_fn *fn, int minor)
+{
+	struct raw3270 *rp;
+	struct raw3270_view *view, *tmp;
+	unsigned long flags;
+
+	mutex_lock(&raw3270_mutex);
+	view = ERR_PTR(-ENODEV);
+	list_for_each_entry(rp, &raw3270_devices, list) {
+		if (rp->minor != minor)
+			continue;
+		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+		if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
+			view = ERR_PTR(-ENOENT);
+			list_for_each_entry(tmp, &rp->view_list, list) {
+				if (tmp->fn == fn) {
+					raw3270_get_view(tmp);
+					view = tmp;
+					break;
+				}
+			}
+		}
+		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+		break;
+	}
+	mutex_unlock(&raw3270_mutex);
+	return view;
+}
+
+/*
+ * Remove view from device and free view structure via call to view->fn->free.
+ */
+void
+raw3270_del_view(struct raw3270_view *view)
+{
+	unsigned long flags;
+	struct raw3270 *rp;
+	struct raw3270_view *nv;
+
+	rp = view->dev;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	if (rp->view == view) {
+		view->fn->deactivate(view);
+		rp->view = NULL;
+	}
+	list_del_init(&view->list);
+	if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags) &&
+	    !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
+		/* Try to activate another view. */
+		list_for_each_entry(nv, &rp->view_list, list) {
+			if (nv->fn->activate(nv) == 0) {
+				rp->view = nv;
+				break;
+			}
+		}
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	/* Wait for reference counter to drop to zero. */
+	atomic_dec(&view->ref_count);
+	wait_event(raw3270_wait_queue, atomic_read(&view->ref_count) == 0);
+	if (view->fn->free)
+		view->fn->free(view);
+}
+
+/*
+ * Remove a 3270 device structure.
+ */
+static void
+raw3270_delete_device(struct raw3270 *rp)
+{
+	struct ccw_device *cdev;
+
+	/* Remove from device chain. */
+	mutex_lock(&raw3270_mutex);
+	if (rp->clttydev && !IS_ERR(rp->clttydev))
+		device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
+	if (rp->cltubdev && !IS_ERR(rp->cltubdev))
+		device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, rp->minor));
+	list_del_init(&rp->list);
+	mutex_unlock(&raw3270_mutex);
+
+	/* Disconnect from ccw_device. */
+	cdev = rp->cdev;
+	rp->cdev = NULL;
+	dev_set_drvdata(&cdev->dev, NULL);
+	cdev->handler = NULL;
+
+	/* Put ccw_device structure. */
+	put_device(&cdev->dev);
+
+	/* Now free raw3270 structure. */
+	kfree(rp->ascebc);
+	kfree(rp);
+}
+
+static int
+raw3270_probe (struct ccw_device *cdev)
+{
+	return 0;
+}
+
+/*
+ * Additional attributes for a 3270 device
+ */
+static ssize_t
+raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%i\n",
+			((struct raw3270 *) dev_get_drvdata(dev))->model);
+}
+static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL);
+
+static ssize_t
+raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%i\n",
+			((struct raw3270 *) dev_get_drvdata(dev))->rows);
+}
+static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL);
+
+static ssize_t
+raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%i\n",
+			((struct raw3270 *) dev_get_drvdata(dev))->cols);
+}
+static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL);
+
+static struct attribute * raw3270_attrs[] = {
+	&dev_attr_model.attr,
+	&dev_attr_rows.attr,
+	&dev_attr_columns.attr,
+	NULL,
+};
+
+static struct attribute_group raw3270_attr_group = {
+	.attrs = raw3270_attrs,
+};
+
+static int raw3270_create_attributes(struct raw3270 *rp)
+{
+	int rc;
+
+	rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
+	if (rc)
+		goto out;
+
+	rp->clttydev = device_create(class3270, &rp->cdev->dev,
+				     MKDEV(IBM_TTY3270_MAJOR, rp->minor), NULL,
+				     "tty%s", dev_name(&rp->cdev->dev));
+	if (IS_ERR(rp->clttydev)) {
+		rc = PTR_ERR(rp->clttydev);
+		goto out_ttydev;
+	}
+
+	rp->cltubdev = device_create(class3270, &rp->cdev->dev,
+				     MKDEV(IBM_FS3270_MAJOR, rp->minor), NULL,
+				     "tub%s", dev_name(&rp->cdev->dev));
+	if (!IS_ERR(rp->cltubdev))
+		goto out;
+
+	rc = PTR_ERR(rp->cltubdev);
+	device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
+
+out_ttydev:
+	sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
+out:
+	return rc;
+}
+
+/*
+ * Notifier for device addition/removal
+ */
+struct raw3270_notifier {
+	struct list_head list;
+	void (*notifier)(int, int);
+};
+
+static LIST_HEAD(raw3270_notifier);
+
+int raw3270_register_notifier(void (*notifier)(int, int))
+{
+	struct raw3270_notifier *np;
+	struct raw3270 *rp;
+
+	np = kmalloc(sizeof(struct raw3270_notifier), GFP_KERNEL);
+	if (!np)
+		return -ENOMEM;
+	np->notifier = notifier;
+	mutex_lock(&raw3270_mutex);
+	list_add_tail(&np->list, &raw3270_notifier);
+	list_for_each_entry(rp, &raw3270_devices, list) {
+		get_device(&rp->cdev->dev);
+		notifier(rp->minor, 1);
+	}
+	mutex_unlock(&raw3270_mutex);
+	return 0;
+}
+
+void raw3270_unregister_notifier(void (*notifier)(int, int))
+{
+	struct raw3270_notifier *np;
+
+	mutex_lock(&raw3270_mutex);
+	list_for_each_entry(np, &raw3270_notifier, list)
+		if (np->notifier == notifier) {
+			list_del(&np->list);
+			kfree(np);
+			break;
+		}
+	mutex_unlock(&raw3270_mutex);
+}
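+
+/*
+ * Illustrative notifier usage (hypothetical function name, not part of the
+ * driver): the callback is invoked with (minor, 1) when a device comes
+ * online and with (minor, 0) when it is removed; raw3270_register_notifier
+ * also calls it once for every device that already exists.
+ *
+ *	static void my_3270_notify(int minor, int onoff)
+ *	{
+ *		...
+ *	}
+ *
+ *	rc = raw3270_register_notifier(my_3270_notify);
+ *	...
+ *	raw3270_unregister_notifier(my_3270_notify);
+ */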
+
+/*
+ * Set 3270 device online.
+ */
+static int
+raw3270_set_online (struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+	struct raw3270_notifier *np;
+	int rc;
+
+	rp = raw3270_create_device(cdev);
+	if (IS_ERR(rp))
+		return PTR_ERR(rp);
+	rc = raw3270_reset_device(rp);
+	if (rc)
+		goto failure;
+	rc = raw3270_size_device(rp);
+	if (rc)
+		goto failure;
+	rc = raw3270_reset_device(rp);
+	if (rc)
+		goto failure;
+	rc = raw3270_create_attributes(rp);
+	if (rc)
+		goto failure;
+	set_bit(RAW3270_FLAGS_READY, &rp->flags);
+	mutex_lock(&raw3270_mutex);
+	list_for_each_entry(np, &raw3270_notifier, list)
+		np->notifier(rp->minor, 1);
+	mutex_unlock(&raw3270_mutex);
+	return 0;
+
+failure:
+	raw3270_delete_device(rp);
+	return rc;
+}
+
+/*
+ * Remove 3270 device structure.
+ */
+static void
+raw3270_remove (struct ccw_device *cdev)
+{
+	unsigned long flags;
+	struct raw3270 *rp;
+	struct raw3270_view *v;
+	struct raw3270_notifier *np;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	/*
+	 * _remove is the opposite of _probe; it's probe that
+	 * should set up rp.  raw3270_remove gets entered for
+	 * devices even if they haven't been varied online.
+	 * Thus, rp may validly be NULL here.
+	 */
+	if (rp == NULL)
+		return;
+	clear_bit(RAW3270_FLAGS_READY, &rp->flags);
+
+	sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group);
+
+	/* Deactivate current view and remove all views. */
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	if (rp->view) {
+		rp->view->fn->deactivate(rp->view);
+		rp->view = NULL;
+	}
+	while (!list_empty(&rp->view_list)) {
+		v = list_entry(rp->view_list.next, struct raw3270_view, list);
+		if (v->fn->release)
+			v->fn->release(v);
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+		raw3270_del_view(v);
+		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	mutex_lock(&raw3270_mutex);
+	list_for_each_entry(np, &raw3270_notifier, list)
+		np->notifier(rp->minor, 0);
+	mutex_unlock(&raw3270_mutex);
+
+	/* Reset 3270 device. */
+	raw3270_reset_device(rp);
+	/* And finally remove it. */
+	raw3270_delete_device(rp);
+}
+
+/*
+ * Set 3270 device offline.
+ */
+static int
+raw3270_set_offline (struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	if (test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags))
+		return -EBUSY;
+	raw3270_remove(cdev);
+	return 0;
+}
+
+static int raw3270_pm_stop(struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+	struct raw3270_view *view;
+	unsigned long flags;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	if (!rp)
+		return 0;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	if (rp->view)
+		rp->view->fn->deactivate(rp->view);
+	if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) {
+		/*
+		 * Release tty and fullscreen for all non-console
+		 * devices.
+		 */
+		list_for_each_entry(view, &rp->view_list, list) {
+			if (view->fn->release)
+				view->fn->release(view);
+		}
+	}
+	set_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return 0;
+}
+
+static int raw3270_pm_start(struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+	unsigned long flags;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	if (!rp)
+		return 0;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
+	if (rp->view)
+		rp->view->fn->activate(rp->view);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return 0;
+}
+
+void raw3270_pm_unfreeze(struct raw3270_view *view)
+{
+#ifdef CONFIG_TN3270_CONSOLE
+	struct raw3270 *rp;
+
+	rp = view->dev;
+	if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		ccw_device_force_console();
+#endif
+}
+
+static struct ccw_device_id raw3270_id[] = {
+	{ CCW_DEVICE(0x3270, 0) },
+	{ CCW_DEVICE(0x3271, 0) },
+	{ CCW_DEVICE(0x3272, 0) },
+	{ CCW_DEVICE(0x3273, 0) },
+	{ CCW_DEVICE(0x3274, 0) },
+	{ CCW_DEVICE(0x3275, 0) },
+	{ CCW_DEVICE(0x3276, 0) },
+	{ CCW_DEVICE(0x3277, 0) },
+	{ CCW_DEVICE(0x3278, 0) },
+	{ CCW_DEVICE(0x3279, 0) },
+	{ CCW_DEVICE(0x3174, 0) },
+	{ /* end of list */ },
+};
+
+static struct ccw_driver raw3270_ccw_driver = {
+	.driver = {
+		.name	= "3270",
+		.owner	= THIS_MODULE,
+	},
+	.ids		= raw3270_id,
+	.probe		= &raw3270_probe,
+	.remove		= &raw3270_remove,
+	.set_online	= &raw3270_set_online,
+	.set_offline	= &raw3270_set_offline,
+	.freeze		= &raw3270_pm_stop,
+	.thaw		= &raw3270_pm_start,
+	.restore	= &raw3270_pm_start,
+	.int_class	= IOINT_C70,
+};
+
+static int
+raw3270_init(void)
+{
+	struct raw3270 *rp;
+	int rc;
+
+	if (raw3270_registered)
+		return 0;
+	raw3270_registered = 1;
+	rc = ccw_driver_register(&raw3270_ccw_driver);
+	if (rc == 0) {
+		/* Create attributes for early (= console) device. */
+		mutex_lock(&raw3270_mutex);
+		class3270 = class_create(THIS_MODULE, "3270");
+		list_for_each_entry(rp, &raw3270_devices, list) {
+			get_device(&rp->cdev->dev);
+			raw3270_create_attributes(rp);
+		}
+		mutex_unlock(&raw3270_mutex);
+	}
+	return rc;
+}
+
+static void
+raw3270_exit(void)
+{
+	ccw_driver_unregister(&raw3270_ccw_driver);
+	class_destroy(class3270);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(raw3270_init);
+module_exit(raw3270_exit);
+
+EXPORT_SYMBOL(raw3270_request_alloc);
+EXPORT_SYMBOL(raw3270_request_free);
+EXPORT_SYMBOL(raw3270_request_reset);
+EXPORT_SYMBOL(raw3270_request_set_cmd);
+EXPORT_SYMBOL(raw3270_request_add_data);
+EXPORT_SYMBOL(raw3270_request_set_data);
+EXPORT_SYMBOL(raw3270_request_set_idal);
+EXPORT_SYMBOL(raw3270_buffer_address);
+EXPORT_SYMBOL(raw3270_add_view);
+EXPORT_SYMBOL(raw3270_del_view);
+EXPORT_SYMBOL(raw3270_find_view);
+EXPORT_SYMBOL(raw3270_activate_view);
+EXPORT_SYMBOL(raw3270_deactivate_view);
+EXPORT_SYMBOL(raw3270_start);
+EXPORT_SYMBOL(raw3270_start_locked);
+EXPORT_SYMBOL(raw3270_start_irq);
+EXPORT_SYMBOL(raw3270_reset);
+EXPORT_SYMBOL(raw3270_register_notifier);
+EXPORT_SYMBOL(raw3270_unregister_notifier);
+EXPORT_SYMBOL(raw3270_wait_queue);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/raw3270.h b/ap/os/linux/linux-3.4.x/drivers/s390/char/raw3270.h
new file mode 100644
index 0000000..ed34eb2
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/raw3270.h
@@ -0,0 +1,279 @@
+/*
+ * IBM/3270 Driver
+ *
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
+ */
+
+#include <asm/idals.h>
+#include <asm/ioctl.h>
+
+/* ioctls for fullscreen 3270 */
+#define TUBICMD		_IO('3', 3)	/* set ccw command for fs reads. */
+#define TUBOCMD		_IO('3', 4)	/* set ccw command for fs writes. */
+#define TUBGETI		_IO('3', 7)	/* get ccw command for fs reads. */
+#define TUBGETO		_IO('3', 8)	/* get ccw command for fs writes. */
+#define TUBSETMOD	_IO('3',12)	/* FIXME: what does it do ?*/
+#define TUBGETMOD	_IO('3',13)	/* FIXME: what does it do ?*/
+
+/* Local Channel Commands */
+#define TC_WRITE	0x01		/* Write */
+#define TC_RDBUF	0x02		/* Read Buffer */
+#define TC_EWRITE	0x05		/* Erase write */
+#define TC_READMOD	0x06		/* Read modified */
+#define TC_EWRITEA	0x0d		/* Erase write alternate */
+#define TC_WRITESF	0x11		/* Write structured field */
+
+/* Buffer Control Orders */
+#define TO_SF		0x1d		/* Start field */
+#define TO_SBA		0x11		/* Set buffer address */
+#define TO_IC		0x13		/* Insert cursor */
+#define TO_PT		0x05		/* Program tab */
+#define TO_RA		0x3c		/* Repeat to address */
+#define TO_SFE		0x29		/* Start field extended */
+#define TO_EUA		0x12		/* Erase unprotected to address */
+#define TO_MF		0x2c		/* Modify field */
+#define TO_SA		0x28		/* Set attribute */
+
+/* Field Attribute Bytes */
+#define TF_INPUT	0x40		/* Visible input */
+#define TF_INPUTN	0x4c		/* Invisible input */
+#define TF_INMDT	0xc1		/* Visible, Set-MDT */
+#define TF_LOG		0x60
+
+/* Character Attribute Bytes */
+#define TAT_RESET	0x00
+#define TAT_FIELD	0xc0
+#define TAT_EXTHI	0x41
+#define TAT_COLOR	0x42
+#define TAT_CHARS	0x43
+#define TAT_TRANS	0x46
+
+/* Extended-Highlighting Bytes */
+#define TAX_RESET	0x00
+#define TAX_BLINK	0xf1
+#define TAX_REVER	0xf2
+#define TAX_UNDER	0xf4
+
+/* Reset value */
+#define TAR_RESET	0x00
+
+/* Color values */
+#define TAC_RESET	0x00
+#define TAC_BLUE	0xf1
+#define TAC_RED		0xf2
+#define TAC_PINK	0xf3
+#define TAC_GREEN	0xf4
+#define TAC_TURQ	0xf5
+#define TAC_YELLOW	0xf6
+#define TAC_WHITE	0xf7
+#define TAC_DEFAULT	0x00
+
+/* Write Control Characters */
+#define TW_NONE		0x40		/* No particular action */
+#define TW_KR		0xc2		/* Keyboard restore */
+#define TW_PLUSALARM	0x04		/* Add this bit for alarm */
+
+#define RAW3270_FIRSTMINOR	1	/* First minor number */
+#define RAW3270_MAXDEVS		255	/* Max number of 3270 devices */
+
+/* For TUBGETMOD and TUBSETMOD; should live in a shared include file. */
+struct raw3270_iocb {
+	short model;
+	short line_cnt;
+	short col_cnt;
+	short pf_cnt;
+	short re_cnt;
+	short map;
+};
+
+struct raw3270;
+struct raw3270_view;
+
+/* 3270 CCW request */
+struct raw3270_request {
+	struct list_head list;		/* list head for request queueing. */
+	struct raw3270_view *view;	/* view of this request */
+	struct ccw1 ccw;		/* single ccw. */
+	void *buffer;			/* output buffer. */
+	size_t size;			/* size of output buffer. */
+	int rescnt;			/* residual count from devstat. */
+	int rc;				/* return code for this request. */
+
+	/* Callback for delivering final status. */
+	void (*callback)(struct raw3270_request *, void *);
+	void *callback_data;
+};
+
+struct raw3270_request *raw3270_request_alloc(size_t size);
+struct raw3270_request *raw3270_request_alloc_bootmem(size_t size);
+void raw3270_request_free(struct raw3270_request *);
+void raw3270_request_reset(struct raw3270_request *);
+void raw3270_request_set_cmd(struct raw3270_request *, u8 cmd);
+int  raw3270_request_add_data(struct raw3270_request *, void *, size_t);
+void raw3270_request_set_data(struct raw3270_request *, void *, size_t);
+void raw3270_request_set_idal(struct raw3270_request *, struct idal_buffer *);
+
+static inline int
+raw3270_request_final(struct raw3270_request *rq)
+{
+	return list_empty(&rq->list);
+}
+
+void raw3270_buffer_address(struct raw3270 *, char *, unsigned short);
+
+/* Return value of *intv (see raw3270_fn below) can be one of the following: */
+#define RAW3270_IO_DONE		0	/* request finished */
+#define RAW3270_IO_BUSY		1	/* request still active */
+#define RAW3270_IO_RETRY	2	/* retry current request */
+#define RAW3270_IO_STOP		3	/* kill current request */
+
+/*
+ * Functions of a 3270 view.
+ */
+struct raw3270_fn {
+	int  (*activate)(struct raw3270_view *);
+	void (*deactivate)(struct raw3270_view *);
+	int  (*intv)(struct raw3270_view *,
+		     struct raw3270_request *, struct irb *);
+	void (*release)(struct raw3270_view *);
+	void (*free)(struct raw3270_view *);
+};
+
+/*
+ * View structure chaining. The raw3270_view structure is meant to
+ * be embedded at the start of the real view data structure, e.g.:
+ *   struct example {
+ *     struct raw3270_view view;
+ *     ...
+ *   };
+ */
+struct raw3270_view {
+	struct list_head list;
+	spinlock_t lock;
+	atomic_t ref_count;
+	struct raw3270 *dev;
+	struct raw3270_fn *fn;
+	unsigned int model;
+	unsigned int rows, cols;	/* # of rows & columns of the view */
+	unsigned char *ascebc;		/* ascii -> ebcdic table */
+};
+
+int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
+int raw3270_activate_view(struct raw3270_view *);
+void raw3270_del_view(struct raw3270_view *);
+void raw3270_deactivate_view(struct raw3270_view *);
+struct raw3270_view *raw3270_find_view(struct raw3270_fn *, int);
+int raw3270_start(struct raw3270_view *, struct raw3270_request *);
+int raw3270_start_locked(struct raw3270_view *, struct raw3270_request *);
+int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *);
+int raw3270_reset(struct raw3270_view *);
+struct raw3270_view *raw3270_view(struct raw3270_view *);
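+
+/*
+ * Illustrative view setup (hypothetical names, not part of the driver),
+ * following the embedding scheme shown above:
+ *
+ *	static struct raw3270_fn example_fn = {
+ *		.activate   = example_activate,
+ *		.deactivate = example_deactivate,
+ *		.intv       = example_irq,
+ *		.release    = example_release,
+ *		.free       = example_free,
+ *	};
+ *
+ *	rc = raw3270_add_view(&ex->view, &example_fn, minor);
+ *	if (rc == 0)
+ *		rc = raw3270_activate_view(&ex->view);
+ *	...
+ *	raw3270_del_view(&ex->view);
+ */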
+
+/* Reference count inliner for view structures. */
+static inline void
+raw3270_get_view(struct raw3270_view *view)
+{
+	atomic_inc(&view->ref_count);
+}
+
+extern wait_queue_head_t raw3270_wait_queue;
+
+static inline void
+raw3270_put_view(struct raw3270_view *view)
+{
+	if (atomic_dec_return(&view->ref_count) == 0)
+		wake_up(&raw3270_wait_queue);
+}
+
+struct raw3270 *raw3270_setup_console(struct ccw_device *cdev);
+void raw3270_wait_cons_dev(struct raw3270 *);
+
+/* Notifier for device addition/removal */
+int raw3270_register_notifier(void (*notifier)(int, int));
+void raw3270_unregister_notifier(void (*notifier)(int, int));
+void raw3270_pm_unfreeze(struct raw3270_view *);
+
+/*
+ * Little memory allocator for string objects. 
+ */
+struct string
+{
+	struct list_head list;
+	struct list_head update;
+	unsigned long size;
+	unsigned long len;
+	char string[0];
+} __attribute__ ((aligned(8)));
+
+static inline struct string *
+alloc_string(struct list_head *free_list, unsigned long len)
+{
+	struct string *cs, *tmp;
+	unsigned long size;
+
+	size = (len + 7L) & -8L;
+	list_for_each_entry(cs, free_list, list) {
+		if (cs->size < size)
+			continue;
+		if (cs->size > size + sizeof(struct string)) {
+			char *endaddr = (char *) (cs + 1) + cs->size;
+			tmp = (struct string *) (endaddr - size) - 1;
+			tmp->size = size;
+			cs->size -= size + sizeof(struct string);
+			cs = tmp;
+		} else
+			list_del(&cs->list);
+		cs->len = len;
+		INIT_LIST_HEAD(&cs->list);
+		INIT_LIST_HEAD(&cs->update);
+		return cs;
+	}
+	return NULL;
+}
+
+static inline unsigned long
+free_string(struct list_head *free_list, struct string *cs)
+{
+	struct string *tmp;
+	struct list_head *p, *left;
+
+	/* Find out the left neighbour in free memory list. */
+	left = free_list;
+	list_for_each(p, free_list) {
+		if (list_entry(p, struct string, list) > cs)
+			break;
+		left = p;
+	}
+	/* Try to merge with right neighbour = next element from left. */
+	if (left->next != free_list) {
+		tmp = list_entry(left->next, struct string, list);
+		if ((char *) (cs + 1) + cs->size == (char *) tmp) {
+			list_del(&tmp->list);
+			cs->size += tmp->size + sizeof(struct string);
+		}
+	}
+	/* Try to merge with left neighbour. */
+	if (left != free_list) {
+		tmp = list_entry(left, struct string, list);
+		if ((char *) (tmp + 1) + tmp->size == (char *) cs) {
+			tmp->size += cs->size + sizeof(struct string);
+			return tmp->size;
+		}
+	}
+	__list_add(&cs->list, left, left->next);
+	return cs->size;
+}
+
+static inline void
+add_string_memory(struct list_head *free_list, void *mem, unsigned long size)
+{
+	struct string *cs;
+
+	cs = (struct string *) mem;
+	cs->size = size - sizeof(struct string);
+	free_string(free_list, cs);
+}
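+
+/*
+ * Illustrative usage of the string allocator (hypothetical backing buffer,
+ * not part of the driver): seed the free list with one chunk of memory,
+ * then carve strings out of it and give them back.
+ *
+ *	static LIST_HEAD(free_strings);
+ *	static char string_pool[PAGE_SIZE];
+ *
+ *	add_string_memory(&free_strings, string_pool, sizeof(string_pool));
+ *	s = alloc_string(&free_strings, 80);	 (NULL if no chunk is large enough)
+ *	...
+ *	free_string(&free_strings, s);		 (merges with adjacent free chunks)
+ */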
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp.c
new file mode 100644
index 0000000..30f29a0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp.c
@@ -0,0 +1,1127 @@
+/*
+ * core function to access sclp interface
+ *
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/reboot.h>
+#include <linux/jiffies.h>
+#include <linux/init.h>
+#include <linux/suspend.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <asm/types.h>
+#include <asm/irq.h>
+
+#include "sclp.h"
+
+#define SCLP_HEADER		"sclp: "
+
+/* Lock to protect internal data consistency. */
+static DEFINE_SPINLOCK(sclp_lock);
+
+/* Mask of events that we can send to the sclp interface. */
+static sccb_mask_t sclp_receive_mask;
+
+/* Mask of events that we can receive from the sclp interface. */
+static sccb_mask_t sclp_send_mask;
+
+/* List of registered event listeners and senders. */
+static struct list_head sclp_reg_list;
+
+/* List of queued requests. */
+static struct list_head sclp_req_queue;
+
+/* Data for read and init requests. */
+static struct sclp_req sclp_read_req;
+static struct sclp_req sclp_init_req;
+static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+
+/* Suspend request */
+static DECLARE_COMPLETION(sclp_request_queue_flushed);
+
+static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
+{
+	complete(&sclp_request_queue_flushed);
+}
+
+static struct sclp_req sclp_suspend_req;
+
+/* Timer for request retries. */
+static struct timer_list sclp_request_timer;
+
+/* Internal state: is the driver initialized? */
+static volatile enum sclp_init_state_t {
+	sclp_init_state_uninitialized,
+	sclp_init_state_initializing,
+	sclp_init_state_initialized
+} sclp_init_state = sclp_init_state_uninitialized;
+
+/* Internal state: is a request active at the sclp? */
+static volatile enum sclp_running_state_t {
+	sclp_running_state_idle,
+	sclp_running_state_running,
+	sclp_running_state_reset_pending
+} sclp_running_state = sclp_running_state_idle;
+
+/* Internal state: is a read request pending? */
+static volatile enum sclp_reading_state_t {
+	sclp_reading_state_idle,
+	sclp_reading_state_reading
+} sclp_reading_state = sclp_reading_state_idle;
+
+/* Internal state: is the driver currently serving requests? */
+static volatile enum sclp_activation_state_t {
+	sclp_activation_state_active,
+	sclp_activation_state_deactivating,
+	sclp_activation_state_inactive,
+	sclp_activation_state_activating
+} sclp_activation_state = sclp_activation_state_active;
+
+/* Internal state: is an init mask request pending? */
+static volatile enum sclp_mask_state_t {
+	sclp_mask_state_idle,
+	sclp_mask_state_initializing
+} sclp_mask_state = sclp_mask_state_idle;
+
+/* Internal state: is the driver suspended? */
+static enum sclp_suspend_state_t {
+	sclp_suspend_state_running,
+	sclp_suspend_state_suspended,
+} sclp_suspend_state = sclp_suspend_state_running;
+
+/* Maximum retry counts */
+#define SCLP_INIT_RETRY		3
+#define SCLP_MASK_RETRY		3
+
+/* Timeout intervals in seconds. */
+#define SCLP_BUSY_INTERVAL	10
+#define SCLP_RETRY_INTERVAL	30
+
+static void sclp_process_queue(void);
+static void __sclp_make_read_req(void);
+static int sclp_init_mask(int calculate);
+static int sclp_init(void);
+
+/* Perform service call. Return 0 on success, non-zero otherwise. */
+int
+sclp_service_call(sclp_cmdw_t command, void *sccb)
+{
+	int cc;
+
+	asm volatile(
+		"	.insn	rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=&d" (cc) : "d" (command), "a" (__pa(sccb))
+		: "cc", "memory");
+	if (cc == 3)
+		return -EIO;
+	if (cc == 2)
+		return -EBUSY;
+	return 0;
+}
+
+
+static void
+__sclp_queue_read_req(void)
+{
+	if (sclp_reading_state == sclp_reading_state_idle) {
+		sclp_reading_state = sclp_reading_state_reading;
+		__sclp_make_read_req();
+		/* Add request to head of queue */
+		list_add(&sclp_read_req.list, &sclp_req_queue);
+	}
+}
+
+/* Set up request retry timer. Called while sclp_lock is locked. */
+static inline void
+__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
+			 unsigned long data)
+{
+	del_timer(&sclp_request_timer);
+	sclp_request_timer.function = function;
+	sclp_request_timer.data = data;
+	sclp_request_timer.expires = jiffies + time;
+	add_timer(&sclp_request_timer);
+}
+
+/* Request timeout handler. Restart the request queue. If DATA is non-zero,
+ * force restart of running request. */
+static void
+sclp_request_timeout(unsigned long data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (data) {
+		if (sclp_running_state == sclp_running_state_running) {
+			/* Break running state and queue NOP read event request
+			 * to get a defined interface state. */
+			__sclp_queue_read_req();
+			sclp_running_state = sclp_running_state_idle;
+		}
+	} else {
+		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+					 sclp_request_timeout, 0);
+	}
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	sclp_process_queue();
+}
+
+/* Try to start a request. Return zero if the request was successfully
+ * started or if it will be started at a later time. Return non-zero otherwise.
+ * Called while sclp_lock is locked. */
+static int
+__sclp_start_request(struct sclp_req *req)
+{
+	int rc;
+
+	if (sclp_running_state != sclp_running_state_idle)
+		return 0;
+	del_timer(&sclp_request_timer);
+	rc = sclp_service_call(req->command, req->sccb);
+	req->start_count++;
+
+	if (rc == 0) {
+		/* Successfully started request */
+		req->status = SCLP_REQ_RUNNING;
+		sclp_running_state = sclp_running_state_running;
+		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
+					 sclp_request_timeout, 1);
+		return 0;
+	} else if (rc == -EBUSY) {
+		/* Try again later */
+		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+					 sclp_request_timeout, 0);
+		return 0;
+	}
+	/* Request failed */
+	req->status = SCLP_REQ_FAILED;
+	return rc;
+}
+
+/* Try to start queued requests. */
+static void
+sclp_process_queue(void)
+{
+	struct sclp_req *req;
+	int rc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (sclp_running_state != sclp_running_state_idle) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return;
+	}
+	del_timer(&sclp_request_timer);
+	while (!list_empty(&sclp_req_queue)) {
+		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
+		if (!req->sccb)
+			goto do_post;
+		rc = __sclp_start_request(req);
+		if (rc == 0)
+			break;
+		/* Request failed */
+		if (req->start_count > 1) {
+			/* Cannot abort already submitted request - could still
+			 * be active at the SCLP */
+			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+						 sclp_request_timeout, 0);
+			break;
+		}
+do_post:
+		/* Post-processing for aborted request */
+		list_del(&req->list);
+		if (req->callback) {
+			spin_unlock_irqrestore(&sclp_lock, flags);
+			req->callback(req, req->callback_data);
+			spin_lock_irqsave(&sclp_lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
+static int __sclp_can_add_request(struct sclp_req *req)
+{
+	if (req == &sclp_suspend_req || req == &sclp_init_req)
+		return 1;
+	if (sclp_suspend_state != sclp_suspend_state_running)
+		return 0;
+	if (sclp_init_state != sclp_init_state_initialized)
+		return 0;
+	if (sclp_activation_state != sclp_activation_state_active)
+		return 0;
+	return 1;
+}
+
+/* Queue a new request. Return zero on success, non-zero otherwise. */
+int
+sclp_add_request(struct sclp_req *req)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (!__sclp_can_add_request(req)) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EIO;
+	}
+	req->status = SCLP_REQ_QUEUED;
+	req->start_count = 0;
+	list_add_tail(&req->list, &sclp_req_queue);
+	rc = 0;
+	/* Start if request is first in list */
+	if (sclp_running_state == sclp_running_state_idle &&
+	    req->list.prev == &sclp_req_queue) {
+		if (!req->sccb) {
+			list_del(&req->list);
+			rc = -ENODATA;
+			goto out;
+		}
+		rc = __sclp_start_request(req);
+		if (rc)
+			list_del(&req->list);
+	}
+out:
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+EXPORT_SYMBOL(sclp_add_request);
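+
+/*
+ * Illustrative request submission (hypothetical command word, SCCB and
+ * callback names, not part of the driver): the caller provides the SCCB
+ * and is notified through the callback once the request is done or has
+ * been aborted.
+ *
+ *	static struct sclp_req req;
+ *
+ *	memset(&req, 0, sizeof(req));
+ *	req.command = my_cmdw;		 (hypothetical SCLP command word)
+ *	req.sccb = my_sccb;		 (caller-provided SCCB)
+ *	req.callback = my_callback;
+ *	req.callback_data = my_data;
+ *	rc = sclp_add_request(&req);
+ */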
+
+/* Dispatch events found in request buffer to registered listeners. Return 0
+ * if all events were dispatched, non-zero otherwise. */
+static int
+sclp_dispatch_evbufs(struct sccb_header *sccb)
+{
+	unsigned long flags;
+	struct evbuf_header *evbuf;
+	struct list_head *l;
+	struct sclp_register *reg;
+	int offset;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	rc = 0;
+	for (offset = sizeof(struct sccb_header); offset < sccb->length;
+	     offset += evbuf->length) {
+		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
+		/* Check for malformed hardware response */
+		if (evbuf->length == 0)
+			break;
+		/* Search for event handler */
+		reg = NULL;
+		list_for_each(l, &sclp_reg_list) {
+			reg = list_entry(l, struct sclp_register, list);
+			if (reg->receive_mask & (1 << (32 - evbuf->type)))
+				break;
+			else
+				reg = NULL;
+		}
+		if (reg && reg->receiver_fn) {
+			spin_unlock_irqrestore(&sclp_lock, flags);
+			reg->receiver_fn(evbuf);
+			spin_lock_irqsave(&sclp_lock, flags);
+		} else if (reg == NULL)
+			rc = -ENOSYS;
+	}
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+/* Read event data request callback. */
+static void
+sclp_read_cb(struct sclp_req *req, void *data)
+{
+	unsigned long flags;
+	struct sccb_header *sccb;
+
+	sccb = (struct sccb_header *) req->sccb;
+	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
+	    sccb->response_code == 0x220))
+		sclp_dispatch_evbufs(sccb);
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_reading_state = sclp_reading_state_idle;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
+/* Prepare read event data request. Called while sclp_lock is locked. */
+static void __sclp_make_read_req(void)
+{
+	struct sccb_header *sccb;
+
+	sccb = (struct sccb_header *) sclp_read_sccb;
+	clear_page(sccb);
+	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
+	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
+	sclp_read_req.status = SCLP_REQ_QUEUED;
+	sclp_read_req.start_count = 0;
+	sclp_read_req.callback = sclp_read_cb;
+	sclp_read_req.sccb = sccb;
+	sccb->length = PAGE_SIZE;
+	sccb->function_code = 0;
+	sccb->control_mask[2] = 0x80;
+}
+
+/* Search request list for request with matching sccb. Return request if found,
+ * NULL otherwise. Called while sclp_lock is locked. */
+static inline struct sclp_req *
+__sclp_find_req(u32 sccb)
+{
+	struct list_head *l;
+	struct sclp_req *req;
+
+	list_for_each(l, &sclp_req_queue) {
+		req = list_entry(l, struct sclp_req, list);
+		if (sccb == (u32) (addr_t) req->sccb)
+			return req;
+	}
+	return NULL;
+}
+
+/* Handler for external interruption. Perform request post-processing.
+ * Prepare read event data request if necessary. Start processing of next
+ * request on queue. */
+static void sclp_interrupt_handler(struct ext_code ext_code,
+				   unsigned int param32, unsigned long param64)
+{
+	struct sclp_req *req;
+	u32 finished_sccb;
+	u32 evbuf_pending;
+
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
+	spin_lock(&sclp_lock);
+	finished_sccb = param32 & 0xfffffff8;
+	evbuf_pending = param32 & 0x3;
+	if (finished_sccb) {
+		del_timer(&sclp_request_timer);
+		sclp_running_state = sclp_running_state_reset_pending;
+		req = __sclp_find_req(finished_sccb);
+		if (req) {
+			/* Request post-processing */
+			list_del(&req->list);
+			req->status = SCLP_REQ_DONE;
+			if (req->callback) {
+				spin_unlock(&sclp_lock);
+				req->callback(req, req->callback_data);
+				spin_lock(&sclp_lock);
+			}
+		}
+		sclp_running_state = sclp_running_state_idle;
+	}
+	if (evbuf_pending &&
+	    sclp_activation_state == sclp_activation_state_active)
+		__sclp_queue_read_req();
+	spin_unlock(&sclp_lock);
+	sclp_process_queue();
+}
+
+/* Convert interval in jiffies to TOD ticks. */
+static inline u64
+sclp_tod_from_jiffies(unsigned long jiffies)
+{
+	return (u64) (jiffies / HZ) << 32;
+}
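+
+/*
+ * Worked example (illustrative only): with HZ == 100, an interval of
+ * 250 jiffies is 250 / 100 == 2 whole seconds, which this helper encodes
+ * as (u64) 2 << 32.
+ */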
+
+/* Wait until a currently running request has finished. Note: while this function
+ * is running, no timers are served on the calling CPU. */
+void
+sclp_sync_wait(void)
+{
+	unsigned long long old_tick;
+	unsigned long flags;
+	unsigned long cr0, cr0_sync;
+	u64 timeout;
+	int irq_context;
+
+	/* We'll be disabling timer interrupts, so we need a custom timeout
+	 * mechanism */
+	timeout = 0;
+	if (timer_pending(&sclp_request_timer)) {
+		/* Get timeout TOD value */
+		timeout = get_clock() +
+			  sclp_tod_from_jiffies(sclp_request_timer.expires -
+						jiffies);
+	}
+	local_irq_save(flags);
+	/* Prevent bottom half from executing once we force interrupts open */
+	irq_context = in_interrupt();
+	if (!irq_context)
+		local_bh_disable();
+	/* Enable service-signal interruption, disable timer interrupts */
+	old_tick = local_tick_disable();
+	trace_hardirqs_on();
+	__ctl_store(cr0, 0, 0);
+	cr0_sync = cr0;
+	cr0_sync &= 0xffff00a0;
+	cr0_sync |= 0x00000200;
+	__ctl_load(cr0_sync, 0, 0);
+	__arch_local_irq_stosm(0x01);
+	/* Loop until driver state indicates finished request */
+	while (sclp_running_state != sclp_running_state_idle) {
+		/* Check for expired request timer */
+		if (timer_pending(&sclp_request_timer) &&
+		    get_clock() > timeout &&
+		    del_timer(&sclp_request_timer))
+			sclp_request_timer.function(sclp_request_timer.data);
+		cpu_relax();
+	}
+	local_irq_disable();
+	__ctl_load(cr0, 0, 0);
+	if (!irq_context)
+		_local_bh_enable();
+	local_tick_enable(old_tick);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(sclp_sync_wait);
+
+/* Dispatch changes in send and receive mask to registered listeners. */
+static void
+sclp_dispatch_state_change(void)
+{
+	struct list_head *l;
+	struct sclp_register *reg;
+	unsigned long flags;
+	sccb_mask_t receive_mask;
+	sccb_mask_t send_mask;
+
+	do {
+		spin_lock_irqsave(&sclp_lock, flags);
+		reg = NULL;
+		list_for_each(l, &sclp_reg_list) {
+			reg = list_entry(l, struct sclp_register, list);
+			receive_mask = reg->send_mask & sclp_receive_mask;
+			send_mask = reg->receive_mask & sclp_send_mask;
+			if (reg->sclp_receive_mask != receive_mask ||
+			    reg->sclp_send_mask != send_mask) {
+				reg->sclp_receive_mask = receive_mask;
+				reg->sclp_send_mask = send_mask;
+				break;
+			} else
+				reg = NULL;
+		}
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		if (reg && reg->state_change_fn)
+			reg->state_change_fn(reg);
+	} while (reg);
+}
+
+struct sclp_statechangebuf {
+	struct evbuf_header	header;
+	u8		validity_sclp_active_facility_mask : 1;
+	u8		validity_sclp_receive_mask : 1;
+	u8		validity_sclp_send_mask : 1;
+	u8		validity_read_data_function_mask : 1;
+	u16		_zeros : 12;
+	u16		mask_length;
+	u64		sclp_active_facility_mask;
+	sccb_mask_t	sclp_receive_mask;
+	sccb_mask_t	sclp_send_mask;
+	u32		read_data_function_mask;
+} __attribute__((packed));
+
+
+/* State change event callback. Inform listeners of changes. */
+static void
+sclp_state_change_cb(struct evbuf_header *evbuf)
+{
+	unsigned long flags;
+	struct sclp_statechangebuf *scbuf;
+
+	scbuf = (struct sclp_statechangebuf *) evbuf;
+	if (scbuf->mask_length != sizeof(sccb_mask_t))
+		return;
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (scbuf->validity_sclp_receive_mask)
+		sclp_receive_mask = scbuf->sclp_receive_mask;
+	if (scbuf->validity_sclp_send_mask)
+		sclp_send_mask = scbuf->sclp_send_mask;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	if (scbuf->validity_sclp_active_facility_mask)
+		sclp_facilities = scbuf->sclp_active_facility_mask;
+	sclp_dispatch_state_change();
+}
+
+static struct sclp_register sclp_state_change_event = {
+	.receive_mask = EVTYP_STATECHANGE_MASK,
+	.receiver_fn = sclp_state_change_cb
+};
+
+/* Calculate receive and send mask of currently registered listeners.
+ * Called while sclp_lock is locked. */
+static inline void
+__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
+{
+	struct list_head *l;
+	struct sclp_register *t;
+
+	*receive_mask = 0;
+	*send_mask = 0;
+	list_for_each(l, &sclp_reg_list) {
+		t = list_entry(l, struct sclp_register, list);
+		*receive_mask |= t->receive_mask;
+		*send_mask |= t->send_mask;
+	}
+}
+
+/* Register event listener. Return 0 on success, non-zero otherwise. */
+int
+sclp_register(struct sclp_register *reg)
+{
+	unsigned long flags;
+	sccb_mask_t receive_mask;
+	sccb_mask_t send_mask;
+	int rc;
+
+	rc = sclp_init();
+	if (rc)
+		return rc;
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Check event mask for collisions */
+	__sclp_get_mask(&receive_mask, &send_mask);
+	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EBUSY;
+	}
+	/* Trigger initial state change callback */
+	reg->sclp_receive_mask = 0;
+	reg->sclp_send_mask = 0;
+	reg->pm_event_posted = 0;
+	list_add(&reg->list, &sclp_reg_list);
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	rc = sclp_init_mask(1);
+	if (rc) {
+		spin_lock_irqsave(&sclp_lock, flags);
+		list_del(&reg->list);
+		spin_unlock_irqrestore(&sclp_lock, flags);
+	}
+	return rc;
+}
+
+EXPORT_SYMBOL(sclp_register);
+
+/* Unregister event listener. */
+void
+sclp_unregister(struct sclp_register *reg)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	list_del(&reg->list);
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	sclp_init_mask(1);
+}
+
+EXPORT_SYMBOL(sclp_unregister);
+
+/* Remove event buffers which are marked processed. Return the number of
+ * remaining event buffers. */
+int
+sclp_remove_processed(struct sccb_header *sccb)
+{
+	struct evbuf_header *evbuf;
+	int unprocessed;
+	u16 remaining;
+
+	evbuf = (struct evbuf_header *) (sccb + 1);
+	unprocessed = 0;
+	remaining = sccb->length - sizeof(struct sccb_header);
+	while (remaining > 0) {
+		remaining -= evbuf->length;
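+		/* Flag 0x80 marks the event buffer as processed; remove it by
+		 * copying the remaining buffers forward. */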
+		if (evbuf->flags & 0x80) {
+			sccb->length -= evbuf->length;
+			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
+			       remaining);
+		} else {
+			unprocessed++;
+			evbuf = (struct evbuf_header *)
+					((addr_t) evbuf + evbuf->length);
+		}
+	}
+	return unprocessed;
+}
+
+EXPORT_SYMBOL(sclp_remove_processed);
+
+struct init_sccb {
+	struct sccb_header header;
+	u16 _reserved;
+	u16 mask_length;
+	sccb_mask_t receive_mask;
+	sccb_mask_t send_mask;
+	sccb_mask_t sclp_receive_mask;
+	sccb_mask_t sclp_send_mask;
+} __attribute__((packed));
+
+/* Prepare init mask request. Called while sclp_lock is locked. */
+static inline void
+__sclp_make_init_req(u32 receive_mask, u32 send_mask)
+{
+	struct init_sccb *sccb;
+
+	sccb = (struct init_sccb *) sclp_init_sccb;
+	clear_page(sccb);
+	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
+	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
+	sclp_init_req.status = SCLP_REQ_FILLED;
+	sclp_init_req.start_count = 0;
+	sclp_init_req.callback = NULL;
+	sclp_init_req.callback_data = NULL;
+	sclp_init_req.sccb = sccb;
+	sccb->header.length = sizeof(struct init_sccb);
+	sccb->mask_length = sizeof(sccb_mask_t);
+	sccb->receive_mask = receive_mask;
+	sccb->send_mask = send_mask;
+	sccb->sclp_receive_mask = 0;
+	sccb->sclp_send_mask = 0;
+}
+
+/* Start init mask request. If calculate is non-zero, calculate the mask as
+ * requested by registered listeners. Use zero mask otherwise. Return 0 on
+ * success, non-zero otherwise. */
+static int
+sclp_init_mask(int calculate)
+{
+	unsigned long flags;
+	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
+	sccb_mask_t receive_mask;
+	sccb_mask_t send_mask;
+	int retry;
+	int rc;
+	unsigned long wait;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Check if interface is in appropriate state */
+	if (sclp_mask_state != sclp_mask_state_idle) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EBUSY;
+	}
+	if (sclp_activation_state == sclp_activation_state_inactive) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EINVAL;
+	}
+	sclp_mask_state = sclp_mask_state_initializing;
+	/* Determine mask */
+	if (calculate)
+		__sclp_get_mask(&receive_mask, &send_mask);
+	else {
+		receive_mask = 0;
+		send_mask = 0;
+	}
+	rc = -EIO;
+	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
+		/* Prepare request */
+		__sclp_make_init_req(receive_mask, send_mask);
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		if (sclp_add_request(&sclp_init_req)) {
+			/* Try again later */
+			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
+			while (time_before(jiffies, wait))
+				sclp_sync_wait();
+			spin_lock_irqsave(&sclp_lock, flags);
+			continue;
+		}
+		while (sclp_init_req.status != SCLP_REQ_DONE &&
+		       sclp_init_req.status != SCLP_REQ_FAILED)
+			sclp_sync_wait();
+		spin_lock_irqsave(&sclp_lock, flags);
+		if (sclp_init_req.status == SCLP_REQ_DONE &&
+		    sccb->header.response_code == 0x20) {
+			/* Successful request */
+			if (calculate) {
+				sclp_receive_mask = sccb->sclp_receive_mask;
+				sclp_send_mask = sccb->sclp_send_mask;
+			} else {
+				sclp_receive_mask = 0;
+				sclp_send_mask = 0;
+			}
+			spin_unlock_irqrestore(&sclp_lock, flags);
+			sclp_dispatch_state_change();
+			spin_lock_irqsave(&sclp_lock, flags);
+			rc = 0;
+			break;
+		}
+	}
+	sclp_mask_state = sclp_mask_state_idle;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+/* Deactivate SCLP interface. On success, new requests will be rejected and
+ * events will no longer be dispatched. Return 0 on success, non-zero
+ * otherwise. */
+int
+sclp_deactivate(void)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Deactivate can only be called when active */
+	if (sclp_activation_state != sclp_activation_state_active) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EINVAL;
+	}
+	sclp_activation_state = sclp_activation_state_deactivating;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	rc = sclp_init_mask(0);
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (rc == 0)
+		sclp_activation_state = sclp_activation_state_inactive;
+	else
+		sclp_activation_state = sclp_activation_state_active;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+EXPORT_SYMBOL(sclp_deactivate);
+
+/* Reactivate SCLP interface after sclp_deactivate. On success, new
+ * requests will be accepted and events will be dispatched again. Return 0 on
+ * success, non-zero otherwise. */
+int
+sclp_reactivate(void)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Reactivate can only be called when inactive */
+	if (sclp_activation_state != sclp_activation_state_inactive) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EINVAL;
+	}
+	sclp_activation_state = sclp_activation_state_activating;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	rc = sclp_init_mask(1);
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (rc == 0)
+		sclp_activation_state = sclp_activation_state_active;
+	else
+		sclp_activation_state = sclp_activation_state_inactive;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+EXPORT_SYMBOL(sclp_reactivate);
+
+/* Handler for external interruption used during initialization. Modify
+ * request state to done. */
+static void sclp_check_handler(struct ext_code ext_code,
+			       unsigned int param32, unsigned long param64)
+{
+	u32 finished_sccb;
+
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
+	finished_sccb = param32 & 0xfffffff8;
+	/* Is this the interrupt we are waiting for? */
+	if (finished_sccb == 0)
+		return;
+	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
+		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
+		      finished_sccb);
+	spin_lock(&sclp_lock);
+	if (sclp_running_state == sclp_running_state_running) {
+		sclp_init_req.status = SCLP_REQ_DONE;
+		sclp_running_state = sclp_running_state_idle;
+	}
+	spin_unlock(&sclp_lock);
+}
+
+/* Initial init mask request timed out. Modify request state to failed. */
+static void
+sclp_check_timeout(unsigned long data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (sclp_running_state == sclp_running_state_running) {
+		sclp_init_req.status = SCLP_REQ_FAILED;
+		sclp_running_state = sclp_running_state_idle;
+	}
+	spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
+/* Perform a check of the SCLP interface. Return zero if the interface is
+ * available and there are no pending requests from a previous instance.
+ * Return non-zero otherwise. */
+static int
+sclp_check_interface(void)
+{
+	struct init_sccb *sccb;
+	unsigned long flags;
+	int retry;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Register a temporary handler for the service-signal external
+	 * interrupt (external interruption code 0x2401). */
+	rc = register_external_interrupt(0x2401, sclp_check_handler);
+	if (rc) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return rc;
+	}
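+	/* Issue an empty init mask request and poll until either the check
+	 * handler or the timeout marks it done or failed. */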
+	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
+		__sclp_make_init_req(0, 0);
+		sccb = (struct init_sccb *) sclp_init_req.sccb;
+		rc = sclp_service_call(sclp_init_req.command, sccb);
+		if (rc == -EIO)
+			break;
+		sclp_init_req.status = SCLP_REQ_RUNNING;
+		sclp_running_state = sclp_running_state_running;
+		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
+					 sclp_check_timeout, 0);
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		/* Enable service-signal interruption - needs to happen
+		 * with IRQs enabled. */
+		service_subclass_irq_register();
+		/* Wait for signal from interrupt or timeout */
+		sclp_sync_wait();
+		/* Disable service-signal interruption - needs to happen
+		 * with IRQs enabled. */
+		service_subclass_irq_unregister();
+		spin_lock_irqsave(&sclp_lock, flags);
+		del_timer(&sclp_request_timer);
+		if (sclp_init_req.status == SCLP_REQ_DONE &&
+		    sccb->header.response_code == 0x20) {
+			rc = 0;
+			break;
+		} else
+			rc = -EBUSY;
+	}
+	unregister_external_interrupt(0x2401, sclp_check_handler);
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
+ * events from interfering with the rebooted system. */
+static int
+sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+	sclp_deactivate();
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block sclp_reboot_notifier = {
+	.notifier_call = sclp_reboot_event
+};
+
+/*
+ * Suspend/resume SCLP notifier implementation
+ */
+
+static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
+{
+	struct sclp_register *reg;
+	unsigned long flags;
+
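+	/* Deliver the event to each registered listener exactly once while
+	 * dropping sclp_lock around the callback. With rollback set, only
+	 * listeners that already received the event are called again. */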
+	if (!rollback) {
+		spin_lock_irqsave(&sclp_lock, flags);
+		list_for_each_entry(reg, &sclp_reg_list, list)
+			reg->pm_event_posted = 0;
+		spin_unlock_irqrestore(&sclp_lock, flags);
+	}
+	do {
+		spin_lock_irqsave(&sclp_lock, flags);
+		list_for_each_entry(reg, &sclp_reg_list, list) {
+			if (rollback && reg->pm_event_posted)
+				goto found;
+			if (!rollback && !reg->pm_event_posted)
+				goto found;
+		}
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return;
+found:
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		if (reg->pm_event_fn)
+			reg->pm_event_fn(reg, sclp_pm_event);
+		reg->pm_event_posted = rollback ? 0 : 1;
+	} while (1);
+}
+
+/*
+ * Suspend/resume callbacks for the platform device
+ */
+
+static int sclp_freeze(struct device *dev)
+{
+	unsigned long flags;
+	int rc;
+
+	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_suspend_state = sclp_suspend_state_suspended;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+
+	/* Initialize suspend request data */
+	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
+	sclp_suspend_req.callback = sclp_suspend_req_cb;
+	sclp_suspend_req.status = SCLP_REQ_FILLED;
+	init_completion(&sclp_request_queue_flushed);
+
+	rc = sclp_add_request(&sclp_suspend_req);
+	if (rc == 0)
+		wait_for_completion(&sclp_request_queue_flushed);
+	else if (rc != -ENODATA)
+		goto fail_thaw;
+
+	rc = sclp_deactivate();
+	if (rc)
+		goto fail_thaw;
+	return 0;
+
+fail_thaw:
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_suspend_state = sclp_suspend_state_running;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
+	return rc;
+}
+
+static int sclp_undo_suspend(enum sclp_pm_event event)
+{
+	unsigned long flags;
+	int rc;
+
+	rc = sclp_reactivate();
+	if (rc)
+		return rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_suspend_state = sclp_suspend_state_running;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+
+	sclp_pm_event(event, 0);
+	return 0;
+}
+
+static int sclp_thaw(struct device *dev)
+{
+	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+}
+
+static int sclp_restore(struct device *dev)
+{
+	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
+}
+
+static const struct dev_pm_ops sclp_pm_ops = {
+	.freeze		= sclp_freeze,
+	.thaw		= sclp_thaw,
+	.restore	= sclp_restore,
+};
+
+static struct platform_driver sclp_pdrv = {
+	.driver = {
+		.name	= "sclp",
+		.owner	= THIS_MODULE,
+		.pm	= &sclp_pm_ops,
+	},
+};
+
+static struct platform_device *sclp_pdev;
+
+/* Initialize SCLP driver. Return zero if driver is operational, non-zero
+ * otherwise. */
+static int
+sclp_init(void)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Check for previous or running initialization */
+	if (sclp_init_state != sclp_init_state_uninitialized)
+		goto fail_unlock;
+	sclp_init_state = sclp_init_state_initializing;
+	/* Set up variables */
+	INIT_LIST_HEAD(&sclp_req_queue);
+	INIT_LIST_HEAD(&sclp_reg_list);
+	list_add(&sclp_state_change_event.list, &sclp_reg_list);
+	init_timer(&sclp_request_timer);
+	/* Check interface */
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	rc = sclp_check_interface();
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (rc)
+		goto fail_init_state_uninitialized;
+	/* Register reboot handler */
+	rc = register_reboot_notifier(&sclp_reboot_notifier);
+	if (rc)
+		goto fail_init_state_uninitialized;
+	/* Register interrupt handler */
+	rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
+	if (rc)
+		goto fail_unregister_reboot_notifier;
+	sclp_init_state = sclp_init_state_initialized;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	/* Enable service-signal external interruption - needs to happen with
+	 * IRQs enabled. */
+	service_subclass_irq_register();
+	sclp_init_mask(1);
+	return 0;
+
+fail_unregister_reboot_notifier:
+	unregister_reboot_notifier(&sclp_reboot_notifier);
+fail_init_state_uninitialized:
+	sclp_init_state = sclp_init_state_uninitialized;
+fail_unlock:
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+/*
+ * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
+ * to print the panic message.
+ */
+static int sclp_panic_notify(struct notifier_block *self,
+			     unsigned long event, void *data)
+{
+	if (sclp_suspend_state == sclp_suspend_state_suspended)
+		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block sclp_on_panic_nb = {
+	.notifier_call = sclp_panic_notify,
+	.priority = SCLP_PANIC_PRIO,
+};
+
+static __init int sclp_initcall(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&sclp_pdrv);
+	if (rc)
+		return rc;
+	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
+	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
+	if (rc)
+		goto fail_platform_driver_unregister;
+	rc = atomic_notifier_chain_register(&panic_notifier_list,
+					    &sclp_on_panic_nb);
+	if (rc)
+		goto fail_platform_device_unregister;
+
+	return sclp_init();
+
+fail_platform_device_unregister:
+	platform_device_unregister(sclp_pdev);
+fail_platform_driver_unregister:
+	platform_driver_unregister(&sclp_pdrv);
+	return rc;
+}
+
+arch_initcall(sclp_initcall);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp.h b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp.h
new file mode 100644
index 0000000..49a1bb5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __SCLP_H__
+#define __SCLP_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <asm/sclp.h>
+#include <asm/ebcdic.h>
+
+/* maximum number of pages concerning our own memory management */
+#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
+#define MAX_CONSOLE_PAGES	6
+
+#define EVTYP_OPCMD		0x01
+#define EVTYP_MSG		0x02
+#define EVTYP_STATECHANGE	0x08
+#define EVTYP_PMSGCMD		0x09
+#define EVTYP_CNTLPROGOPCMD	0x20
+#define EVTYP_CNTLPROGIDENT	0x0B
+#define EVTYP_SIGQUIESCE	0x1D
+#define EVTYP_VT220MSG		0x1A
+#define EVTYP_CONFMGMDATA	0x04
+#define EVTYP_SDIAS		0x1C
+#define EVTYP_ASYNC		0x0A
+#define EVTYP_OCF		0x1E
+
+#define EVTYP_OPCMD_MASK	0x80000000
+#define EVTYP_MSG_MASK		0x40000000
+#define EVTYP_STATECHANGE_MASK	0x01000000
+#define EVTYP_PMSGCMD_MASK	0x00800000
+#define EVTYP_CTLPROGOPCMD_MASK	0x00000001
+#define EVTYP_CTLPROGIDENT_MASK	0x00200000
+#define EVTYP_SIGQUIESCE_MASK	0x00000008
+#define EVTYP_VT220MSG_MASK	0x00000040
+#define EVTYP_CONFMGMDATA_MASK	0x10000000
+#define EVTYP_SDIAS_MASK	0x00000010
+#define EVTYP_ASYNC_MASK	0x00400000
+#define EVTYP_OCF_MASK		0x00000004
+
+#define GNRLMSGFLGS_DOM		0x8000
+#define GNRLMSGFLGS_SNDALRM	0x4000
+#define GNRLMSGFLGS_HOLDMSG	0x2000
+
+#define LNTPFLGS_CNTLTEXT	0x8000
+#define LNTPFLGS_LABELTEXT	0x4000
+#define LNTPFLGS_DATATEXT	0x2000
+#define LNTPFLGS_ENDTEXT	0x1000
+#define LNTPFLGS_PROMPTTEXT	0x0800
+
+typedef unsigned int sclp_cmdw_t;
+
+#define SCLP_CMDW_READ_EVENT_DATA	0x00770005
+#define SCLP_CMDW_WRITE_EVENT_DATA	0x00760005
+#define SCLP_CMDW_WRITE_EVENT_MASK	0x00780005
+
+#define GDS_ID_MDSMU		0x1310
+#define GDS_ID_MDSROUTEINFO	0x1311
+#define GDS_ID_AGUNWRKCORR	0x1549
+#define GDS_ID_SNACONDREPORT	0x1532
+#define GDS_ID_CPMSU		0x1212
+#define GDS_ID_ROUTTARGINSTR	0x154D
+#define GDS_ID_OPREQ		0x8070
+#define GDS_ID_TEXTCMD		0x1320
+
+#define GDS_KEY_SELFDEFTEXTMSG	0x31
+
+enum sclp_pm_event {
+	SCLP_PM_EVENT_FREEZE,
+	SCLP_PM_EVENT_THAW,
+	SCLP_PM_EVENT_RESTORE,
+};
+
+#define SCLP_PANIC_PRIO		1
+#define SCLP_PANIC_PRIO_CLIENT	0
+
+typedef u32 sccb_mask_t;	/* ATTENTION: assumes 32bit mask !!! */
+
+struct sccb_header {
+	u16	length;
+	u8	function_code;
+	u8	control_mask[3];
+	u16	response_code;
+} __attribute__((packed));
+
+extern u64 sclp_facilities;
+#define SCLP_HAS_CHP_INFO	(sclp_facilities & 0x8000000000000000ULL)
+#define SCLP_HAS_CHP_RECONFIG	(sclp_facilities & 0x2000000000000000ULL)
+#define SCLP_HAS_CPU_INFO	(sclp_facilities & 0x0800000000000000ULL)
+#define SCLP_HAS_CPU_RECONFIG	(sclp_facilities & 0x0400000000000000ULL)
+
+
+struct gds_subvector {
+	u8	length;
+	u8	key;
+} __attribute__((packed));
+
+struct gds_vector {
+	u16	length;
+	u16	gds_id;
+} __attribute__((packed));
+
+struct evbuf_header {
+	u16	length;
+	u8	type;
+	u8	flags;
+	u16	_reserved;
+} __attribute__((packed));
+
+struct sclp_req {
+	struct list_head list;		/* list_head for request queueing. */
+	sclp_cmdw_t command;		/* sclp command to execute */
+	void	*sccb;			/* pointer to the sccb to execute */
+	char	status;			/* status of this request */
+	int     start_count;		/* number of SVCs done for this req */
+	/* Callback that is called after reaching final status. */
+	void (*callback)(struct sclp_req *, void *data);
+	void *callback_data;
+};
+
+#define SCLP_REQ_FILLED	  0x00	/* request is ready to be processed */
+#define SCLP_REQ_QUEUED	  0x01	/* request is queued to be processed */
+#define SCLP_REQ_RUNNING  0x02	/* request is currently running */
+#define SCLP_REQ_DONE	  0x03	/* request is completed successfully */
+#define SCLP_REQ_FAILED	  0x05	/* request is finally failed */
+
+/* function pointers that a high level driver has to use for registration */
+/* of some routines it wants to be called from the low level driver */
+struct sclp_register {
+	struct list_head list;
+	/* User wants to receive: */
+	sccb_mask_t receive_mask;
+	/* User wants to send: */
+	sccb_mask_t send_mask;
+	/* H/W can receive: */
+	sccb_mask_t sclp_receive_mask;
+	/* H/W can send: */
+	sccb_mask_t sclp_send_mask;
+	/* called if event type availability changes */
+	void (*state_change_fn)(struct sclp_register *);
+	/* called for events in receive_mask/sclp_receive_mask */
+	void (*receiver_fn)(struct evbuf_header *);
+	/* called for power management events */
+	void (*pm_event_fn)(struct sclp_register *, enum sclp_pm_event);
+	/* pm event posted flag */
+	int pm_event_posted;
+};
+
+/* externals from sclp.c */
+int sclp_add_request(struct sclp_req *req);
+void sclp_sync_wait(void);
+int sclp_register(struct sclp_register *reg);
+void sclp_unregister(struct sclp_register *reg);
+int sclp_remove_processed(struct sccb_header *sccb);
+int sclp_deactivate(void);
+int sclp_reactivate(void);
+int sclp_service_call(sclp_cmdw_t command, void *sccb);
+
+int sclp_sdias_init(void);
+void sclp_sdias_exit(void);
+
+/* useful inlines */
+
+/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */
+/* translate single character from ASCII to EBCDIC */
+static inline unsigned char
+sclp_ascebc(unsigned char ch)
+{
+	return (MACHINE_IS_VM) ? _ascebc[ch] : _ascebc_500[ch];
+}
+
+/* translate string from EBCDIC to ASCII */
+static inline void
+sclp_ebcasc_str(unsigned char *str, int nr)
+{
+	(MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr);
+}
+
+/* translate string from ASCII to EBCDIC */
+static inline void
+sclp_ascebc_str(unsigned char *str, int nr)
+{
+	(MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr);
+}
+
+static inline struct gds_vector *
+sclp_find_gds_vector(void *start, void *end, u16 id)
+{
+	struct gds_vector *v;
+
+	for (v = start; (void *) v < end; v = (void *) v + v->length)
+		if (v->gds_id == id)
+			return v;
+	return NULL;
+}
+
+static inline struct gds_subvector *
+sclp_find_gds_subvector(void *start, void *end, u8 key)
+{
+	struct gds_subvector *sv;
+
+	for (sv = start; (void *) sv < end; sv = (void *) sv + sv->length)
+		if (sv->key == key)
+			return sv;
+	return NULL;
+}
+
+#endif	 /* __SCLP_H__ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_async.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_async.c
new file mode 100644
index 0000000..5f9f929
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_async.c
@@ -0,0 +1,211 @@
+/*
+ * Enable Asynchronous Notification via SCLP.
+ *
+ * Copyright IBM Corp. 2009
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/kmod.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/sysctl.h>
+#include <linux/utsname.h>
+#include "sclp.h"
+
+static int callhome_enabled;
+static struct sclp_req *request;
+static struct sclp_async_sccb *sccb;
+static int sclp_async_send_wait(char *message);
+static struct ctl_table_header *callhome_sysctl_header;
+static DEFINE_SPINLOCK(sclp_async_lock);
+#define SCLP_NORMAL_WRITE	0x00
+
+struct async_evbuf {
+	struct evbuf_header header;
+	u64 reserved;
+	u8 rflags;
+	u8 empty;
+	u8 rtype;
+	u8 otype;
+	char comp_id[12];
+	char data[3000]; /* there is still some space left */
+} __attribute__((packed));
+
+struct sclp_async_sccb {
+	struct sccb_header header;
+	struct async_evbuf evbuf;
+} __attribute__((packed));
+
+static struct sclp_register sclp_async_register = {
+	.send_mask = EVTYP_ASYNC_MASK,
+};
+
+static int call_home_on_panic(struct notifier_block *self,
+			      unsigned long event, void *data)
+{
+	strncat(data, init_utsname()->nodename,
+		sizeof(init_utsname()->nodename));
+	sclp_async_send_wait(data);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block call_home_panic_nb = {
+	.notifier_call = call_home_on_panic,
+	.priority = INT_MAX,
+};
+
+static int proc_handler_callhome(struct ctl_table *ctl, int write,
+				 void __user *buffer, size_t *count,
+				 loff_t *ppos)
+{
+	unsigned long val;
+	int len, rc;
+	char buf[3];
+
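+	/* Reads report the current callhome_enabled state; writes accept only
+	 * "0" or "1" to disable or enable call home. */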
+	if (!*count || (*ppos && !write)) {
+		*count = 0;
+		return 0;
+	}
+	if (!write) {
+		len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled);
+		rc = copy_to_user(buffer, buf, sizeof(buf));
+		if (rc != 0)
+			return -EFAULT;
+	} else {
+		len = *count;
+		rc = kstrtoul_from_user(buffer, len, 0, &val);
+		if (rc)
+			return rc;
+		if (val != 0 && val != 1)
+			return -EINVAL;
+		callhome_enabled = val;
+	}
+	*count = len;
+	*ppos += len;
+	return 0;
+}
+
+static struct ctl_table callhome_table[] = {
+	{
+		.procname	= "callhome",
+		.mode		= 0644,
+		.proc_handler	= proc_handler_callhome,
+	},
+	{}
+};
+
+static struct ctl_table kern_dir_table[] = {
+	{
+		.procname	= "kernel",
+		.maxlen		= 0,
+		.mode		= 0555,
+		.child		= callhome_table,
+	},
+	{}
+};
+
+/*
+ * Transfer an asynchronous notification record and wait for send
+ * completion.
+ */
+static int sclp_async_send_wait(char *message)
+{
+	struct async_evbuf *evb;
+	int rc;
+	unsigned long flags;
+
+	if (!callhome_enabled)
+		return 0;
+	sccb->evbuf.header.type = EVTYP_ASYNC;
+	sccb->evbuf.rtype = 0xA5;
+	sccb->evbuf.otype = 0x00;
+	evb = &sccb->evbuf;
+	request->command = SCLP_CMDW_WRITE_EVENT_DATA;
+	request->sccb = sccb;
+	request->status = SCLP_REQ_FILLED;
+	strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
+	/*
+	 * Retain Queue
+	 * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
+	 */
+	strncpy(sccb->evbuf.comp_id, "000000000", sizeof(sccb->evbuf.comp_id));
+	sccb->evbuf.header.length = sizeof(sccb->evbuf);
+	sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
+	sccb->header.function_code = SCLP_NORMAL_WRITE;
+	rc = sclp_add_request(request);
+	if (rc)
+		return rc;
+	spin_lock_irqsave(&sclp_async_lock, flags);
+	while (request->status != SCLP_REQ_DONE &&
+		request->status != SCLP_REQ_FAILED) {
+		 sclp_sync_wait();
+	}
+	spin_unlock_irqrestore(&sclp_async_lock, flags);
+	if (request->status != SCLP_REQ_DONE)
+		return -EIO;
+	rc = ((struct sclp_async_sccb *)
+	       request->sccb)->header.response_code;
+	if (rc != 0x0020)
+		return -EIO;
+	if (evb->header.flags != 0x80)
+		return -EIO;
+	return rc;
+}
+
+static int __init sclp_async_init(void)
+{
+	int rc;
+
+	rc = sclp_register(&sclp_async_register);
+	if (rc)
+		return rc;
+	rc = -EOPNOTSUPP;
+	if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
+		goto out_sclp;
+	rc = -ENOMEM;
+	callhome_sysctl_header = register_sysctl_table(kern_dir_table);
+	if (!callhome_sysctl_header)
+		goto out_sclp;
+	request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
+	sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!request || !sccb)
+		goto out_mem;
+	rc = atomic_notifier_chain_register(&panic_notifier_list,
+					    &call_home_panic_nb);
+	if (!rc)
+		goto out;
+out_mem:
+	kfree(request);
+	free_page((unsigned long) sccb);
+	unregister_sysctl_table(callhome_sysctl_header);
+out_sclp:
+	sclp_unregister(&sclp_async_register);
+out:
+	return rc;
+}
+module_init(sclp_async_init);
+
+static void __exit sclp_async_exit(void)
+{
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &call_home_panic_nb);
+	unregister_sysctl_table(callhome_sysctl_header);
+	sclp_unregister(&sclp_async_register);
+	free_page((unsigned long) sccb);
+	kfree(request);
+}
+module_exit(sclp_async_exit);
+
+MODULE_AUTHOR("Copyright IBM Corp. 2009");
+MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_cmd.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_cmd.c
new file mode 100644
index 0000000..e73b219
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_cmd.c
@@ -0,0 +1,765 @@
+/*
+ * Copyright IBM Corp. 2007, 2009
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "sclp_cmd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/memory.h>
+#include <linux/platform_device.h>
+#include <asm/chpid.h>
+#include <asm/sclp.h>
+#include <asm/setup.h>
+#include <asm/ctl_reg.h>
+
+#include "sclp.h"
+
+#define SCLP_CMDW_READ_SCP_INFO		0x00020001
+#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001
+
+struct read_info_sccb {
+	struct	sccb_header header;	/* 0-7 */
+	u16	rnmax;			/* 8-9 */
+	u8	rnsize;			/* 10 */
+	u8	_reserved0[24 - 11];	/* 11-15 */
+	u8	loadparm[8];		/* 24-31 */
+	u8	_reserved1[48 - 32];	/* 32-47 */
+	u64	facilities;		/* 48-55 */
+	u8	_reserved2[84 - 56];	/* 56-83 */
+	u8	fac84;			/* 84 */
+	u8	_reserved3[91 - 85];	/* 85-90 */
+	u8	flags;			/* 91 */
+	u8	_reserved4[100 - 92];	/* 92-99 */
+	u32	rnsize2;		/* 100-103 */
+	u64	rnmax2;			/* 104-111 */
+	u8	_reserved5[4096 - 112];	/* 112-4095 */
+} __attribute__((packed, aligned(PAGE_SIZE)));
+
+static struct read_info_sccb __initdata early_read_info_sccb;
+static int __initdata early_read_info_sccb_valid;
+
+u64 sclp_facilities;
+static u8 sclp_fac84;
+static unsigned long long rzm;
+static unsigned long long rnmax;
+
+static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
+{
+	int rc;
+
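+	/* Enable the service-signal external-interrupt subclass in CR0, start
+	 * the request and wait in an enabled wait PSW until the service-signal
+	 * interrupt arrives. */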
+	__ctl_set_bit(0, 9);
+	rc = sclp_service_call(cmd, sccb);
+	if (rc)
+		goto out;
+	__load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
+			PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
+	local_irq_disable();
+out:
+	/* Contents of the sccb might have changed. */
+	barrier();
+	__ctl_clear_bit(0, 9);
+	return rc;
+}
+
+static void __init sclp_read_info_early(void)
+{
+	int rc;
+	int i;
+	struct read_info_sccb *sccb;
+	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
+				  SCLP_CMDW_READ_SCP_INFO};
+
+	sccb = &early_read_info_sccb;
+	for (i = 0; i < ARRAY_SIZE(commands); i++) {
+		do {
+			memset(sccb, 0, sizeof(*sccb));
+			sccb->header.length = sizeof(*sccb);
+			sccb->header.function_code = 0x80;
+			sccb->header.control_mask[2] = 0x80;
+			rc = sclp_cmd_sync_early(commands[i], sccb);
+		} while (rc == -EBUSY);
+
+		if (rc)
+			break;
+		if (sccb->header.response_code == 0x10) {
+			early_read_info_sccb_valid = 1;
+			break;
+		}
+		if (sccb->header.response_code != 0x1f0)
+			break;
+	}
+}
+
+void __init sclp_facilities_detect(void)
+{
+	struct read_info_sccb *sccb;
+
+	sclp_read_info_early();
+	if (!early_read_info_sccb_valid)
+		return;
+
+	sccb = &early_read_info_sccb;
+	sclp_facilities = sccb->facilities;
+	sclp_fac84 = sccb->fac84;
+	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
+	rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
+	rzm <<= 20;
+}
+
+unsigned long long sclp_get_rnmax(void)
+{
+	return rnmax;
+}
+
+unsigned long long sclp_get_rzm(void)
+{
+	return rzm;
+}
+
+/*
+ * This function will be called after sclp_facilities_detect(), which gets
+ * called from early.c code. Therefore the sccb should have valid contents.
+ */
+void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
+{
+	struct read_info_sccb *sccb;
+
+	if (!early_read_info_sccb_valid)
+		return;
+	sccb = &early_read_info_sccb;
+	info->is_valid = 1;
+	if (sccb->flags & 0x2)
+		info->has_dump = 1;
+	memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
+}
+
+static void sclp_sync_callback(struct sclp_req *req, void *data)
+{
+	struct completion *completion = data;
+
+	complete(completion);
+}
+
+static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
+{
+	struct completion completion;
+	struct sclp_req *request;
+	int rc;
+
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (!request)
+		return -ENOMEM;
+	request->command = cmd;
+	request->sccb = sccb;
+	request->status = SCLP_REQ_FILLED;
+	request->callback = sclp_sync_callback;
+	request->callback_data = &completion;
+	init_completion(&completion);
+
+	/* Perform sclp request. */
+	rc = sclp_add_request(request);
+	if (rc)
+		goto out;
+	wait_for_completion(&completion);
+
+	/* Check response. */
+	if (request->status != SCLP_REQ_DONE) {
+		pr_warning("sync request failed (cmd=0x%08x, "
+			   "status=0x%02x)\n", cmd, request->status);
+		rc = -EIO;
+	}
+out:
+	kfree(request);
+	return rc;
+}
+
+/*
+ * CPU configuration related functions.
+ */
+
+#define SCLP_CMDW_READ_CPU_INFO		0x00010001
+#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
+#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001
+
+struct read_cpu_info_sccb {
+	struct	sccb_header header;
+	u16	nr_configured;
+	u16	offset_configured;
+	u16	nr_standby;
+	u16	offset_standby;
+	u8	reserved[4096 - 16];
+} __attribute__((packed, aligned(PAGE_SIZE)));
+
+static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
+			       struct read_cpu_info_sccb *sccb)
+{
+	char *page = (char *) sccb;
+
+	memset(info, 0, sizeof(*info));
+	info->configured = sccb->nr_configured;
+	info->standby = sccb->nr_standby;
+	info->combined = sccb->nr_configured + sccb->nr_standby;
+	info->has_cpu_type = sclp_fac84 & 0x1;
+	memcpy(&info->cpu, page + sccb->offset_configured,
+	       info->combined * sizeof(struct sclp_cpu_entry));
+}
+
+int sclp_get_cpu_info(struct sclp_cpu_info *info)
+{
+	int rc;
+	struct read_cpu_info_sccb *sccb;
+
+	if (!SCLP_HAS_CPU_INFO)
+		return -EOPNOTSUPP;
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = sizeof(*sccb);
+	rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
+	if (rc)
+		goto out;
+	if (sccb->header.response_code != 0x0010) {
+		pr_warning("readcpuinfo failed (response=0x%04x)\n",
+			   sccb->header.response_code);
+		rc = -EIO;
+		goto out;
+	}
+	sclp_fill_cpu_info(info, sccb);
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+struct cpu_configure_sccb {
+	struct sccb_header header;
+} __attribute__((packed, aligned(8)));
+
+static int do_cpu_configure(sclp_cmdw_t cmd)
+{
+	struct cpu_configure_sccb *sccb;
+	int rc;
+
+	if (!SCLP_HAS_CPU_RECONFIG)
+		return -EOPNOTSUPP;
+	/*
+	 * This is not going to cross a page boundary since we force
+	 * kmalloc to have a minimum alignment of 8 bytes on s390.
+	 */
+	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = sizeof(*sccb);
+	rc = do_sync_request(cmd, sccb);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+	case 0x0120:
+		break;
+	default:
+		pr_warning("configure cpu failed (cmd=0x%08x, "
+			   "response=0x%04x)\n", cmd,
+			   sccb->header.response_code);
+		rc = -EIO;
+		break;
+	}
+out:
+	kfree(sccb);
+	return rc;
+}
+
+int sclp_cpu_configure(u8 cpu)
+{
+	return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
+}
+
+int sclp_cpu_deconfigure(u8 cpu)
+{
+	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+
+static DEFINE_MUTEX(sclp_mem_mutex);
+static LIST_HEAD(sclp_mem_list);
+static u8 sclp_max_storage_id;
+static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
+static int sclp_mem_state_changed;
+
+struct memory_increment {
+	struct list_head list;
+	u16 rn;
+	int standby;
+	int usecount;
+};
+
+struct assign_storage_sccb {
+	struct sccb_header header;
+	u16 rn;
+} __packed;
+
+int arch_get_memory_phys_device(unsigned long start_pfn)
+{
+	if (!rzm)
+		return 0;
+	return PFN_PHYS(start_pfn) >> ilog2(rzm);
+}
+
+static unsigned long long rn2addr(u16 rn)
+{
+	return (unsigned long long) (rn - 1) * rzm;
+}
+
+static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
+{
+	struct assign_storage_sccb *sccb;
+	int rc;
+
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = PAGE_SIZE;
+	sccb->rn = rn;
+	rc = do_sync_request(cmd, sccb);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+	case 0x0120:
+		break;
+	default:
+		pr_warning("assign storage failed (cmd=0x%08x, "
+			   "response=0x%04x, rn=0x%04x)\n", cmd,
+			   sccb->header.response_code, rn);
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+static int sclp_assign_storage(u16 rn)
+{
+	return do_assign_storage(0x000d0001, rn);
+}
+
+static int sclp_unassign_storage(u16 rn)
+{
+	return do_assign_storage(0x000c0001, rn);
+}
+
+struct attach_storage_sccb {
+	struct sccb_header header;
+	u16 :16;
+	u16 assigned;
+	u32 :32;
+	u32 entries[0];
+} __packed;
+
+static int sclp_attach_storage(u8 id)
+{
+	struct attach_storage_sccb *sccb;
+	int rc;
+	int i;
+
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = PAGE_SIZE;
+	rc = do_sync_request(0x00080001 | id << 8, sccb);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+		set_bit(id, sclp_storage_ids);
+		for (i = 0; i < sccb->assigned; i++) {
+			if (sccb->entries[i])
+				sclp_unassign_storage(sccb->entries[i] >> 16);
+		}
+		break;
+	default:
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+static int sclp_mem_change_state(unsigned long start, unsigned long size,
+				 int online)
+{
+	struct memory_increment *incr;
+	unsigned long long istart;
+	int rc = 0;
+
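+	/* Walk all storage increments overlapping [start, start + size) and
+	 * assign or unassign them; usecount tracks overlapping memory sections
+	 * that share an increment. */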
+	list_for_each_entry(incr, &sclp_mem_list, list) {
+		istart = rn2addr(incr->rn);
+		if (start + size - 1 < istart)
+			break;
+		if (start > istart + rzm - 1)
+			continue;
+		if (online) {
+			if (incr->usecount++)
+				continue;
+			/*
+			 * Don't break the loop if one assign fails. The loop
+			 * may be walked again on CANCEL and we can't record
+			 * whether the state changed before the failure or not,
+			 * so continue and increase usecount for all increments.
+			 */
+			rc |= sclp_assign_storage(incr->rn);
+		} else {
+			if (--incr->usecount)
+				continue;
+			sclp_unassign_storage(incr->rn);
+		}
+	}
+	return rc ? -EIO : 0;
+}
+
+static int sclp_mem_notifier(struct notifier_block *nb,
+			     unsigned long action, void *data)
+{
+	unsigned long start, size;
+	struct memory_notify *arg;
+	unsigned char id;
+	int rc = 0;
+
+	arg = data;
+	start = arg->start_pfn << PAGE_SHIFT;
+	size = arg->nr_pages << PAGE_SHIFT;
+	mutex_lock(&sclp_mem_mutex);
+	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
+		sclp_attach_storage(id);
+	switch (action) {
+	case MEM_ONLINE:
+	case MEM_GOING_OFFLINE:
+	case MEM_CANCEL_OFFLINE:
+		break;
+	case MEM_GOING_ONLINE:
+		rc = sclp_mem_change_state(start, size, 1);
+		break;
+	case MEM_CANCEL_ONLINE:
+		sclp_mem_change_state(start, size, 0);
+		break;
+	case MEM_OFFLINE:
+		sclp_mem_change_state(start, size, 0);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	if (!rc)
+		sclp_mem_state_changed = 1;
+	mutex_unlock(&sclp_mem_mutex);
+	return rc ? NOTIFY_BAD : NOTIFY_OK;
+}
+
+static struct notifier_block sclp_mem_nb = {
+	.notifier_call = sclp_mem_notifier,
+};
+
+static void __init add_memory_merged(u16 rn)
+{
+	static u16 first_rn, num;
+	unsigned long long start, size;
+
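+	/* Coalesce consecutive increments into a single add_memory() call;
+	 * calling with rn == 0 flushes the last pending range. */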
+	if (rn && first_rn && (first_rn + num == rn)) {
+		num++;
+		return;
+	}
+	if (!first_rn)
+		goto skip_add;
+	start = rn2addr(first_rn);
+	size = (unsigned long long ) num * rzm;
+	if (start >= VMEM_MAX_PHYS)
+		goto skip_add;
+	if (start + size > VMEM_MAX_PHYS)
+		size = VMEM_MAX_PHYS - start;
+	if (memory_end_set && (start >= memory_end))
+		goto skip_add;
+	if (memory_end_set && (start + size > memory_end))
+		size = memory_end - start;
+	add_memory(0, start, size);
+skip_add:
+	first_rn = rn;
+	num = 1;
+}
+
+static void __init sclp_add_standby_memory(void)
+{
+	struct memory_increment *incr;
+
+	list_for_each_entry(incr, &sclp_mem_list, list)
+		if (incr->standby)
+			add_memory_merged(incr->rn);
+	add_memory_merged(0);
+}
+
+#define MEM_SCT_SIZE (1UL << SECTION_SIZE_BITS)
+
+static void __init insert_increment(u16 rn, int standby, int assigned)
+{
+	struct memory_increment *incr, *new_incr;
+	struct list_head *prev;
+	u16 last_rn;
+
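+	/* Keep the list sorted by rn; for increments that are not assigned,
+	 * use the first free rn found in the existing numbering. */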
+	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
+	if (!new_incr)
+		return;
+	new_incr->rn = rn;
+	new_incr->standby = standby;
+	if (!standby)
+		new_incr->usecount = rzm > MEM_SCT_SIZE ? rzm/MEM_SCT_SIZE : 1;
+	last_rn = 0;
+	prev = &sclp_mem_list;
+	list_for_each_entry(incr, &sclp_mem_list, list) {
+		if (assigned && incr->rn > rn)
+			break;
+		if (!assigned && incr->rn - last_rn > 1)
+			break;
+		last_rn = incr->rn;
+		prev = &incr->list;
+	}
+	if (!assigned)
+		new_incr->rn = last_rn + 1;
+	if (new_incr->rn > rnmax) {
+		kfree(new_incr);
+		return;
+	}
+	list_add(&new_incr->list, prev);
+}
+
+static int sclp_mem_freeze(struct device *dev)
+{
+	if (!sclp_mem_state_changed)
+		return 0;
+	pr_err("Memory hotplug state changed, suspend refused.\n");
+	return -EPERM;
+}
+
+struct read_storage_sccb {
+	struct sccb_header header;
+	u16 max_id;
+	u16 assigned;
+	u16 standby;
+	u16 :16;
+	u32 entries[0];
+} __packed;
+
+static const struct dev_pm_ops sclp_mem_pm_ops = {
+	.freeze		= sclp_mem_freeze,
+};
+
+static struct platform_driver sclp_mem_pdrv = {
+	.driver = {
+		.name	= "sclp_mem",
+		.pm	= &sclp_mem_pm_ops,
+	},
+};
+
+static int __init sclp_detect_standby_memory(void)
+{
+	struct platform_device *sclp_pdev;
+	struct read_storage_sccb *sccb;
+	int i, id, assigned, rc;
+
+	if (!early_read_info_sccb_valid)
+		return 0;
+	if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
+		return 0;
+	rc = -ENOMEM;
+	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		goto out;
+	assigned = 0;
+	for (id = 0; id <= sclp_max_storage_id; id++) {
+		memset(sccb, 0, PAGE_SIZE);
+		sccb->header.length = PAGE_SIZE;
+		rc = do_sync_request(0x00040001 | id << 8, sccb);
+		if (rc)
+			goto out;
+		switch (sccb->header.response_code) {
+		case 0x0010:
+			set_bit(id, sclp_storage_ids);
+			for (i = 0; i < sccb->assigned; i++) {
+				if (!sccb->entries[i])
+					continue;
+				assigned++;
+				insert_increment(sccb->entries[i] >> 16, 0, 1);
+			}
+			break;
+		case 0x0310:
+			break;
+		case 0x0410:
+			for (i = 0; i < sccb->assigned; i++) {
+				if (!sccb->entries[i])
+					continue;
+				assigned++;
+				insert_increment(sccb->entries[i] >> 16, 1, 1);
+			}
+			break;
+		default:
+			rc = -EIO;
+			break;
+		}
+		if (!rc)
+			sclp_max_storage_id = sccb->max_id;
+	}
+	if (rc || list_empty(&sclp_mem_list))
+		goto out;
+	for (i = 1; i <= rnmax - assigned; i++)
+		insert_increment(0, 1, 0);
+	rc = register_memory_notifier(&sclp_mem_nb);
+	if (rc)
+		goto out;
+	rc = platform_driver_register(&sclp_mem_pdrv);
+	if (rc)
+		goto out;
+	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
+	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
+	if (rc)
+		goto out_driver;
+	sclp_add_standby_memory();
+	goto out;
+out_driver:
+	platform_driver_unregister(&sclp_mem_pdrv);
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+__initcall(sclp_detect_standby_memory);
+
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+/*
+ * Channel path configuration related functions.
+ */
+
+#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
+#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
+#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001
+
+struct chp_cfg_sccb {
+	struct sccb_header header;
+	u8 ccm;
+	u8 reserved[6];
+	u8 cssid;
+} __attribute__((packed));
+
+static int do_chp_configure(sclp_cmdw_t cmd)
+{
+	struct chp_cfg_sccb *sccb;
+	int rc;
+
+	if (!SCLP_HAS_CHP_RECONFIG)
+		return -EOPNOTSUPP;
+	/* Prepare sccb. */
+	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = sizeof(*sccb);
+	rc = do_sync_request(cmd, sccb);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+	case 0x0120:
+	case 0x0440:
+	case 0x0450:
+		break;
+	default:
+		pr_warning("configure channel-path failed "
+			   "(cmd=0x%08x, response=0x%04x)\n", cmd,
+			   sccb->header.response_code);
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+/**
+ * sclp_chp_configure - perform configure channel-path sclp command
+ * @chpid: channel-path ID
+ *
+ * Perform the configure channel-path SCLP command for the specified chpid.
+ * Return 0 after command successfully finished, non-zero otherwise.
+ */
+int sclp_chp_configure(struct chp_id chpid)
+{
+	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
+}
+
+/**
+ * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
+ * @chpid: channel-path ID
+ *
+ * Perform the deconfigure channel-path SCLP command for the specified chpid
+ * and wait for completion. On success return 0. Return non-zero otherwise.
+ */
+int sclp_chp_deconfigure(struct chp_id chpid)
+{
+	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
+}
+
+struct chp_info_sccb {
+	struct sccb_header header;
+	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
+	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
+	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
+	u8 ccm;
+	u8 reserved[6];
+	u8 cssid;
+} __attribute__((packed));
+
+/**
+ * sclp_chp_read_info - perform read channel-path information sclp command
+ * @info: resulting channel-path information data
+ *
+ * Perform read channel-path information sclp command and wait for completion.
+ * On success, store channel-path information in @info and return 0. Return
+ * non-zero otherwise.
+ */
+int sclp_chp_read_info(struct sclp_chp_info *info)
+{
+	struct chp_info_sccb *sccb;
+	int rc;
+
+	if (!SCLP_HAS_CHP_INFO)
+		return -EOPNOTSUPP;
+	/* Prepare sccb. */
+	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = sizeof(*sccb);
+	rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
+	if (rc)
+		goto out;
+	if (sccb->header.response_code != 0x0010) {
+		pr_warning("read channel-path info failed "
+			   "(response=0x%04x)\n", sccb->header.response_code);
+		rc = -EIO;
+		goto out;
+	}
+	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
+	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
+	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_con.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_con.c
new file mode 100644
index 0000000..ecf45c5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_con.c
@@ -0,0 +1,327 @@
+/*
+ * SCLP line mode console driver
+ *
+ * Copyright IBM Corp. 1999, 2009
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kmod.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/termios.h>
+#include <linux/err.h>
+#include <linux/reboot.h>
+#include <linux/gfp.h>
+
+#include "sclp.h"
+#include "sclp_rw.h"
+#include "sclp_tty.h"
+
+#define sclp_console_major 4		/* TTYAUX_MAJOR */
+#define sclp_console_minor 64
+#define sclp_console_name  "ttyS"
+
+/* Lock to guard over changes to global variables */
+static spinlock_t sclp_con_lock;
+/* List of free pages that can be used for console output buffering */
+static struct list_head sclp_con_pages;
+/* List of full struct sclp_buffer structures ready for output */
+static struct list_head sclp_con_outqueue;
+/* Pointer to current console buffer */
+static struct sclp_buffer *sclp_conbuf;
+/* Timer for delayed output of console messages */
+static struct timer_list sclp_con_timer;
+/* Suspend mode flag */
+static int sclp_con_suspended;
+/* Flag that output queue is currently running */
+static int sclp_con_queue_running;
+
+/* Output format for console messages */
+static unsigned short sclp_con_columns;
+static unsigned short sclp_con_width_htab;
+
+static void
+sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
+{
+	unsigned long flags;
+	void *page;
+
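+	/* Return the finished page to the free list and start the next queued
+	 * buffer; loop as long as sclp_emit_buffer() fails synchronously. */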
+	do {
+		page = sclp_unmake_buffer(buffer);
+		spin_lock_irqsave(&sclp_con_lock, flags);
+
+		/* Remove buffer from outqueue */
+		list_del(&buffer->list);
+		list_add_tail((struct list_head *) page, &sclp_con_pages);
+
+		/* Check if there is a pending buffer on the out queue. */
+		buffer = NULL;
+		if (!list_empty(&sclp_con_outqueue))
+			buffer = list_first_entry(&sclp_con_outqueue,
+						  struct sclp_buffer, list);
+		if (!buffer || sclp_con_suspended) {
+			sclp_con_queue_running = 0;
+			spin_unlock_irqrestore(&sclp_con_lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&sclp_con_lock, flags);
+	} while (sclp_emit_buffer(buffer, sclp_conbuf_callback));
+}
+
+/*
+ * Finalize and emit first pending buffer.
+ */
+static void sclp_conbuf_emit(void)
+{
+	struct sclp_buffer* buffer;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	if (sclp_conbuf)
+		list_add_tail(&sclp_conbuf->list, &sclp_con_outqueue);
+	sclp_conbuf = NULL;
+	if (sclp_con_queue_running || sclp_con_suspended)
+		goto out_unlock;
+	if (list_empty(&sclp_con_outqueue))
+		goto out_unlock;
+	buffer = list_first_entry(&sclp_con_outqueue, struct sclp_buffer,
+				  list);
+	sclp_con_queue_running = 1;
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+
+	rc = sclp_emit_buffer(buffer, sclp_conbuf_callback);
+	if (rc)
+		sclp_conbuf_callback(buffer, rc);
+	return;
+out_unlock:
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+}
+
+/*
+ * Wait until out queue is empty
+ */
+static void sclp_console_sync_queue(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	if (timer_pending(&sclp_con_timer))
+		del_timer(&sclp_con_timer);
+	while (sclp_con_queue_running) {
+		spin_unlock_irqrestore(&sclp_con_lock, flags);
+		sclp_sync_wait();
+		spin_lock_irqsave(&sclp_con_lock, flags);
+	}
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+}
+
+/*
+ * When this routine is called from the timer, flush the temporary
+ * write buffer without waiting for a final new line.
+ */
+static void
+sclp_console_timeout(unsigned long data)
+{
+	sclp_conbuf_emit();
+}
+
+/*
+ * Writes the given message to the S/390 system console
+ */
+static void
+sclp_console_write(struct console *console, const char *message,
+		   unsigned int count)
+{
+	unsigned long flags;
+	void *page;
+	int written;
+
+	if (count == 0)
+		return;
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	/*
+	 * process escape characters, write message into buffer,
+	 * send buffer to SCLP
+	 */
+	do {
+		/* make sure we have a console output buffer */
+		if (sclp_conbuf == NULL) {
+			while (list_empty(&sclp_con_pages)) {
+				if (sclp_con_suspended)
+					goto out;
+				spin_unlock_irqrestore(&sclp_con_lock, flags);
+				sclp_sync_wait();
+				spin_lock_irqsave(&sclp_con_lock, flags);
+			}
+			page = sclp_con_pages.next;
+			list_del((struct list_head *) page);
+			sclp_conbuf = sclp_make_buffer(page, sclp_con_columns,
+						       sclp_con_width_htab);
+		}
+		/* try to write the string to the current output buffer */
+		written = sclp_write(sclp_conbuf, (const unsigned char *)
+				     message, count);
+		if (written == count)
+			break;
+		/*
+		 * Not all characters could be written to the current
+		 * output buffer. Emit the buffer, create a new buffer
+		 * and then output the rest of the string.
+		 */
+		spin_unlock_irqrestore(&sclp_con_lock, flags);
+		sclp_conbuf_emit();
+		spin_lock_irqsave(&sclp_con_lock, flags);
+		message += written;
+		count -= written;
+	} while (count > 0);
+	/* Set up a timer to output the current console buffer after 1/10 second */
+	if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
+	    !timer_pending(&sclp_con_timer)) {
+		init_timer(&sclp_con_timer);
+		sclp_con_timer.function = sclp_console_timeout;
+		sclp_con_timer.data = 0UL;
+		sclp_con_timer.expires = jiffies + HZ/10;
+		add_timer(&sclp_con_timer);
+	}
+out:
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+}
+
+static struct tty_driver *
+sclp_console_device(struct console *c, int *index)
+{
+	*index = c->index;
+	return sclp_tty_driver;
+}
+
+/*
+ * Make sure that all buffers will be flushed to the SCLP.
+ */
+static void
+sclp_console_flush(void)
+{
+	sclp_conbuf_emit();
+	sclp_console_sync_queue();
+}
+
+/*
+ * Resume console: If there are cached messages, emit them.
+ */
+static void sclp_console_resume(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	sclp_con_suspended = 0;
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+	sclp_conbuf_emit();
+}
+
+/*
+ * Suspend console: Set suspend flag and flush console
+ */
+static void sclp_console_suspend(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	sclp_con_suspended = 1;
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+	sclp_console_flush();
+}
+
+static int sclp_console_notify(struct notifier_block *self,
+			       unsigned long event, void *data)
+{
+	sclp_console_flush();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+	.notifier_call = sclp_console_notify,
+	.priority = SCLP_PANIC_PRIO_CLIENT,
+};
+
+static struct notifier_block on_reboot_nb = {
+	.notifier_call = sclp_console_notify,
+	.priority = 1,
+};
+
+/*
+ * Used to register the SCLP console with the kernel and to give
+ * printk the necessary information.
+ */
+static struct console sclp_console =
+{
+	.name = sclp_console_name,
+	.write = sclp_console_write,
+	.device = sclp_console_device,
+	.flags = CON_PRINTBUFFER,
+	.index = 0 /* ttyS0 */
+};
+
+/*
+ * This function is called for SCLP suspend and resume events.
+ */
+void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event)
+{
+	switch (sclp_pm_event) {
+	case SCLP_PM_EVENT_FREEZE:
+		sclp_console_suspend();
+		break;
+	case SCLP_PM_EVENT_RESTORE:
+	case SCLP_PM_EVENT_THAW:
+		sclp_console_resume();
+		break;
+	}
+}
+
+/*
+ * called by console_init() in drivers/char/tty_io.c at boot-time.
+ */
+static int __init
+sclp_console_init(void)
+{
+	void *page;
+	int i;
+	int rc;
+
+	if (!CONSOLE_IS_SCLP)
+		return 0;
+	rc = sclp_rw_init();
+	if (rc)
+		return rc;
+	/* Allocate pages for output buffering */
+	INIT_LIST_HEAD(&sclp_con_pages);
+	for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
+		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		list_add_tail(page, &sclp_con_pages);
+	}
+	INIT_LIST_HEAD(&sclp_con_outqueue);
+	spin_lock_init(&sclp_con_lock);
+	sclp_conbuf = NULL;
+	init_timer(&sclp_con_timer);
+
+	/* Set output format */
+	if (MACHINE_IS_VM)
+		/*
+		 * save 4 characters for the CPU number
+		 * written at start of each line by VM/CP
+		 */
+		sclp_con_columns = 76;
+	else
+		sclp_con_columns = 80;
+	sclp_con_width_htab = 8;
+
+	/* enable printk-access to this driver */
+	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+	register_reboot_notifier(&on_reboot_nb);
+	register_console(&sclp_console);
+	return 0;
+}
+
+console_initcall(sclp_console_init);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_config.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_config.c
new file mode 100644
index 0000000..3c03c10
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_config.c
@@ -0,0 +1,79 @@
+/*
+ *  drivers/s390/char/sclp_config.c
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "sclp_config"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <asm/smp.h>
+
+#include "sclp.h"
+
+struct conf_mgm_data {
+	u8 reserved;
+	u8 ev_qualifier;
+} __attribute__((packed));
+
+#define EV_QUAL_CPU_CHANGE	1
+#define EV_QUAL_CAP_CHANGE	3
+
+static struct work_struct sclp_cpu_capability_work;
+static struct work_struct sclp_cpu_change_work;
+
+static void sclp_cpu_capability_notify(struct work_struct *work)
+{
+	int cpu;
+	struct device *dev;
+
+	s390_adjust_jiffies();
+	pr_warning("cpu capability changed.\n");
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		dev = get_cpu_device(cpu);
+		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+	}
+	put_online_cpus();
+}
+
+static void __ref sclp_cpu_change_notify(struct work_struct *work)
+{
+	smp_rescan_cpus();
+}
+
+static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
+{
+	struct conf_mgm_data *cdata;
+
+	cdata = (struct conf_mgm_data *)(evbuf + 1);
+	switch (cdata->ev_qualifier) {
+	case EV_QUAL_CPU_CHANGE:
+		schedule_work(&sclp_cpu_change_work);
+		break;
+	case EV_QUAL_CAP_CHANGE:
+		schedule_work(&sclp_cpu_capability_work);
+		break;
+	}
+}
+
+static struct sclp_register sclp_conf_register =
+{
+	.receive_mask = EVTYP_CONFMGMDATA_MASK,
+	.receiver_fn  = sclp_conf_receiver_fn,
+};
+
+static int __init sclp_conf_init(void)
+{
+	INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
+	INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
+	return sclp_register(&sclp_conf_register);
+}
+
+__initcall(sclp_conf_init);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_cpi.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_cpi.c
new file mode 100644
index 0000000..5716487
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_cpi.c
@@ -0,0 +1,41 @@
+/*
+ *  drivers/s390/char/sclp_cpi.c
+ *    SCLP control program identification
+ *
+ *    Copyright IBM Corp. 2001, 2007
+ *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *		 Michael Ernst <mernst@de.ibm.com>
+ */
+
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include "sclp_cpi_sys.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Identify this operating system instance "
+		   "to the System z hardware");
+MODULE_AUTHOR("Martin Peschke <mpeschke@de.ibm.com>, "
+	      "Michael Ernst <mernst@de.ibm.com>");
+
+static char *system_name = "";
+static char *sysplex_name = "";
+
+module_param(system_name, charp, 0);
+MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters");
+module_param(sysplex_name, charp, 0);
+MODULE_PARM_DESC(sysplex_name, "if applicable - max. 8 characters");
+
+static int __init cpi_module_init(void)
+{
+	return sclp_cpi_set_data(system_name, sysplex_name, "LINUX",
+				 LINUX_VERSION_CODE);
+}
+
+static void __exit cpi_module_exit(void)
+{
+}
+
+module_init(cpi_module_init);
+module_exit(cpi_module_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_cpi_sys.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_cpi_sys.c
new file mode 100644
index 0000000..bd1b9c9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_cpi_sys.c
@@ -0,0 +1,430 @@
+/*
+ *  drivers/s390/char/sclp_cpi_sys.c
+ *    SCLP control program identification sysfs interface
+ *
+ *    Copyright IBM Corp. 2001, 2007
+ *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *		 Michael Ernst <mernst@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "sclp_cpi"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/kmod.h>
+#include <linux/timer.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/export.h>
+#include <asm/ebcdic.h>
+#include <asm/sclp.h>
+
+#include "sclp.h"
+#include "sclp_rw.h"
+#include "sclp_cpi_sys.h"
+
+#define CPI_LENGTH_NAME 8
+#define CPI_LENGTH_LEVEL 16
+
+static DEFINE_MUTEX(sclp_cpi_mutex);
+
+struct cpi_evbuf {
+	struct evbuf_header header;
+	u8	id_format;
+	u8	reserved0;
+	u8	system_type[CPI_LENGTH_NAME];
+	u64	reserved1;
+	u8	system_name[CPI_LENGTH_NAME];
+	u64	reserved2;
+	u64	system_level;
+	u64	reserved3;
+	u8	sysplex_name[CPI_LENGTH_NAME];
+	u8	reserved4[16];
+} __attribute__((packed));
+
+struct cpi_sccb {
+	struct sccb_header header;
+	struct cpi_evbuf cpi_evbuf;
+} __attribute__((packed));
+
+static struct sclp_register sclp_cpi_event = {
+	.send_mask = EVTYP_CTLPROGIDENT_MASK,
+};
+
+static char system_name[CPI_LENGTH_NAME + 1];
+static char sysplex_name[CPI_LENGTH_NAME + 1];
+static char system_type[CPI_LENGTH_NAME + 1];
+static u64 system_level;
+
+static void set_data(char *field, char *data)
+{
+	memset(field, ' ', CPI_LENGTH_NAME);
+	memcpy(field, data, strlen(data));
+	sclp_ascebc_str(field, CPI_LENGTH_NAME);
+}
+
+static void cpi_callback(struct sclp_req *req, void *data)
+{
+	struct completion *completion = data;
+
+	complete(completion);
+}
+
+static struct sclp_req *cpi_prepare_req(void)
+{
+	struct sclp_req *req;
+	struct cpi_sccb *sccb;
+	struct cpi_evbuf *evb;
+
+	req = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+	sccb = (struct cpi_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb) {
+		kfree(req);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* setup SCCB for Control-Program Identification */
+	sccb->header.length = sizeof(struct cpi_sccb);
+	sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
+	sccb->cpi_evbuf.header.type = 0x0b;
+	evb = &sccb->cpi_evbuf;
+
+	/* set system type */
+	set_data(evb->system_type, system_type);
+
+	/* set system name */
+	set_data(evb->system_name, system_name);
+
+	/* set system level */
+	evb->system_level = system_level;
+
+	/* set sysplex name */
+	set_data(evb->sysplex_name, sysplex_name);
+
+	/* prepare request data structure presented to SCLP driver */
+	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
+	req->sccb = sccb;
+	req->status = SCLP_REQ_FILLED;
+	req->callback = cpi_callback;
+	return req;
+}
+
+static void cpi_free_req(struct sclp_req *req)
+{
+	free_page((unsigned long) req->sccb);
+	kfree(req);
+}
+
+static int cpi_req(void)
+{
+	struct completion completion;
+	struct sclp_req *req;
+	int rc;
+	int response;
+
+	rc = sclp_register(&sclp_cpi_event);
+	if (rc)
+		goto out;
+	if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) {
+		rc = -EOPNOTSUPP;
+		goto out_unregister;
+	}
+
+	req = cpi_prepare_req();
+	if (IS_ERR(req)) {
+		rc = PTR_ERR(req);
+		goto out_unregister;
+	}
+
+	init_completion(&completion);
+	req->callback_data = &completion;
+
+	/* Add request to sclp queue */
+	rc = sclp_add_request(req);
+	if (rc)
+		goto out_free_req;
+
+	wait_for_completion(&completion);
+
+	if (req->status != SCLP_REQ_DONE) {
+		pr_warning("request failed (status=0x%02x)\n",
+			   req->status);
+		rc = -EIO;
+		goto out_free_req;
+	}
+
+	response = ((struct cpi_sccb *) req->sccb)->header.response_code;
+	if (response != 0x0020) {
+		pr_warning("request failed with response code 0x%x\n",
+			   response);
+		rc = -EIO;
+	}
+
+out_free_req:
+	cpi_free_req(req);
+
+out_unregister:
+	sclp_unregister(&sclp_cpi_event);
+
+out:
+	return rc;
+}
+
+static int check_string(const char *attr, const char *str)
+{
+	size_t len;
+	size_t i;
+
+	len = strlen(str);
+
+	if ((len > 0) && (str[len - 1] == '\n'))
+		len--;
+
+	if (len > CPI_LENGTH_NAME)
+		return -EINVAL;
+
+	for (i = 0; i < len ; i++) {
+		if (isalpha(str[i]) || isdigit(str[i]) ||
+		    strchr("$@# ", str[i]))
+			continue;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void set_string(char *attr, const char *value)
+{
+	size_t len;
+	size_t i;
+
+	len = strlen(value);
+
+	if ((len > 0) && (value[len - 1] == '\n'))
+		len--;
+
+	for (i = 0; i < CPI_LENGTH_NAME; i++) {
+		if (i < len)
+			attr[i] = toupper(value[i]);
+		else
+			attr[i] = ' ';
+	}
+}
+
+static ssize_t system_name_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *page)
+{
+	int rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", system_name);
+	mutex_unlock(&sclp_cpi_mutex);
+	return rc;
+}
+
+static ssize_t system_name_store(struct kobject *kobj,
+				 struct kobj_attribute *attr,
+				 const char *buf,
+				 size_t len)
+{
+	int rc;
+
+	rc = check_string("system_name", buf);
+	if (rc)
+		return rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	set_string(system_name, buf);
+	mutex_unlock(&sclp_cpi_mutex);
+
+	return len;
+}
+
+static struct kobj_attribute system_name_attr =
+	__ATTR(system_name, 0644, system_name_show, system_name_store);
+
+static ssize_t sysplex_name_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *page)
+{
+	int rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
+	mutex_unlock(&sclp_cpi_mutex);
+	return rc;
+}
+
+static ssize_t sysplex_name_store(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  const char *buf,
+				  size_t len)
+{
+	int rc;
+
+	rc = check_string("sysplex_name", buf);
+	if (rc)
+		return rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	set_string(sysplex_name, buf);
+	mutex_unlock(&sclp_cpi_mutex);
+
+	return len;
+}
+
+static struct kobj_attribute sysplex_name_attr =
+	__ATTR(sysplex_name, 0644, sysplex_name_show, sysplex_name_store);
+
+static ssize_t system_type_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *page)
+{
+	int rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", system_type);
+	mutex_unlock(&sclp_cpi_mutex);
+	return rc;
+}
+
+static ssize_t system_type_store(struct kobject *kobj,
+				 struct kobj_attribute *attr,
+				 const char *buf,
+				 size_t len)
+{
+	int rc;
+
+	rc = check_string("system_type", buf);
+	if (rc)
+		return rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	set_string(system_type, buf);
+	mutex_unlock(&sclp_cpi_mutex);
+
+	return len;
+}
+
+static struct kobj_attribute system_type_attr =
+	__ATTR(system_type, 0644, system_type_show, system_type_store);
+
+static ssize_t system_level_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *page)
+{
+	unsigned long long level;
+
+	mutex_lock(&sclp_cpi_mutex);
+	level = system_level;
+	mutex_unlock(&sclp_cpi_mutex);
+	return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
+}
+
+static ssize_t system_level_store(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  const char *buf,
+				  size_t len)
+{
+	unsigned long long level;
+	char *endp;
+
+	level = simple_strtoull(buf, &endp, 16);
+
+	if (endp == buf)
+		return -EINVAL;
+	if (*endp == '\n')
+		endp++;
+	if (*endp)
+		return -EINVAL;
+
+	mutex_lock(&sclp_cpi_mutex);
+	system_level = level;
+	mutex_unlock(&sclp_cpi_mutex);
+	return len;
+}
+
+static struct kobj_attribute system_level_attr =
+	__ATTR(system_level, 0644, system_level_show, system_level_store);
+
+static ssize_t set_store(struct kobject *kobj,
+			 struct kobj_attribute *attr,
+			 const char *buf, size_t len)
+{
+	int rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	rc = cpi_req();
+	mutex_unlock(&sclp_cpi_mutex);
+	if (rc)
+		return rc;
+
+	return len;
+}
+
+static struct kobj_attribute set_attr = __ATTR(set, 0200, NULL, set_store);
+
+static struct attribute *cpi_attrs[] = {
+	&system_name_attr.attr,
+	&sysplex_name_attr.attr,
+	&system_type_attr.attr,
+	&system_level_attr.attr,
+	&set_attr.attr,
+	NULL,
+};
+
+static struct attribute_group cpi_attr_group = {
+	.attrs = cpi_attrs,
+};
+
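+/*
+ * The attributes above are exposed under /sys/firmware/cpi/.  Illustrative
+ * usage from user space (the value "LINUX01" is just an example name):
+ *   echo LINUX01 > /sys/firmware/cpi/system_name
+ *   echo 1 > /sys/firmware/cpi/set     (triggers cpi_req())
+ */
+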
+static struct kset *cpi_kset;
+
+int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type,
+		      const u64 level)
+{
+	int rc;
+
+	rc = check_string("system_name", system);
+	if (rc)
+		return rc;
+	rc = check_string("sysplex_name", sysplex);
+	if (rc)
+		return rc;
+	rc = check_string("system_type", type);
+	if (rc)
+		return rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	set_string(system_name, system);
+	set_string(sysplex_name, sysplex);
+	set_string(system_type, type);
+	system_level = level;
+
+	rc = cpi_req();
+	mutex_unlock(&sclp_cpi_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL(sclp_cpi_set_data);
+
+static int __init cpi_init(void)
+{
+	int rc;
+
+	cpi_kset = kset_create_and_add("cpi", NULL, firmware_kobj);
+	if (!cpi_kset)
+		return -ENOMEM;
+
+	rc = sysfs_create_group(&cpi_kset->kobj, &cpi_attr_group);
+	if (rc)
+		kset_unregister(cpi_kset);
+
+	return rc;
+}
+
+__initcall(cpi_init);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_cpi_sys.h b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_cpi_sys.h
new file mode 100644
index 0000000..deef3e6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_cpi_sys.h
@@ -0,0 +1,15 @@
+/*
+ *  drivers/s390/char/sclp_cpi_sys.h
+ *    SCLP control program identification sysfs interface
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Michael Ernst <mernst@de.ibm.com>
+ */
+
+#ifndef __SCLP_CPI_SYS_H__
+#define __SCLP_CPI_SYS_H__
+
+int sclp_cpi_set_data(const char *system, const char *sysplex,
+		      const char *type, u64 level);
+
+#endif	 /* __SCLP_CPI_SYS_H__ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_ocf.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_ocf.c
new file mode 100644
index 0000000..ab294d5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_ocf.c
@@ -0,0 +1,145 @@
+/*
+ *  drivers/s390/char/sclp_ocf.c
+ *    SCLP OCF communication parameters sysfs interface
+ *
+ *    Copyright IBM Corp. 2011
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "sclp_ocf"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/kmod.h>
+#include <linux/timer.h>
+#include <linux/err.h>
+#include <asm/ebcdic.h>
+#include <asm/sclp.h>
+
+#include "sclp.h"
+
+#define OCF_LENGTH_HMC_NETWORK 8UL
+#define OCF_LENGTH_CPC_NAME 8UL
+
+static char hmc_network[OCF_LENGTH_HMC_NETWORK + 1];
+static char cpc_name[OCF_LENGTH_CPC_NAME + 1];
+
+static DEFINE_SPINLOCK(sclp_ocf_lock);
+static struct work_struct sclp_ocf_change_work;
+
+static struct kset *ocf_kset;
+
+static void sclp_ocf_change_notify(struct work_struct *work)
+{
+	kobject_uevent(&ocf_kset->kobj, KOBJ_CHANGE);
+}
+
+/* Handler for OCF event. Look for the CPC image name. */
+static void sclp_ocf_handler(struct evbuf_header *evbuf)
+{
+	struct gds_vector *v;
+	struct gds_subvector *sv, *netid, *cpc;
+	size_t size;
+
+	/* Find the 0x9f00 block. */
+	v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
+				 0x9f00);
+	if (!v)
+		return;
+	/* Find the 0x9f22 block inside the 0x9f00 block. */
+	v = sclp_find_gds_vector(v + 1, (void *) v + v->length, 0x9f22);
+	if (!v)
+		return;
+	/* Find the 0x81 block inside the 0x9f22 block. */
+	sv = sclp_find_gds_subvector(v + 1, (void *) v + v->length, 0x81);
+	if (!sv)
+		return;
+	/* Find the 0x01 block inside the 0x81 block. */
+	netid = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 1);
+	/* Find the 0x02 block inside the 0x81 block. */
+	cpc = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 2);
+	/* Copy network name and cpc name. */
+	spin_lock(&sclp_ocf_lock);
+	if (netid) {
+		size = min(OCF_LENGTH_HMC_NETWORK, (size_t) netid->length);
+		memcpy(hmc_network, netid + 1, size);
+		EBCASC(hmc_network, size);
+		hmc_network[size] = 0;
+	}
+	if (cpc) {
+		size = min(OCF_LENGTH_CPC_NAME, (size_t) cpc->length);
+		memcpy(cpc_name, cpc + 1, size);
+		EBCASC(cpc_name, size);
+		cpc_name[size] = 0;
+	}
+	spin_unlock(&sclp_ocf_lock);
+	schedule_work(&sclp_ocf_change_work);
+}
+
+static struct sclp_register sclp_ocf_event = {
+	.receive_mask = EVTYP_OCF_MASK,
+	.receiver_fn = sclp_ocf_handler,
+};
+
+static ssize_t cpc_name_show(struct kobject *kobj,
+			     struct kobj_attribute *attr, char *page)
+{
+	int rc;
+
+	spin_lock_irq(&sclp_ocf_lock);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", cpc_name);
+	spin_unlock_irq(&sclp_ocf_lock);
+	return rc;
+}
+
+static struct kobj_attribute cpc_name_attr =
+	__ATTR(cpc_name, 0444, cpc_name_show, NULL);
+
+static ssize_t hmc_network_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *page)
+{
+	int rc;
+
+	spin_lock_irq(&sclp_ocf_lock);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", hmc_network);
+	spin_unlock_irq(&sclp_ocf_lock);
+	return rc;
+}
+
+static struct kobj_attribute hmc_network_attr =
+	__ATTR(hmc_network, 0444, hmc_network_show, NULL);
+
+static struct attribute *ocf_attrs[] = {
+	&cpc_name_attr.attr,
+	&hmc_network_attr.attr,
+	NULL,
+};
+
+static struct attribute_group ocf_attr_group = {
+	.attrs = ocf_attrs,
+};
+
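+/*
+ * The read-only attributes above appear under /sys/firmware/ocf/, e.g.
+ *   cat /sys/firmware/ocf/cpc_name
+ */
+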
+static int __init ocf_init(void)
+{
+	int rc;
+
+	INIT_WORK(&sclp_ocf_change_work, sclp_ocf_change_notify);
+	ocf_kset = kset_create_and_add("ocf", NULL, firmware_kobj);
+	if (!ocf_kset)
+		return -ENOMEM;
+
+	rc = sysfs_create_group(&ocf_kset->kobj, &ocf_attr_group);
+	if (rc) {
+		kset_unregister(ocf_kset);
+		return rc;
+	}
+
+	return sclp_register(&sclp_ocf_event);
+}
+
+device_initcall(ocf_init);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_quiesce.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_quiesce.c
new file mode 100644
index 0000000..69df137
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_quiesce.c
@@ -0,0 +1,85 @@
+/*
+ *  drivers/s390/char/sclp_quiesce.c
+ *     signal quiesce handler
+ *
+ *  (C) Copyright IBM Corp. 1999,2004
+ *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *             Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/atomic.h>
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+#include "sclp.h"
+
+static void (*old_machine_restart)(char *);
+static void (*old_machine_halt)(void);
+static void (*old_machine_power_off)(void);
+
+/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
+static void do_machine_quiesce(void)
+{
+	psw_t quiesce_psw;
+
+	smp_send_stop();
+	quiesce_psw.mask =
+		PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_WAIT;
+	quiesce_psw.addr = 0xfff;
+	__load_psw(quiesce_psw);
+}
+
+/* Handler for quiesce event. Start shutdown procedure. */
+static void sclp_quiesce_handler(struct evbuf_header *evbuf)
+{
+	if (_machine_restart != (void *) do_machine_quiesce) {
+		old_machine_restart = _machine_restart;
+		old_machine_halt = _machine_halt;
+		old_machine_power_off = _machine_power_off;
+		_machine_restart = (void *) do_machine_quiesce;
+		_machine_halt = do_machine_quiesce;
+		_machine_power_off = do_machine_quiesce;
+	}
+	ctrl_alt_del();
+}
+
+/* Undo machine restart/halt/power_off modification on resume */
+static void sclp_quiesce_pm_event(struct sclp_register *reg,
+				  enum sclp_pm_event sclp_pm_event)
+{
+	switch (sclp_pm_event) {
+	case SCLP_PM_EVENT_RESTORE:
+		if (old_machine_restart) {
+			_machine_restart = old_machine_restart;
+			_machine_halt = old_machine_halt;
+			_machine_power_off = old_machine_power_off;
+			old_machine_restart = NULL;
+			old_machine_halt = NULL;
+			old_machine_power_off = NULL;
+		}
+		break;
+	case SCLP_PM_EVENT_FREEZE:
+	case SCLP_PM_EVENT_THAW:
+		break;
+	}
+}
+
+static struct sclp_register sclp_quiesce_event = {
+	.receive_mask = EVTYP_SIGQUIESCE_MASK,
+	.receiver_fn = sclp_quiesce_handler,
+	.pm_event_fn = sclp_quiesce_pm_event
+};
+
+/* Initialize quiesce driver. */
+static int __init sclp_quiesce_init(void)
+{
+	return sclp_register(&sclp_quiesce_event);
+}
+
+module_init(sclp_quiesce_init);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_rw.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_rw.c
new file mode 100644
index 0000000..4be63be
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_rw.c
@@ -0,0 +1,474 @@
+/*
+ * driver: reading from and writing to system console on S/390 via SCLP
+ *
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kmod.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/ctype.h>
+#include <asm/uaccess.h>
+
+#include "sclp.h"
+#include "sclp_rw.h"
+
+/*
+ * The room for the SCCB (only for writing) is smaller than a page
+ * (the maximum size allowed by the SCLP documentation) because
+ * struct sclp_buffer is kept at the end of the same page, see
+ * sclp_make_buffer() below.
+ */
+#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
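+
+/*
+ * Rough page layout (illustrative sketch): the write_sccb sits at the
+ * start of the page, MTOs are appended behind it as text is written,
+ * and the struct sclp_buffer bookkeeping data occupies the end of the
+ * same page.  MAX_SCCB_ROOM is therefore PAGE_SIZE minus
+ * sizeof(struct sclp_buffer).
+ */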
+
+static void sclp_rw_pm_event(struct sclp_register *reg,
+			     enum sclp_pm_event sclp_pm_event)
+{
+	sclp_console_pm_event(sclp_pm_event);
+}
+
+/* Event type structure for write message and write priority message */
+static struct sclp_register sclp_rw_event = {
+	.send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK,
+	.pm_event_fn = sclp_rw_pm_event,
+};
+
+/*
+ * Setup a sclp write buffer. Gets a page as input (4K) and returns
+ * a pointer to a struct sclp_buffer structure that is located at the
+ * end of the input page. This reduces the buffer space by a few
+ * bytes but simplifies things.
+ */
+struct sclp_buffer *
+sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
+{
+	struct sclp_buffer *buffer;
+	struct write_sccb *sccb;
+
+	sccb = (struct write_sccb *) page;
+	/*
+	 * We keep the struct sclp_buffer structure at the end
+	 * of the sccb page.
+	 */
+	buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1;
+	buffer->sccb = sccb;
+	buffer->retry_count = 0;
+	buffer->mto_number = 0;
+	buffer->mto_char_sum = 0;
+	buffer->current_line = NULL;
+	buffer->current_length = 0;
+	buffer->columns = columns;
+	buffer->htab = htab;
+
+	/* initialize sccb */
+	memset(sccb, 0, sizeof(struct write_sccb));
+	sccb->header.length = sizeof(struct write_sccb);
+	sccb->msg_buf.header.length = sizeof(struct msg_buf);
+	sccb->msg_buf.header.type = EVTYP_MSG;
+	sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
+	sccb->msg_buf.mdb.header.type = 1;
+	sccb->msg_buf.mdb.header.tag = 0xD4C4C240;	/* ebcdic "MDB " */
+	sccb->msg_buf.mdb.header.revision_code = 1;
+	sccb->msg_buf.mdb.go.length = sizeof(struct go);
+	sccb->msg_buf.mdb.go.type = 1;
+
+	return buffer;
+}
+
+/*
+ * Return a pointer to the original page that has been used to create
+ * the buffer.
+ */
+void *
+sclp_unmake_buffer(struct sclp_buffer *buffer)
+{
+	return buffer->sccb;
+}
+
+/*
+ * Initialize a new Message Text Object (MTO) at the end of the provided buffer
+ * with enough room for max_len characters. Return 0 on success.
+ */
+static int
+sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
+{
+	struct write_sccb *sccb;
+	struct mto *mto;
+	int mto_size;
+
+	/* max size of new Message Text Object including message text  */
+	mto_size = sizeof(struct mto) + max_len;
+
+	/* check if current buffer sccb can contain the mto */
+	sccb = buffer->sccb;
+	if ((MAX_SCCB_ROOM - sccb->header.length) < mto_size)
+		return -ENOMEM;
+
+	/* find address of new message text object */
+	mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
+
+	/*
+	 * fill the new Message-Text Object,
+	 * starting behind the former last byte of the SCCB
+	 */
+	memset(mto, 0, sizeof(struct mto));
+	mto->length = sizeof(struct mto);
+	mto->type = 4;	/* message text object */
+	mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */
+
+	/* set pointer to first byte after struct mto. */
+	buffer->current_line = (char *) (mto + 1);
+	buffer->current_length = 0;
+
+	return 0;
+}
+
+/*
+ * Finalize MTO initialized by sclp_initialize_mto(), updating the sizes of
+ * MTO, enclosing MDB, event buffer and SCCB.
+ */
+static void
+sclp_finalize_mto(struct sclp_buffer *buffer)
+{
+	struct write_sccb *sccb;
+	struct mto *mto;
+	int str_len, mto_size;
+
+	str_len = buffer->current_length;
+	buffer->current_line = NULL;
+	buffer->current_length = 0;
+
+	/* real size of new Message Text Object including message text	*/
+	mto_size = sizeof(struct mto) + str_len;
+
+	/* find address of new message text object */
+	sccb = buffer->sccb;
+	mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
+
+	/* set size of message text object */
+	mto->length = mto_size;
+
+	/*
+	 * update values of sizes
+	 * (SCCB, Event(Message) Buffer, Message Data Block)
+	 */
+	sccb->header.length += mto_size;
+	sccb->msg_buf.header.length += mto_size;
+	sccb->msg_buf.mdb.header.length += mto_size;
+
+	/*
+	 * count number of buffered messages (= number of Message Text
+	 * Objects) and number of buffered characters
+	 * for the SCCB currently used for buffering and at all
+	 */
+	buffer->mto_number++;
+	buffer->mto_char_sum += str_len;
+}
+
+/*
+ * Process a message including escape characters and return the number of
+ * characters written to the output SCCB ("processed" means that it is not
+ * guaranteed that the characters have already been sent to the SCLP, only
+ * that this will be done at the latest the next time the SCLP is not busy).
+ */
+int
+sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
+{
+	int spaces, i_msg;
+	int rc;
+
+	/*
+	 * Parse msg for escape sequences (\t, \v, ...) and put the formatted
+	 * msg into an mto (created by sclp_initialize_mto).
+	 *
+	 * We have to do this work ourselves because there is no support for
+	 * these characters on the native machine and only partial support
+	 * under VM (why does VM interpret \n but the native machine doesn't?).
+	 *
+	 * Depending on the i/o-control setting the message is either written
+	 * immediately or we wait for a final new line that may come with the
+	 * next message.  In addition, a buffer overrun is avoided by writing
+	 * out the buffer's content once it is full.
+	 *
+	 * RESTRICTIONS:
+	 *
+	 * \r and \b only work within one line because we are not able to
+	 * modify previous output that has already been accepted by the SCLP.
+	 *
+	 * \t combined with a following \r is not correctly represented because
+	 * \t is expanded to some spaces but \r does not know about a
+	 * previous \t and decreases the current position by one column.
+	 * This keeps the implementation slim and quick.
+	 */
+	for (i_msg = 0; i_msg < count; i_msg++) {
+		switch (msg[i_msg]) {
+		case '\n':	/* new line, line feed (ASCII)	*/
+			/* check if new mto needs to be created */
+			if (buffer->current_line == NULL) {
+				rc = sclp_initialize_mto(buffer, 0);
+				if (rc)
+					return i_msg;
+			}
+			sclp_finalize_mto(buffer);
+			break;
+		case '\a':	/* bell - a single alarm, even for repeated bells */
+			/* set SCLP sound alarm bit in General Object */
+			buffer->sccb->msg_buf.mdb.go.general_msg_flags |=
+				GNRLMSGFLGS_SNDALRM;
+			break;
+		case '\t':	/* horizontal tabulator	 */
+			/* check if new mto needs to be created */
+			if (buffer->current_line == NULL) {
+				rc = sclp_initialize_mto(buffer,
+							 buffer->columns);
+				if (rc)
+					return i_msg;
+			}
+			/* "go to (next htab-boundary + 1, same line)" */
+			do {
+				if (buffer->current_length >= buffer->columns)
+					break;
+				/* ok, add a blank */
+				*buffer->current_line++ = 0x40;
+				buffer->current_length++;
+			} while (buffer->current_length % buffer->htab);
+			break;
+		case '\f':	/* form feed  */
+		case '\v':	/* vertical tabulator  */
+			/* "go to (actual column, actual line + 1)" */
+			/* = new line, leading spaces */
+			if (buffer->current_line != NULL) {
+				spaces = buffer->current_length;
+				sclp_finalize_mto(buffer);
+				rc = sclp_initialize_mto(buffer,
+							 buffer->columns);
+				if (rc)
+					return i_msg;
+				memset(buffer->current_line, 0x40, spaces);
+				buffer->current_line += spaces;
+				buffer->current_length = spaces;
+			} else {
+				/* on an empty line this is the same as \n */
+				rc = sclp_initialize_mto(buffer,
+							 buffer->columns);
+				if (rc)
+					return i_msg;
+				sclp_finalize_mto(buffer);
+			}
+			break;
+		case '\b':	/* backspace  */
+			/* "go to (actual column - 1, actual line)" */
+			/* decrement counter indicating position, */
+			/* do not remove last character */
+			if (buffer->current_line != NULL &&
+			    buffer->current_length > 0) {
+				buffer->current_length--;
+				buffer->current_line--;
+			}
+			break;
+		case 0x00:	/* end of string  */
+			/* transfer current line to SCCB */
+			if (buffer->current_line != NULL)
+				sclp_finalize_mto(buffer);
+			/* skip the rest of the message including the 0 byte */
+			i_msg = count - 1;
+			break;
+		default:	/* no escape character	*/
+			/* do not output unprintable characters */
+			if (!isprint(msg[i_msg]))
+				break;
+			/* check if new mto needs to be created */
+			if (buffer->current_line == NULL) {
+				rc = sclp_initialize_mto(buffer,
+							 buffer->columns);
+				if (rc)
+					return i_msg;
+			}
+			*buffer->current_line++ = sclp_ascebc(msg[i_msg]);
+			buffer->current_length++;
+			break;
+		}
+		/* check if current mto is full */
+		if (buffer->current_line != NULL &&
+		    buffer->current_length >= buffer->columns)
+			sclp_finalize_mto(buffer);
+	}
+
+	/* return number of processed characters */
+	return i_msg;
+}
+
+/*
+ * Return the number of free bytes in the sccb
+ */
+int
+sclp_buffer_space(struct sclp_buffer *buffer)
+{
+	int count;
+
+	count = MAX_SCCB_ROOM - buffer->sccb->header.length;
+	if (buffer->current_line != NULL)
+		count -= sizeof(struct mto) + buffer->current_length;
+	return count;
+}
+
+/*
+ * Return number of characters in buffer
+ */
+int
+sclp_chars_in_buffer(struct sclp_buffer *buffer)
+{
+	int count;
+
+	count = buffer->mto_char_sum;
+	if (buffer->current_line != NULL)
+		count += buffer->current_length;
+	return count;
+}
+
+/*
+ * Set values that influence the driver's behaviour.
+ */
+void
+sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns)
+{
+	buffer->columns = columns;
+	if (buffer->current_line != NULL &&
+	    buffer->current_length > buffer->columns)
+		sclp_finalize_mto(buffer);
+}
+
+void
+sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab)
+{
+	buffer->htab = htab;
+}
+
+/*
+ * called by sclp_console_init and/or sclp_tty_init
+ */
+int
+sclp_rw_init(void)
+{
+	static int init_done = 0;
+	int rc;
+
+	if (init_done)
+		return 0;
+
+	rc = sclp_register(&sclp_rw_event);
+	if (rc == 0)
+		init_done = 1;
+	return rc;
+}
+
+#define SCLP_BUFFER_MAX_RETRY		1
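+
+/*
+ * A buffer is retried at most SCLP_BUFFER_MAX_RETRY times on transient
+ * SCLP errors, see sclp_writedata_callback() below.
+ */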
+
+/*
+ * Second half of the Write Event Data function; runs after the interrupt
+ * that indicates completion of the service call.
+ */
+static void
+sclp_writedata_callback(struct sclp_req *request, void *data)
+{
+	int rc;
+	struct sclp_buffer *buffer;
+	struct write_sccb *sccb;
+
+	buffer = (struct sclp_buffer *) data;
+	sccb = buffer->sccb;
+
+	if (request->status == SCLP_REQ_FAILED) {
+		if (buffer->callback != NULL)
+			buffer->callback(buffer, -EIO);
+		return;
+	}
+	/* check SCLP response code and choose suitable action	*/
+	switch (sccb->header.response_code) {
+	case 0x0020 :
+		/* Normal completion, buffer processed, message(s) sent */
+		rc = 0;
+		break;
+
+	case 0x0340: /* Contained SCLP equipment check */
+		if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
+			rc = -EIO;
+			break;
+		}
+		/* remove processed buffers and requeue rest */
+		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
+			/* not all buffers were processed */
+			sccb->header.response_code = 0x0000;
+			buffer->request.status = SCLP_REQ_FILLED;
+			rc = sclp_add_request(request);
+			if (rc == 0)
+				return;
+		} else
+			rc = 0;
+		break;
+
+	case 0x0040: /* SCLP equipment check */
+	case 0x05f0: /* Target resource in improper state */
+		if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
+			rc = -EIO;
+			break;
+		}
+		/* retry request */
+		sccb->header.response_code = 0x0000;
+		buffer->request.status = SCLP_REQ_FILLED;
+		rc = sclp_add_request(request);
+		if (rc == 0)
+			return;
+		break;
+	default:
+		if (sccb->header.response_code == 0x71f0)
+			rc = -ENOMEM;
+		else
+			rc = -EINVAL;
+		break;
+	}
+	if (buffer->callback != NULL)
+		buffer->callback(buffer, rc);
+}
+
+/*
+ * Setup the request structure in the struct sclp_buffer to do SCLP Write
+ * Event Data and pass the request to the core SCLP loop. Return zero on
+ * success, non-zero otherwise.
+ */
+int
+sclp_emit_buffer(struct sclp_buffer *buffer,
+		 void (*callback)(struct sclp_buffer *, int))
+{
+	struct write_sccb *sccb;
+
+	/* add current line if there is one */
+	if (buffer->current_line != NULL)
+		sclp_finalize_mto(buffer);
+
+	/* Are there messages in the output buffer ? */
+	if (buffer->mto_number == 0)
+		return -EIO;
+
+	sccb = buffer->sccb;
+	if (sclp_rw_event.sclp_receive_mask & EVTYP_MSG_MASK)
+		/* Use normal write message */
+		sccb->msg_buf.header.type = EVTYP_MSG;
+	else if (sclp_rw_event.sclp_receive_mask & EVTYP_PMSGCMD_MASK)
+		/* Use write priority message */
+		sccb->msg_buf.header.type = EVTYP_PMSGCMD;
+	else
+		return -ENOSYS;
+	buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
+	buffer->request.status = SCLP_REQ_FILLED;
+	buffer->request.callback = sclp_writedata_callback;
+	buffer->request.callback_data = buffer;
+	buffer->request.sccb = sccb;
+	buffer->callback = callback;
+	return sclp_add_request(&buffer->request);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_rw.h b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_rw.h
new file mode 100644
index 0000000..7a7bfc9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_rw.h
@@ -0,0 +1,101 @@
+/*
+ * interface to the SCLP-read/write driver
+ *
+ * Copyright IBM Corporation 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __SCLP_RW_H__
+#define __SCLP_RW_H__
+
+#include <linux/list.h>
+
+struct mto {
+	u16 length;
+	u16 type;
+	u16 line_type_flags;
+	u8  alarm_control;
+	u8  _reserved[3];
+} __attribute__((packed));
+
+struct go {
+	u16 length;
+	u16 type;
+	u32 domid;
+	u8  hhmmss_time[8];
+	u8  th_time[3];
+	u8  reserved_0;
+	u8  dddyyyy_date[7];
+	u8  _reserved_1;
+	u16 general_msg_flags;
+	u8  _reserved_2[10];
+	u8  originating_system_name[8];
+	u8  job_guest_name[8];
+} __attribute__((packed));
+
+struct mdb_header {
+	u16 length;
+	u16 type;
+	u32 tag;
+	u32 revision_code;
+} __attribute__((packed));
+
+struct mdb {
+	struct mdb_header header;
+	struct go go;
+} __attribute__((packed));
+
+struct msg_buf {
+	struct evbuf_header header;
+	struct mdb mdb;
+} __attribute__((packed));
+
+struct write_sccb {
+	struct sccb_header header;
+	struct msg_buf msg_buf;
+} __attribute__((packed));
+
+/* The number of empty mto buffers that can be contained in a single sccb. */
+#define NR_EMPTY_MTO_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \
+			sizeof(struct write_sccb)) / sizeof(struct mto))
+
+/*
+ * Data structure with information about a list of SCCBs (only for writing);
+ * it is located at the end of an SCCB's page.
+ */
+struct sclp_buffer {
+	struct list_head list;		/* list_head for sccb_info chain */
+	struct sclp_req request;
+	struct write_sccb *sccb;
+	char *current_line;
+	int current_length;
+	int retry_count;
+	/* output format settings */
+	unsigned short columns;
+	unsigned short htab;
+	/* statistics about this buffer */
+	unsigned int mto_char_sum;	/* # chars in sccb */
+	unsigned int mto_number;	/* # mtos in sccb */
+	/* Callback that is called after reaching final status. */
+	void (*callback)(struct sclp_buffer *, int);
+};
+
+int sclp_rw_init(void);
+struct sclp_buffer *sclp_make_buffer(void *, unsigned short, unsigned short);
+void *sclp_unmake_buffer(struct sclp_buffer *);
+int sclp_buffer_space(struct sclp_buffer *);
+int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int);
+int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int));
+void sclp_set_columns(struct sclp_buffer *, unsigned short);
+void sclp_set_htab(struct sclp_buffer *, unsigned short);
+int sclp_chars_in_buffer(struct sclp_buffer *);
+
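+/*
+ * Typical call sequence (as used by sclp_con.c and sclp_tty.c):
+ * sclp_rw_init() once, then per output page sclp_make_buffer(),
+ * one or more sclp_write() calls, sclp_emit_buffer(); the page is
+ * recovered with sclp_unmake_buffer() from the emit callback.
+ */
+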
+#ifdef CONFIG_SCLP_CONSOLE
+void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event);
+#else
+static inline void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) { }
+#endif
+
+#endif	/* __SCLP_RW_H__ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_sdias.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_sdias.c
new file mode 100644
index 0000000..69e6c50
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_sdias.c
@@ -0,0 +1,313 @@
+/*
+ * SCLP "store data in absolute storage"
+ *
+ * Copyright IBM Corp. 2003,2007
+ * Author(s): Michael Holzheu
+ */
+
+#define KMSG_COMPONENT "sclp_sdias"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/completion.h>
+#include <linux/sched.h>
+#include <asm/sclp.h>
+#include <asm/debug.h>
+#include <asm/ipl.h>
+
+#include "sclp.h"
+#include "sclp_rw.h"
+
+#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x)
+
+#define SDIAS_RETRIES 300
+#define SDIAS_SLEEP_TICKS 50
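+/* SDIAS_SLEEP_TICKS is in jiffies, it is passed to schedule_timeout() */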
+
+#define EQ_STORE_DATA	0x0
+#define EQ_SIZE		0x1
+#define DI_FCP_DUMP	0x0
+#define ASA_SIZE_32	0x0
+#define ASA_SIZE_64	0x1
+#define EVSTATE_ALL_STORED	0x0
+#define EVSTATE_NO_DATA		0x3
+#define EVSTATE_PART_STORED	0x10
+
+static struct debug_info *sdias_dbf;
+
+static struct sclp_register sclp_sdias_register = {
+	.send_mask = EVTYP_SDIAS_MASK,
+};
+
+struct sdias_evbuf {
+	struct	evbuf_header hdr;
+	u8	event_qual;
+	u8	data_id;
+	u64	reserved2;
+	u32	event_id;
+	u16	reserved3;
+	u8	asa_size;
+	u8	event_status;
+	u32	reserved4;
+	u32	blk_cnt;
+	u64	asa;
+	u32	reserved5;
+	u32	fbn;
+	u32	reserved6;
+	u32	lbn;
+	u16	reserved7;
+	u16	dbs;
+} __attribute__((packed));
+
+struct sdias_sccb {
+	struct sccb_header  hdr;
+	struct sdias_evbuf  evbuf;
+} __attribute__((packed));
+
+static struct sdias_sccb sccb __attribute__((aligned(4096)));
+static struct sdias_evbuf sdias_evbuf;
+
+static DECLARE_COMPLETION(evbuf_accepted);
+static DECLARE_COMPLETION(evbuf_done);
+static DEFINE_MUTEX(sdias_mutex);
+
+/*
+ * Called by SCLP base when read event data has been completed (async mode only)
+ */
+static void sclp_sdias_receiver_fn(struct evbuf_header *evbuf)
+{
+	memcpy(&sdias_evbuf, evbuf,
+	       min_t(unsigned long, sizeof(sdias_evbuf), evbuf->length));
+	complete(&evbuf_done);
+	TRACE("sclp_sdias_receiver_fn done\n");
+}
+
+/*
+ * Called by SCLP base when sdias event has been accepted
+ */
+static void sdias_callback(struct sclp_req *request, void *data)
+{
+	complete(&evbuf_accepted);
+	TRACE("callback done\n");
+}
+
+static int sdias_sclp_send(struct sclp_req *req)
+{
+	int retries;
+	int rc;
+
+	for (retries = SDIAS_RETRIES; retries; retries--) {
+		TRACE("add request\n");
+		rc = sclp_add_request(req);
+		if (rc) {
+			/* not initiated, wait some time and retry */
+			set_current_state(TASK_INTERRUPTIBLE);
+			TRACE("add request failed: rc = %i\n",rc);
+			schedule_timeout(SDIAS_SLEEP_TICKS);
+			continue;
+		}
+		/* initiated, wait for completion of service call */
+		wait_for_completion(&evbuf_accepted);
+		if (req->status == SCLP_REQ_FAILED) {
+			TRACE("sclp request failed\n");
+			continue;
+		}
+		/* if not accepted, retry */
+		if (!(sccb.evbuf.hdr.flags & 0x80)) {
+			TRACE("sclp request failed: flags=%x\n",
+			      sccb.evbuf.hdr.flags);
+			continue;
+		}
+		/*
+		 * for the sync interface the response is in the initial sccb
+		 */
+		if (!sclp_sdias_register.receiver_fn) {
+			memcpy(&sdias_evbuf, &sccb.evbuf, sizeof(sdias_evbuf));
+			TRACE("sync request done\n");
+			return 0;
+		}
+		/* otherwise we wait for completion */
+		wait_for_completion(&evbuf_done);
+		TRACE("request done\n");
+		return 0;
+	}
+	return -EIO;
+}
+
+/*
+ * Get number of blocks (4K) available in the HSA
+ */
+int sclp_sdias_blk_count(void)
+{
+	struct sclp_req request;
+	int rc;
+
+	mutex_lock(&sdias_mutex);
+
+	memset(&sccb, 0, sizeof(sccb));
+	memset(&request, 0, sizeof(request));
+
+	sccb.hdr.length = sizeof(sccb);
+	sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
+	sccb.evbuf.hdr.type = EVTYP_SDIAS;
+	sccb.evbuf.event_qual = EQ_SIZE;
+	sccb.evbuf.data_id = DI_FCP_DUMP;
+	sccb.evbuf.event_id = 4712;
+	sccb.evbuf.dbs = 1;
+
+	request.sccb = &sccb;
+	request.command = SCLP_CMDW_WRITE_EVENT_DATA;
+	request.status = SCLP_REQ_FILLED;
+	request.callback = sdias_callback;
+
+	rc = sdias_sclp_send(&request);
+	if (rc) {
+		pr_err("sclp_send failed for get_nr_blocks\n");
+		goto out;
+	}
+	if (sccb.hdr.response_code != 0x0020) {
+		TRACE("send failed: %x\n", sccb.hdr.response_code);
+		rc = -EIO;
+		goto out;
+	}
+
+	switch (sdias_evbuf.event_status) {
+		case 0:
+			rc = sdias_evbuf.blk_cnt;
+			break;
+		default:
+			pr_err("SCLP error: %x\n", sdias_evbuf.event_status);
+			rc = -EIO;
+			goto out;
+	}
+	TRACE("%i blocks\n", rc);
+out:
+	mutex_unlock(&sdias_mutex);
+	return rc;
+}
+
+/*
+ * Copy from HSA to absolute storage (not reentrant):
+ *
+ * @dest     : Address of buffer where data should be copied
+ * @start_blk: Start Block (beginning with 1)
+ * @nr_blks  : Number of 4K blocks to copy
+ *
+ * Return Value: 0 : Requested 'number' of blocks of data copied
+ *		 <0: Error (-EIO)
+ */
+int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
+{
+	struct sclp_req request;
+	int rc;
+
+	mutex_lock(&sdias_mutex);
+
+	memset(&sccb, 0, sizeof(sccb));
+	memset(&request, 0, sizeof(request));
+
+	sccb.hdr.length = sizeof(sccb);
+	sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
+	sccb.evbuf.hdr.type = EVTYP_SDIAS;
+	sccb.evbuf.hdr.flags = 0;
+	sccb.evbuf.event_qual = EQ_STORE_DATA;
+	sccb.evbuf.data_id = DI_FCP_DUMP;
+	sccb.evbuf.event_id = 4712;
+#ifdef __s390x__
+	sccb.evbuf.asa_size = ASA_SIZE_64;
+#else
+	sccb.evbuf.asa_size = ASA_SIZE_32;
+#endif
+	sccb.evbuf.event_status = 0;
+	sccb.evbuf.blk_cnt = nr_blks;
+	sccb.evbuf.asa = (unsigned long)dest;
+	sccb.evbuf.fbn = start_blk;
+	sccb.evbuf.lbn = 0;
+	sccb.evbuf.dbs = 1;
+
+	request.sccb	 = &sccb;
+	request.command  = SCLP_CMDW_WRITE_EVENT_DATA;
+	request.status	 = SCLP_REQ_FILLED;
+	request.callback = sdias_callback;
+
+	rc = sdias_sclp_send(&request);
+	if (rc) {
+		pr_err("sclp_send failed: %x\n", rc);
+		goto out;
+	}
+	if (sccb.hdr.response_code != 0x0020) {
+		TRACE("copy failed: %x\n", sccb.hdr.response_code);
+		rc = -EIO;
+		goto out;
+	}
+
+	switch (sdias_evbuf.event_status) {
+		case EVSTATE_ALL_STORED:
+			TRACE("all stored\n");
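+			/* fall through - also trace the stored block count */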
+		case EVSTATE_PART_STORED:
+			TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
+			break;
+		case EVSTATE_NO_DATA:
+			TRACE("no data\n");
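+			/* fall through - treated as an error */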
+		default:
+			pr_err("Error from SCLP while copying hsa. "
+			       "Event status = %x\n",
+			       sdias_evbuf.event_status);
+			rc = -EIO;
+	}
+out:
+	mutex_unlock(&sdias_mutex);
+	return rc;
+}
+
+static int __init sclp_sdias_register_check(void)
+{
+	int rc;
+
+	rc = sclp_register(&sclp_sdias_register);
+	if (rc)
+		return rc;
+	if (sclp_sdias_blk_count() == 0) {
+		sclp_unregister(&sclp_sdias_register);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int __init sclp_sdias_init_sync(void)
+{
+	TRACE("Try synchronous mode\n");
+	sclp_sdias_register.receive_mask = 0;
+	sclp_sdias_register.receiver_fn = NULL;
+	return sclp_sdias_register_check();
+}
+
+static int __init sclp_sdias_init_async(void)
+{
+	TRACE("Try asynchronous mode\n");
+	sclp_sdias_register.receive_mask = EVTYP_SDIAS_MASK;
+	sclp_sdias_register.receiver_fn = sclp_sdias_receiver_fn;
+	return sclp_sdias_register_check();
+}
+
+int __init sclp_sdias_init(void)
+{
+	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+		return 0;
+	sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
+	debug_register_view(sdias_dbf, &debug_sprintf_view);
+	debug_set_level(sdias_dbf, 6);
+	if (sclp_sdias_init_sync() == 0)
+		goto out;
+	if (sclp_sdias_init_async() == 0)
+		goto out;
+	TRACE("init failed\n");
+	return -ENODEV;
+out:
+	TRACE("init done\n");
+	return 0;
+}
+
+void __exit sclp_sdias_exit(void)
+{
+	debug_unregister(sdias_dbf);
+	sclp_unregister(&sclp_sdias_register);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_tty.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_tty.c
new file mode 100644
index 0000000..40a9d69
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_tty.c
@@ -0,0 +1,574 @@
+/*
+ *  drivers/s390/char/sclp_tty.c
+ *    SCLP line mode terminal driver.
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/gfp.h>
+#include <asm/uaccess.h>
+
+#include "ctrlchar.h"
+#include "sclp.h"
+#include "sclp_rw.h"
+#include "sclp_tty.h"
+
+/*
+ * size of a buffer that collects single characters coming in
+ * via sclp_tty_put_char()
+ */
+#define SCLP_TTY_BUF_SIZE 512
+
+/*
+ * There is exactly one SCLP terminal, so we can keep things simple
+ * and allocate all variables statically.
+ */
+
+/* Lock to guard over changes to global variables. */
+static spinlock_t sclp_tty_lock;
+/* List of free pages that can be used for console output buffering. */
+static struct list_head sclp_tty_pages;
+/* List of full struct sclp_buffer structures ready for output. */
+static struct list_head sclp_tty_outqueue;
+/* Counter how many buffers are emitted. */
+static int sclp_tty_buffer_count;
+/* Pointer to current console buffer. */
+static struct sclp_buffer *sclp_ttybuf;
+/* Timer for delayed output of console messages. */
+static struct timer_list sclp_tty_timer;
+
+static struct tty_struct *sclp_tty;
+static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
+static unsigned short int sclp_tty_chars_count;
+
+struct tty_driver *sclp_tty_driver;
+
+static int sclp_tty_tolower;
+static int sclp_tty_columns = 80;
+
+#define SPACES_PER_TAB 8
+#define CASE_DELIMITER 0x6c /* to separate upper and lower case (% in EBCDIC) */
+
+/* This routine is called whenever we try to open a SCLP terminal. */
+static int
+sclp_tty_open(struct tty_struct *tty, struct file *filp)
+{
+	sclp_tty = tty;
+	tty->driver_data = NULL;
+	tty->low_latency = 0;
+	return 0;
+}
+
+/* This routine is called when the SCLP terminal is closed. */
+static void
+sclp_tty_close(struct tty_struct *tty, struct file *filp)
+{
+	if (tty->count > 1)
+		return;
+	sclp_tty = NULL;
+}
+
+/*
+ * This routine returns the number of characters the tty driver
+ * will accept for queuing to be written.  This number is subject
+ * to change as output buffers get emptied, or if output flow
+ * control is asserted.  It is not an exact number because not every
+ * character needs the same space in the sccb.  The worst case is
+ * a string of newlines: every newline creates a new mto, which
+ * needs sizeof(struct mto) bytes.
+ */
+static int
+sclp_tty_write_room (struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct list_head *l;
+	int count;
+
+	spin_lock_irqsave(&sclp_tty_lock, flags);
+	count = 0;
+	if (sclp_ttybuf != NULL)
+		count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct mto);
+	list_for_each(l, &sclp_tty_pages)
+		count += NR_EMPTY_MTO_PER_SCCB;
+	spin_unlock_irqrestore(&sclp_tty_lock, flags);
+	return count;
+}
+
+static void
+sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
+{
+	unsigned long flags;
+	void *page;
+
+	do {
+		page = sclp_unmake_buffer(buffer);
+		spin_lock_irqsave(&sclp_tty_lock, flags);
+		/* Remove buffer from outqueue */
+		list_del(&buffer->list);
+		sclp_tty_buffer_count--;
+		list_add_tail((struct list_head *) page, &sclp_tty_pages);
+		/* Check if there is a pending buffer on the out queue. */
+		buffer = NULL;
+		if (!list_empty(&sclp_tty_outqueue))
+			buffer = list_entry(sclp_tty_outqueue.next,
+					    struct sclp_buffer, list);
+		spin_unlock_irqrestore(&sclp_tty_lock, flags);
+	} while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback));
+	/* check if the tty needs a wake up call */
+	if (sclp_tty != NULL) {
+		tty_wakeup(sclp_tty);
+	}
+}
+
+static inline void
+__sclp_ttybuf_emit(struct sclp_buffer *buffer)
+{
+	unsigned long flags;
+	int count;
+	int rc;
+
+	spin_lock_irqsave(&sclp_tty_lock, flags);
+	list_add_tail(&buffer->list, &sclp_tty_outqueue);
+	count = sclp_tty_buffer_count++;
+	spin_unlock_irqrestore(&sclp_tty_lock, flags);
+	if (count)
+		return;
+	rc = sclp_emit_buffer(buffer, sclp_ttybuf_callback);
+	if (rc)
+		sclp_ttybuf_callback(buffer, rc);
+}
+
+/*
+ * When this routine is called from the timer then we flush the
+ * temporary write buffer.
+ */
+static void
+sclp_tty_timeout(unsigned long data)
+{
+	unsigned long flags;
+	struct sclp_buffer *buf;
+
+	spin_lock_irqsave(&sclp_tty_lock, flags);
+	buf = sclp_ttybuf;
+	sclp_ttybuf = NULL;
+	spin_unlock_irqrestore(&sclp_tty_lock, flags);
+
+	if (buf != NULL) {
+		__sclp_ttybuf_emit(buf);
+	}
+}
+
+/*
+ * Write a string to the sclp tty.
+ */
+static int sclp_tty_write_string(const unsigned char *str, int count, int may_fail)
+{
+	unsigned long flags;
+	void *page;
+	int written;
+	int overall_written;
+	struct sclp_buffer *buf;
+
+	if (count <= 0)
+		return 0;
+	overall_written = 0;
+	spin_lock_irqsave(&sclp_tty_lock, flags);
+	do {
+		/* Create a sclp output buffer if none exists yet */
+		if (sclp_ttybuf == NULL) {
+			while (list_empty(&sclp_tty_pages)) {
+				spin_unlock_irqrestore(&sclp_tty_lock, flags);
+				if (may_fail)
+					goto out;
+				else
+					sclp_sync_wait();
+				spin_lock_irqsave(&sclp_tty_lock, flags);
+			}
+			page = sclp_tty_pages.next;
+			list_del((struct list_head *) page);
+			sclp_ttybuf = sclp_make_buffer(page, sclp_tty_columns,
+						       SPACES_PER_TAB);
+		}
+		/* try to write the string to the current output buffer */
+		written = sclp_write(sclp_ttybuf, str, count);
+		overall_written += written;
+		if (written == count)
+			break;
+		/*
+		 * Not all characters could be written to the current
+		 * output buffer. Emit the buffer, create a new buffer
+		 * and then output the rest of the string.
+		 */
+		buf = sclp_ttybuf;
+		sclp_ttybuf = NULL;
+		spin_unlock_irqrestore(&sclp_tty_lock, flags);
+		__sclp_ttybuf_emit(buf);
+		spin_lock_irqsave(&sclp_tty_lock, flags);
+		str += written;
+		count -= written;
+	} while (count > 0);
+	/* Setup timer to output current console buffer after 1/10 second */
+	if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
+	    !timer_pending(&sclp_tty_timer)) {
+		init_timer(&sclp_tty_timer);
+		sclp_tty_timer.function = sclp_tty_timeout;
+		sclp_tty_timer.data = 0UL;
+		sclp_tty_timer.expires = jiffies + HZ/10;
+		add_timer(&sclp_tty_timer);
+	}
+	spin_unlock_irqrestore(&sclp_tty_lock, flags);
+out:
+	return overall_written;
+}
+
+/*
+ * This routine is called by the kernel to write a series of characters to the
+ * tty device. The characters may come from user space or kernel space. This
+ * routine will return the number of characters actually accepted for writing.
+ */
+static int
+sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	if (sclp_tty_chars_count > 0) {
+		sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
+		sclp_tty_chars_count = 0;
+	}
+	return sclp_tty_write_string(buf, count, 1);
+}
+
+/*
+ * This routine is called by the kernel to write a single character to the tty
+ * device. If the kernel uses this routine, it must call the flush_chars()
+ * routine (if defined) when it is done stuffing characters into the driver.
+ *
+ * Characters provided to sclp_tty_put_char() are buffered by the SCLP driver.
+ * If the given character is a '\n' the contents of the SCLP write buffer
+ * - including previous characters from sclp_tty_put_char() and strings from
+ * sclp_write() without final '\n' - will be written.
+ */
+static int
+sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	sclp_tty_chars[sclp_tty_chars_count++] = ch;
+	if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
+		sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
+		sclp_tty_chars_count = 0;
+	}
+	return 1;
+}
+
+/*
+ * This routine is called by the kernel after it has written a series of
+ * characters to the tty device using put_char().
+ */
+static void
+sclp_tty_flush_chars(struct tty_struct *tty)
+{
+	if (sclp_tty_chars_count > 0) {
+		sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
+		sclp_tty_chars_count = 0;
+	}
+}
+
+/*
+ * This routine returns the number of characters in the write buffer of the
+ * SCLP driver. The provided number includes all characters that are stored
+ * in the SCCB (will be written next time the SCLP is not busy) as well as
+ * characters in the write buffer (will not be written as long as there is a
+ * final line feed missing).
+ */
+static int
+sclp_tty_chars_in_buffer(struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct list_head *l;
+	struct sclp_buffer *t;
+	int count;
+
+	spin_lock_irqsave(&sclp_tty_lock, flags);
+	count = 0;
+	if (sclp_ttybuf != NULL)
+		count = sclp_chars_in_buffer(sclp_ttybuf);
+	list_for_each(l, &sclp_tty_outqueue) {
+		t = list_entry(l, struct sclp_buffer, list);
+		count += sclp_chars_in_buffer(t);
+	}
+	spin_unlock_irqrestore(&sclp_tty_lock, flags);
+	return count;
+}
+
+/*
+ * removes all content from buffers of low level driver
+ */
+static void
+sclp_tty_flush_buffer(struct tty_struct *tty)
+{
+	if (sclp_tty_chars_count > 0) {
+		sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
+		sclp_tty_chars_count = 0;
+	}
+}
+
+/*
+ * push input to tty
+ */
+static void
+sclp_tty_input(unsigned char* buf, unsigned int count)
+{
+	unsigned int cchar;
+
+	/*
+	 * If this tty driver is currently closed
+	 * then throw the received input away.
+	 */
+	if (sclp_tty == NULL)
+		return;
+	cchar = ctrlchar_handle(buf, count, sclp_tty);
+	switch (cchar & CTRLCHAR_MASK) {
+	case CTRLCHAR_SYSRQ:
+		break;
+	case CTRLCHAR_CTRL:
+		tty_insert_flip_char(sclp_tty, cchar, TTY_NORMAL);
+		tty_flip_buffer_push(sclp_tty);
+		break;
+	case CTRLCHAR_NONE:
+		/* send (normal) input to line discipline */
+		if (count < 2 ||
+		    (strncmp((const char *) buf + count - 2, "^n", 2) &&
+		     strncmp((const char *) buf + count - 2, "\252n", 2))) {
+			/* add the auto \n */
+			tty_insert_flip_string(sclp_tty, buf, count);
+			tty_insert_flip_char(sclp_tty, '\n', TTY_NORMAL);
+		} else
+			tty_insert_flip_string(sclp_tty, buf, count - 2);
+		tty_flip_buffer_push(sclp_tty);
+		break;
+	}
+}
+
+/*
+ * Take an EBCDIC string in upper/lower case, switch the case of the
+ * characters that are enclosed by the special case-delimiter character,
+ * modify the original string in place and return the length of the
+ * resulting string.
+ */
+static int sclp_switch_cases(unsigned char *buf, int count)
+{
+	unsigned char *ip, *op;
+	int toggle;
+
+	/* initially changing case is off */
+	toggle = 0;
+	ip = op = buf;
+	while (count-- > 0) {
+		/* compare with special character */
+		if (*ip == CASE_DELIMITER) {
+			/* followed by another special character? */
+			if (count && ip[1] == CASE_DELIMITER) {
+				/*
+				 * ... then put a single copy of the special
+				 * character to the output string
+				 */
+				*op++ = *ip++;
+				count--;
+			} else
+				/*
+				 * ... special character followed by a normal
+				 * character toggles the case change behaviour
+				 */
+				toggle = ~toggle;
+			/* skip special character */
+			ip++;
+		} else
+			/* not the special character */
+			if (toggle)
+				/* but case switching is on */
+				if (sclp_tty_tolower)
+					/* switch to uppercase */
+					*op++ = _ebc_toupper[(int) *ip++];
+				else
+					/* switch to lowercase */
+					*op++ = _ebc_tolower[(int) *ip++];
+			else
+				/* no case switching, copy the character */
+				*op++ = *ip++;
+	}
+	/* return length of reformatted string. */
+	return op - buf;
+}
+
+static void sclp_get_input(struct gds_subvector *sv)
+{
+	unsigned char *str;
+	int count;
+
+	str = (unsigned char *) (sv + 1);
+	count = sv->length - sizeof(*sv);
+	if (sclp_tty_tolower)
+		EBC_TOLOWER(str, count);
+	count = sclp_switch_cases(str, count);
+	/* convert EBCDIC to ASCII (modify original input in SCCB) */
+	sclp_ebcasc_str(str, count);
+
+	/* transfer input to high level driver */
+	sclp_tty_input(str, count);
+}
+
+static inline void sclp_eval_selfdeftextmsg(struct gds_subvector *sv)
+{
+	void *end;
+
+	end = (void *) sv + sv->length;
+	for (sv = sv + 1; (void *) sv < end; sv = (void *) sv + sv->length)
+		if (sv->key == 0x30)
+			sclp_get_input(sv);
+}
+
+static inline void sclp_eval_textcmd(struct gds_vector *v)
+{
+	struct gds_subvector *sv;
+	void *end;
+
+	end = (void *) v + v->length;
+	for (sv = (struct gds_subvector *) (v + 1);
+	     (void *) sv < end; sv = (void *) sv + sv->length)
+		if (sv->key == GDS_KEY_SELFDEFTEXTMSG)
+			sclp_eval_selfdeftextmsg(sv);
+
+}
+
+static inline void sclp_eval_cpmsu(struct gds_vector *v)
+{
+	void *end;
+
+	end = (void *) v + v->length;
+	for (v = v + 1; (void *) v < end; v = (void *) v + v->length)
+		if (v->gds_id == GDS_ID_TEXTCMD)
+			sclp_eval_textcmd(v);
+}
+
+
+static inline void sclp_eval_mdsmu(struct gds_vector *v)
+{
+	v = sclp_find_gds_vector(v + 1, (void *) v + v->length, GDS_ID_CPMSU);
+	if (v)
+		sclp_eval_cpmsu(v);
+}
+
+static void sclp_tty_receiver(struct evbuf_header *evbuf)
+{
+	struct gds_vector *v;
+
+	v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
+				 GDS_ID_MDSMU);
+	if (v)
+		sclp_eval_mdsmu(v);
+}
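+
+/*
+ * For orientation (comment only): an incoming operator command (or
+ * priority message) event buffer is unwrapped through the nested GDS
+ * structures
+ *
+ *   evbuf -> MDSMU vector -> CPMSU vector -> TEXTCMD vector
+ *         -> self-defining text message subvector -> key 0x30 subvector
+ *
+ * and only the innermost text (key 0x30) is handed to sclp_get_input().
+ */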
+
+static void
+sclp_tty_state_change(struct sclp_register *reg)
+{
+}
+
+static struct sclp_register sclp_input_event =
+{
+	.receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK,
+	.state_change_fn = sclp_tty_state_change,
+	.receiver_fn = sclp_tty_receiver
+};
+
+static const struct tty_operations sclp_ops = {
+	.open = sclp_tty_open,
+	.close = sclp_tty_close,
+	.write = sclp_tty_write,
+	.put_char = sclp_tty_put_char,
+	.flush_chars = sclp_tty_flush_chars,
+	.write_room = sclp_tty_write_room,
+	.chars_in_buffer = sclp_tty_chars_in_buffer,
+	.flush_buffer = sclp_tty_flush_buffer,
+};
+
+static int __init
+sclp_tty_init(void)
+{
+	struct tty_driver *driver;
+	void *page;
+	int i;
+	int rc;
+
+	if (!CONSOLE_IS_SCLP)
+		return 0;
+	driver = alloc_tty_driver(1);
+	if (!driver)
+		return -ENOMEM;
+
+	rc = sclp_rw_init();
+	if (rc) {
+		put_tty_driver(driver);
+		return rc;
+	}
+	/* Allocate pages for output buffering */
+	INIT_LIST_HEAD(&sclp_tty_pages);
+	for (i = 0; i < MAX_KMEM_PAGES; i++) {
+		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		if (page == NULL) {
+			put_tty_driver(driver);
+			return -ENOMEM;
+		}
+		list_add_tail((struct list_head *) page, &sclp_tty_pages);
+	}
+	INIT_LIST_HEAD(&sclp_tty_outqueue);
+	spin_lock_init(&sclp_tty_lock);
+	init_timer(&sclp_tty_timer);
+	sclp_ttybuf = NULL;
+	sclp_tty_buffer_count = 0;
+	if (MACHINE_IS_VM) {
+		/*
+		 * save 4 characters for the CPU number
+		 * written at start of each line by VM/CP
+		 */
+		sclp_tty_columns = 76;
+		/* convert input lines to lowercase */
+		sclp_tty_tolower = 1;
+	}
+	sclp_tty_chars_count = 0;
+	sclp_tty = NULL;
+
+	rc = sclp_register(&sclp_input_event);
+	if (rc) {
+		put_tty_driver(driver);
+		return rc;
+	}
+
+	driver->driver_name = "sclp_line";
+	driver->name = "sclp_line";
+	driver->major = TTY_MAJOR;
+	driver->minor_start = 64;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	driver->init_termios.c_iflag = IGNBRK | IGNPAR;
+	driver->init_termios.c_oflag = ONLCR | XTABS;
+	driver->init_termios.c_lflag = ISIG | ECHO;
+	driver->flags = TTY_DRIVER_REAL_RAW;
+	tty_set_operations(driver, &sclp_ops);
+	rc = tty_register_driver(driver);
+	if (rc) {
+		put_tty_driver(driver);
+		return rc;
+	}
+	sclp_tty_driver = driver;
+	return 0;
+}
+module_init(sclp_tty_init);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_tty.h b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_tty.h
new file mode 100644
index 0000000..4b965b2
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_tty.h
@@ -0,0 +1,18 @@
+/*
+ *  drivers/s390/char/sclp_tty.h
+ *    interface to the SCLP-read/write driver
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __SCLP_TTY_H__
+#define __SCLP_TTY_H__
+
+#include <linux/tty_driver.h>
+
+extern struct tty_driver *sclp_tty_driver;
+
+#endif	/* __SCLP_TTY_H__ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_vt220.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_vt220.c
new file mode 100644
index 0000000..b635472
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/sclp_vt220.c
@@ -0,0 +1,830 @@
+/*
+ * SCLP VT220 terminal driver.
+ *
+ * Copyright IBM Corp. 2003, 2009
+ *
+ * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/kdev_t.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+#include "sclp.h"
+
+#define SCLP_VT220_MAJOR		TTY_MAJOR
+#define SCLP_VT220_MINOR		65
+#define SCLP_VT220_DRIVER_NAME		"sclp_vt220"
+#define SCLP_VT220_DEVICE_NAME		"ttysclp"
+#define SCLP_VT220_CONSOLE_NAME		"ttyS"
+#define SCLP_VT220_CONSOLE_INDEX	1	/* console=ttyS1 */
+#define SCLP_VT220_BUF_SIZE		80
+
+/* Representation of a single write request */
+struct sclp_vt220_request {
+	struct list_head list;
+	struct sclp_req sclp_req;
+	int retry_count;
+};
+
+/* VT220 SCCB */
+struct sclp_vt220_sccb {
+	struct sccb_header header;
+	struct evbuf_header evbuf;
+};
+
+#define SCLP_VT220_MAX_CHARS_PER_BUFFER	(PAGE_SIZE - \
+					 sizeof(struct sclp_vt220_request) - \
+					 sizeof(struct sclp_vt220_sccb))
+
+/* Structures and data needed to register tty driver */
+static struct tty_driver *sclp_vt220_driver;
+
+/* The tty_struct that the kernel associated with us */
+static struct tty_struct *sclp_vt220_tty;
+
+/* Lock to protect internal data from concurrent access */
+static spinlock_t sclp_vt220_lock;
+
+/* List of empty pages to be used as write request buffers */
+static struct list_head sclp_vt220_empty;
+
+/* List of pending requests */
+static struct list_head sclp_vt220_outqueue;
+
+/* Suspend mode flag */
+static int sclp_vt220_suspended;
+
+/* Flag that output queue is currently running */
+static int sclp_vt220_queue_running;
+
+/* Timer used for delaying write requests to merge subsequent messages into
+ * a single buffer */
+static struct timer_list sclp_vt220_timer;
+
+/* Pointer to current request buffer which has been partially filled but not
+ * yet sent */
+static struct sclp_vt220_request *sclp_vt220_current_request;
+
+/* Number of characters in current request buffer */
+static int sclp_vt220_buffered_chars;
+
+/* Counter controlling core driver initialization. */
+static int __initdata sclp_vt220_init_count;
+
+/* Flag indicating that sclp_vt220_current_request should already have
+ * been queued but wasn't because the SCLP was still processing another
+ * buffer */
+static int sclp_vt220_flush_later;
+
+static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
+static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
+				   enum sclp_pm_event sclp_pm_event);
+static int __sclp_vt220_emit(struct sclp_vt220_request *request);
+static void sclp_vt220_emit_current(void);
+
+/* Registration structure for our interest in SCLP event buffers */
+static struct sclp_register sclp_vt220_register = {
+	.send_mask		= EVTYP_VT220MSG_MASK,
+	.receive_mask		= EVTYP_VT220MSG_MASK,
+	.state_change_fn	= NULL,
+	.receiver_fn		= sclp_vt220_receiver_fn,
+	.pm_event_fn		= sclp_vt220_pm_event_fn,
+};
+
+
+/*
+ * Put provided request buffer back into queue and check emit pending
+ * buffers if necessary.
+ */
+static void
+sclp_vt220_process_queue(struct sclp_vt220_request *request)
+{
+	unsigned long flags;
+	void *page;
+
+	do {
+		/* Put buffer back to list of empty buffers */
+		page = request->sclp_req.sccb;
+		spin_lock_irqsave(&sclp_vt220_lock, flags);
+		/* Move request from outqueue to empty queue */
+		list_del(&request->list);
+		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
+		/* Check if there is a pending buffer on the out queue. */
+		request = NULL;
+		if (!list_empty(&sclp_vt220_outqueue))
+			request = list_entry(sclp_vt220_outqueue.next,
+					     struct sclp_vt220_request, list);
+		if (!request || sclp_vt220_suspended) {
+			sclp_vt220_queue_running = 0;
+			spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	} while (__sclp_vt220_emit(request));
+	if (request == NULL && sclp_vt220_flush_later)
+		sclp_vt220_emit_current();
+	/* Check if the tty needs a wake up call */
+	if (sclp_vt220_tty != NULL) {
+		tty_wakeup(sclp_vt220_tty);
+	}
+}
+
+#define SCLP_BUFFER_MAX_RETRY		1
+
+/*
+ * Callback through which the result of a write request is reported by the
+ * SCLP.
+ */
+static void
+sclp_vt220_callback(struct sclp_req *request, void *data)
+{
+	struct sclp_vt220_request *vt220_request;
+	struct sclp_vt220_sccb *sccb;
+
+	vt220_request = (struct sclp_vt220_request *) data;
+	if (request->status == SCLP_REQ_FAILED) {
+		sclp_vt220_process_queue(vt220_request);
+		return;
+	}
+	sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;
+
+	/* Check SCLP response code and choose suitable action	*/
+	switch (sccb->header.response_code) {
+	case 0x0020: /* Normal completion */
+		break;
+
+	case 0x05f0: /* Target resource in improper state */
+		break;
+
+	case 0x0340: /* Contained SCLP equipment check */
+		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
+			break;
+		/* Remove processed buffers and requeue rest */
+		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
+			/* Not all buffers were processed */
+			sccb->header.response_code = 0x0000;
+			vt220_request->sclp_req.status = SCLP_REQ_FILLED;
+			if (sclp_add_request(request) == 0)
+				return;
+		}
+		break;
+
+	case 0x0040: /* SCLP equipment check */
+		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
+			break;
+		sccb->header.response_code = 0x0000;
+		vt220_request->sclp_req.status = SCLP_REQ_FILLED;
+		if (sclp_add_request(request) == 0)
+			return;
+		break;
+
+	default:
+		break;
+	}
+	sclp_vt220_process_queue(vt220_request);
+}
+
+/*
+ * Emit vt220 request buffer to SCLP. Return zero on success, non-zero
+ * otherwise.
+ */
+static int
+__sclp_vt220_emit(struct sclp_vt220_request *request)
+{
+	if (!(sclp_vt220_register.sclp_receive_mask & EVTYP_VT220MSG_MASK)) {
+		request->sclp_req.status = SCLP_REQ_FAILED;
+		return -EIO;
+	}
+	request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
+	request->sclp_req.status = SCLP_REQ_FILLED;
+	request->sclp_req.callback = sclp_vt220_callback;
+	request->sclp_req.callback_data = (void *) request;
+
+	return sclp_add_request(&request->sclp_req);
+}
+
+/*
+ * Queue and emit current request.
+ */
+static void
+sclp_vt220_emit_current(void)
+{
+	unsigned long flags;
+	struct sclp_vt220_request *request;
+	struct sclp_vt220_sccb *sccb;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	if (sclp_vt220_current_request) {
+		sccb = (struct sclp_vt220_sccb *) 
+				sclp_vt220_current_request->sclp_req.sccb;
+		/* Only emit buffers with content */
+		if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
+			list_add_tail(&sclp_vt220_current_request->list,
+				      &sclp_vt220_outqueue);
+			sclp_vt220_current_request = NULL;
+			if (timer_pending(&sclp_vt220_timer))
+				del_timer(&sclp_vt220_timer);
+		}
+		sclp_vt220_flush_later = 0;
+	}
+	if (sclp_vt220_queue_running || sclp_vt220_suspended)
+		goto out_unlock;
+	if (list_empty(&sclp_vt220_outqueue))
+		goto out_unlock;
+	request = list_first_entry(&sclp_vt220_outqueue,
+				   struct sclp_vt220_request, list);
+	sclp_vt220_queue_running = 1;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+
+	if (__sclp_vt220_emit(request))
+		sclp_vt220_process_queue(request);
+	return;
+out_unlock:
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+}
+
+#define SCLP_NORMAL_WRITE	0x00
+
+/*
+ * Helper function to initialize a page with the sclp request structure.
+ */
+static struct sclp_vt220_request *
+sclp_vt220_initialize_page(void *page)
+{
+	struct sclp_vt220_request *request;
+	struct sclp_vt220_sccb *sccb;
+
+	/* Place request structure at end of page */
+	request = ((struct sclp_vt220_request *)
+			((addr_t) page + PAGE_SIZE)) - 1;
+	request->retry_count = 0;
+	request->sclp_req.sccb = page;
+	/* SCCB goes at start of page */
+	sccb = (struct sclp_vt220_sccb *) page;
+	memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
+	sccb->header.length = sizeof(struct sclp_vt220_sccb);
+	sccb->header.function_code = SCLP_NORMAL_WRITE;
+	sccb->header.response_code = 0x0000;
+	sccb->evbuf.type = EVTYP_VT220MSG;
+	sccb->evbuf.length = sizeof(struct evbuf_header);
+
+	return request;
+}
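+
+/*
+ * Resulting page layout (sketch):
+ *
+ *   | sclp_vt220_sccb headers | message text, grows up -> | sclp_vt220_request |
+ *   ^ page start                                                   page end ^
+ *
+ * SCLP_VT220_MAX_CHARS_PER_BUFFER above reflects exactly this split.
+ */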
+
+static inline unsigned int
+sclp_vt220_space_left(struct sclp_vt220_request *request)
+{
+	struct sclp_vt220_sccb *sccb;
+	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
+	return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
+	       sccb->header.length;
+}
+
+static inline unsigned int
+sclp_vt220_chars_stored(struct sclp_vt220_request *request)
+{
+	struct sclp_vt220_sccb *sccb;
+	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
+	return sccb->evbuf.length - sizeof(struct evbuf_header);
+}
+
+/*
+ * Add msg to buffer associated with request. Return the number of characters
+ * added.
+ */
+static int
+sclp_vt220_add_msg(struct sclp_vt220_request *request,
+		   const unsigned char *msg, int count, int convertlf)
+{
+	struct sclp_vt220_sccb *sccb;
+	void *buffer;
+	unsigned char c;
+	int from;
+	int to;
+
+	if (count > sclp_vt220_space_left(request))
+		count = sclp_vt220_space_left(request);
+	if (count <= 0)
+		return 0;
+
+	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
+	buffer = (void *) ((addr_t) sccb + sccb->header.length);
+
+	if (convertlf) {
+		/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d) */
+		for (from=0, to=0;
+		     (from < count) && (to < sclp_vt220_space_left(request));
+		     from++) {
+			/* Retrieve character */
+			c = msg[from];
+			/* Perform conversion */
+			if (c == 0x0a) {
+				if (to + 1 < sclp_vt220_space_left(request)) {
+					((unsigned char *) buffer)[to++] = c;
+					((unsigned char *) buffer)[to++] = 0x0d;
+				} else
+					break;
+
+			} else
+				((unsigned char *) buffer)[to++] = c;
+		}
+		sccb->header.length += to;
+		sccb->evbuf.length += to;
+		return from;
+	} else {
+		memcpy(buffer, (const void *) msg, count);
+		sccb->header.length += count;
+		sccb->evbuf.length += count;
+		return count;
+	}
+}
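+
+/*
+ * Example (illustration only): with convertlf set, the input "a\nb"
+ * (0x61 0x0a 0x62) is stored as "a\n\rb" (0x61 0x0a 0x0d 0x62). The
+ * return value counts consumed input characters (3 here), not stored
+ * ones (4), so callers can resume at the correct source offset when a
+ * buffer fills up.
+ */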
+
+/*
+ * Emit buffer after having waited long enough for more data to arrive.
+ */
+static void
+sclp_vt220_timeout(unsigned long data)
+{
+	sclp_vt220_emit_current();
+}
+
+#define BUFFER_MAX_DELAY	HZ/20
+
+/*
+ * Internal implementation of the write function. Write COUNT bytes of data
+ * from memory at BUF to the SCLP interface. If the data does not fit into
+ * the current write buffer, emit the current one and allocate a new one.
+ * If there are no more empty buffers available, wait until one gets
+ * emptied, unless MAY_FAIL is non-zero, in which case return early with a
+ * short count. If DO_SCHEDULE is non-zero, the buffer will be scheduled
+ * for emitting after a timeout - otherwise the caller has to explicitly
+ * call the flush function.
+ * A non-zero CONVERTLF parameter indicates that 0x0a characters in the
+ * message buffer should be converted to 0x0a 0x0d. After completion,
+ * return the number of bytes written.
+ */
+static int
+__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
+		   int convertlf, int may_fail)
+{
+	unsigned long flags;
+	void *page;
+	int written;
+	int overall_written;
+
+	if (count <= 0)
+		return 0;
+	overall_written = 0;
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	do {
+		/* Create an sclp output buffer if none exists yet */
+		if (sclp_vt220_current_request == NULL) {
+			while (list_empty(&sclp_vt220_empty)) {
+				spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+				if (may_fail || sclp_vt220_suspended)
+					goto out;
+				else
+					sclp_sync_wait();
+				spin_lock_irqsave(&sclp_vt220_lock, flags);
+			}
+			page = (void *) sclp_vt220_empty.next;
+			list_del((struct list_head *) page);
+			sclp_vt220_current_request =
+				sclp_vt220_initialize_page(page);
+		}
+		/* Try to write the string to the current request buffer */
+		written = sclp_vt220_add_msg(sclp_vt220_current_request,
+					     buf, count, convertlf);
+		overall_written += written;
+		if (written == count)
+			break;
+		/*
+		 * Not all characters could be written to the current
+		 * output buffer. Emit the buffer, create a new buffer
+		 * and then output the rest of the string.
+		 */
+		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+		sclp_vt220_emit_current();
+		spin_lock_irqsave(&sclp_vt220_lock, flags);
+		buf += written;
+		count -= written;
+	} while (count > 0);
+	/* Setup timer to output current console buffer after some time */
+	if (sclp_vt220_current_request != NULL &&
+	    !timer_pending(&sclp_vt220_timer) && do_schedule) {
+		sclp_vt220_timer.function = sclp_vt220_timeout;
+		sclp_vt220_timer.data = 0UL;
+		sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
+		add_timer(&sclp_vt220_timer);
+	}
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+out:
+	return overall_written;
+}
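+
+/*
+ * The callers below differ only in the flags they pass as
+ * (do_schedule, convertlf, may_fail):
+ *
+ *   sclp_vt220_write()      (1, 0, 1)  tty write, may return a short count
+ *   sclp_vt220_put_char()   (0, 0, 1)  single char, flushed by flush_chars
+ *   sclp_vt220_con_write()  (1, 1, 0)  console output (if configured),
+ *                                      converts LF and never gives up
+ */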
+
+/*
+ * This routine is called by the kernel to write a series of
+ * characters to the tty device.  The characters may come from
+ * user space or kernel space.  This routine will return the
+ * number of characters actually accepted for writing.
+ */
+static int
+sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	return __sclp_vt220_write(buf, count, 1, 0, 1);
+}
+
+#define SCLP_VT220_SESSION_ENDED	0x01
+#define	SCLP_VT220_SESSION_STARTED	0x80
+#define SCLP_VT220_SESSION_DATA		0x00
+
+/*
+ * Called by the SCLP to report incoming event buffers.
+ */
+static void
+sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
+{
+	char *buffer;
+	unsigned int count;
+
+	/* Ignore input if device is not open */
+	if (sclp_vt220_tty == NULL)
+		return;
+
+	buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
+	count = evbuf->length - sizeof(struct evbuf_header);
+
+	switch (*buffer) {
+	case SCLP_VT220_SESSION_ENDED:
+	case SCLP_VT220_SESSION_STARTED:
+		break;
+	case SCLP_VT220_SESSION_DATA:
+		/* Send input to line discipline */
+		buffer++;
+		count--;
+		tty_insert_flip_string(sclp_vt220_tty, buffer, count);
+		tty_flip_buffer_push(sclp_vt220_tty);
+		break;
+	}
+}
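+
+/*
+ * Layout of an incoming VT220 event buffer as consumed above:
+ *
+ *   struct evbuf_header | 1 byte session code | terminal data ...
+ *
+ * Only SCLP_VT220_SESSION_DATA payloads are forwarded to the tty flip
+ * buffer; session start/end markers are consumed silently.
+ */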
+
+/*
+ * This routine is called when a particular tty device is opened.
+ */
+static int
+sclp_vt220_open(struct tty_struct *tty, struct file *filp)
+{
+	if (tty->count == 1) {
+		sclp_vt220_tty = tty;
+		tty->driver_data = kmalloc(SCLP_VT220_BUF_SIZE, GFP_KERNEL);
+		if (tty->driver_data == NULL)
+			return -ENOMEM;
+		tty->low_latency = 0;
+		if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
+			tty->winsize.ws_row = 24;
+			tty->winsize.ws_col = 80;
+		}
+	}
+	return 0;
+}
+
+/*
+ * This routine is called when a particular tty device is closed.
+ */
+static void
+sclp_vt220_close(struct tty_struct *tty, struct file *filp)
+{
+	if (tty->count == 1) {
+		sclp_vt220_tty = NULL;
+		kfree(tty->driver_data);
+		tty->driver_data = NULL;
+	}
+}
+
+/*
+ * This routine is called by the kernel to write a single
+ * character to the tty device.  If the kernel uses this routine,
+ * it must call the flush_chars() routine (if defined) when it is
+ * done stuffing characters into the driver.
+ */
+static int
+sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	return __sclp_vt220_write(&ch, 1, 0, 0, 1);
+}
+
+/*
+ * This routine is called by the kernel after it has written a
+ * series of characters to the tty device using put_char().  
+ */
+static void
+sclp_vt220_flush_chars(struct tty_struct *tty)
+{
+	if (!sclp_vt220_queue_running)
+		sclp_vt220_emit_current();
+	else
+		sclp_vt220_flush_later = 1;
+}
+
+/*
+ * This routine returns the number of characters the tty driver
+ * will accept for queuing to be written.  This number is subject
+ * to change as output buffers get emptied, or if output flow
+ * control is applied.
+ */
+static int
+sclp_vt220_write_room(struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct list_head *l;
+	int count;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	count = 0;
+	if (sclp_vt220_current_request != NULL)
+		count = sclp_vt220_space_left(sclp_vt220_current_request);
+	list_for_each(l, &sclp_vt220_empty)
+		count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	return count;
+}
+
+/*
+ * Return number of buffered chars.
+ */
+static int
+sclp_vt220_chars_in_buffer(struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct list_head *l;
+	struct sclp_vt220_request *r;
+	int count;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	count = 0;
+	if (sclp_vt220_current_request != NULL)
+		count = sclp_vt220_chars_stored(sclp_vt220_current_request);
+	list_for_each(l, &sclp_vt220_outqueue) {
+		r = list_entry(l, struct sclp_vt220_request, list);
+		count += sclp_vt220_chars_stored(r);
+	}
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	return count;
+}
+
+/*
+ * Pass on all buffers to the hardware. Return only when there are no more
+ * buffers pending.
+ */
+static void
+sclp_vt220_flush_buffer(struct tty_struct *tty)
+{
+	sclp_vt220_emit_current();
+}
+
+/* Release allocated pages. */
+static void __init __sclp_vt220_free_pages(void)
+{
+	struct list_head *page, *p;
+
+	list_for_each_safe(page, p, &sclp_vt220_empty) {
+		list_del(page);
+		free_page((unsigned long) page);
+	}
+}
+
+/* Release memory and unregister from sclp core. Controlled by init counting -
+ * only the last invoker will actually perform these actions. */
+static void __init __sclp_vt220_cleanup(void)
+{
+	sclp_vt220_init_count--;
+	if (sclp_vt220_init_count != 0)
+		return;
+	sclp_unregister(&sclp_vt220_register);
+	__sclp_vt220_free_pages();
+}
+
+/* Allocate buffer pages and register with sclp core. Controlled by init
+ * counting - only the first invoker will actually perform these actions. */
+static int __init __sclp_vt220_init(int num_pages)
+{
+	void *page;
+	int i;
+	int rc;
+
+	sclp_vt220_init_count++;
+	if (sclp_vt220_init_count != 1)
+		return 0;
+	spin_lock_init(&sclp_vt220_lock);
+	INIT_LIST_HEAD(&sclp_vt220_empty);
+	INIT_LIST_HEAD(&sclp_vt220_outqueue);
+	init_timer(&sclp_vt220_timer);
+	sclp_vt220_current_request = NULL;
+	sclp_vt220_buffered_chars = 0;
+	sclp_vt220_tty = NULL;
+	sclp_vt220_flush_later = 0;
+
+	/* Allocate pages for output buffering */
+	rc = -ENOMEM;
+	for (i = 0; i < num_pages; i++) {
+		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		if (!page)
+			goto out;
+		list_add_tail(page, &sclp_vt220_empty);
+	}
+	rc = sclp_register(&sclp_vt220_register);
+out:
+	if (rc) {
+		__sclp_vt220_free_pages();
+		sclp_vt220_init_count--;
+	}
+	return rc;
+}
+
+static const struct tty_operations sclp_vt220_ops = {
+	.open = sclp_vt220_open,
+	.close = sclp_vt220_close,
+	.write = sclp_vt220_write,
+	.put_char = sclp_vt220_put_char,
+	.flush_chars = sclp_vt220_flush_chars,
+	.write_room = sclp_vt220_write_room,
+	.chars_in_buffer = sclp_vt220_chars_in_buffer,
+	.flush_buffer = sclp_vt220_flush_buffer,
+};
+
+/*
+ * Register driver with SCLP and Linux and initialize internal tty structures.
+ */
+static int __init sclp_vt220_tty_init(void)
+{
+	struct tty_driver *driver;
+	int rc;
+
+	/* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
+	 * symmetry between VM and LPAR systems regarding ttyS1. */
+	driver = alloc_tty_driver(1);
+	if (!driver)
+		return -ENOMEM;
+	rc = __sclp_vt220_init(MAX_KMEM_PAGES);
+	if (rc)
+		goto out_driver;
+
+	driver->driver_name = SCLP_VT220_DRIVER_NAME;
+	driver->name = SCLP_VT220_DEVICE_NAME;
+	driver->major = SCLP_VT220_MAJOR;
+	driver->minor_start = SCLP_VT220_MINOR;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	driver->flags = TTY_DRIVER_REAL_RAW;
+	tty_set_operations(driver, &sclp_vt220_ops);
+
+	rc = tty_register_driver(driver);
+	if (rc)
+		goto out_init;
+	sclp_vt220_driver = driver;
+	return 0;
+
+out_init:
+	__sclp_vt220_cleanup();
+out_driver:
+	put_tty_driver(driver);
+	return rc;
+}
+__initcall(sclp_vt220_tty_init);
+
+static void __sclp_vt220_flush_buffer(void)
+{
+	unsigned long flags;
+
+	sclp_vt220_emit_current();
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	if (timer_pending(&sclp_vt220_timer))
+		del_timer(&sclp_vt220_timer);
+	while (sclp_vt220_queue_running) {
+		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+		sclp_sync_wait();
+		spin_lock_irqsave(&sclp_vt220_lock, flags);
+	}
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+}
+
+/*
+ * Resume console: If there are cached messages, emit them.
+ */
+static void sclp_vt220_resume(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	sclp_vt220_suspended = 0;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	sclp_vt220_emit_current();
+}
+
+/*
+ * Suspend console: Set suspend flag and flush console
+ */
+static void sclp_vt220_suspend(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	sclp_vt220_suspended = 1;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	__sclp_vt220_flush_buffer();
+}
+
+static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
+				   enum sclp_pm_event sclp_pm_event)
+{
+	switch (sclp_pm_event) {
+	case SCLP_PM_EVENT_FREEZE:
+		sclp_vt220_suspend();
+		break;
+	case SCLP_PM_EVENT_RESTORE:
+	case SCLP_PM_EVENT_THAW:
+		sclp_vt220_resume();
+		break;
+	}
+}
+
+#ifdef CONFIG_SCLP_VT220_CONSOLE
+
+static void
+sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
+{
+	__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
+}
+
+static struct tty_driver *
+sclp_vt220_con_device(struct console *c, int *index)
+{
+	*index = 0;
+	return sclp_vt220_driver;
+}
+
+static int
+sclp_vt220_notify(struct notifier_block *self,
+			  unsigned long event, void *data)
+{
+	__sclp_vt220_flush_buffer();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+	.notifier_call = sclp_vt220_notify,
+	.priority = 1,
+};
+
+static struct notifier_block on_reboot_nb = {
+	.notifier_call = sclp_vt220_notify,
+	.priority = 1,
+};
+
+/* Structure needed to register with printk */
+static struct console sclp_vt220_console =
+{
+	.name = SCLP_VT220_CONSOLE_NAME,
+	.write = sclp_vt220_con_write,
+	.device = sclp_vt220_con_device,
+	.flags = CON_PRINTBUFFER,
+	.index = SCLP_VT220_CONSOLE_INDEX
+};
+
+static int __init
+sclp_vt220_con_init(void)
+{
+	int rc;
+
+	if (!CONSOLE_IS_SCLP)
+		return 0;
+	rc = __sclp_vt220_init(MAX_CONSOLE_PAGES);
+	if (rc)
+		return rc;
+	/* Attach linux console */
+	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+	register_reboot_notifier(&on_reboot_nb);
+	register_console(&sclp_vt220_console);
+	return 0;
+}
+
+console_initcall(sclp_vt220_con_init);
+#endif /* CONFIG_SCLP_VT220_CONSOLE */
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/tape.h b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape.h
new file mode 100644
index 0000000..267b54e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape.h
@@ -0,0 +1,414 @@
+/*
+ *  drivers/s390/char/tape.h
+ *    tape device driver for 3480/3490E/3590 tapes.
+ *
+ *  S390 and zSeries version
+ *    Copyright IBM Corp. 2001, 2009
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *		 Stefan Bader <shbader@de.ibm.com>
+ */
+
+#ifndef _TAPE_H
+#define _TAPE_H
+
+#include <asm/ccwdev.h>
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <linux/blkdev.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtio.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+struct gendisk;
+
+/*
+ * Define DBF_LIKE_HELL for lots of messages in the debug feature.
+ */
+#define DBF_LIKE_HELL
+#ifdef  DBF_LIKE_HELL
+#define DBF_LH(level, str, ...) \
+do { \
+	debug_sprintf_event(TAPE_DBF_AREA, level, str, ## __VA_ARGS__); \
+} while (0)
+#else
+#define DBF_LH(level, str, ...) do {} while(0)
+#endif
+
+/*
+ * macros s390 debug feature (dbf)
+ */
+#define DBF_EVENT(d_level, d_str...) \
+do { \
+	debug_sprintf_event(TAPE_DBF_AREA, d_level, d_str); \
+} while (0)
+
+#define DBF_EXCEPTION(d_level, d_str...) \
+do { \
+	debug_sprintf_exception(TAPE_DBF_AREA, d_level, d_str); \
+} while (0)
+
+#define TAPE_VERSION_MAJOR 2
+#define TAPE_VERSION_MINOR 0
+#define TAPE_MAGIC "tape"
+
+#define TAPE_MINORS_PER_DEV 2	    /* two minors per device */
+#define TAPEBLOCK_HSEC_SIZE	2048
+#define TAPEBLOCK_HSEC_S2B	2
+#define TAPEBLOCK_RETRIES	5
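+
+/*
+ * A hardware sector on the tape block device is 2048 bytes, i.e.
+ * 512 << TAPEBLOCK_HSEC_S2B bytes -- HSEC_S2B is presumably the shift
+ * from 512-byte Linux sectors to hardware sectors.
+ */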
+
+enum tape_medium_state {
+	MS_UNKNOWN,
+	MS_LOADED,
+	MS_UNLOADED,
+	MS_SIZE
+};
+
+enum tape_state {
+	TS_UNUSED=0,
+	TS_IN_USE,
+	TS_BLKUSE,
+	TS_INIT,
+	TS_NOT_OPER,
+	TS_SIZE
+};
+
+enum tape_op {
+	TO_BLOCK,	/* Block read */
+	TO_BSB,		/* Backward space block */
+	TO_BSF,		/* Backward space filemark */
+	TO_DSE,		/* Data security erase */
+	TO_FSB,		/* Forward space block */
+	TO_FSF,		/* Forward space filemark */
+	TO_LBL,		/* Locate block label */
+	TO_NOP,		/* No operation */
+	TO_RBA,		/* Read backward */
+	TO_RBI,		/* Read block information */
+	TO_RFO,		/* Read forward */
+	TO_REW,		/* Rewind tape */
+	TO_RUN,		/* Rewind and unload tape */
+	TO_WRI,		/* Write block */
+	TO_WTM,		/* Write tape mark */
+	TO_MSEN,	/* Medium sense */
+	TO_LOAD,	/* Load tape */
+	TO_READ_CONFIG, /* Read configuration data */
+	TO_READ_ATTMSG, /* Read attention message */
+	TO_DIS,		/* Tape display */
+	TO_ASSIGN,	/* Assign tape to channel path */
+	TO_UNASSIGN,	/* Unassign tape from channel path */
+	TO_CRYPT_ON,	/* Enable encryption */
+	TO_CRYPT_OFF,	/* Disable encryption */
+	TO_KEKL_SET,	/* Set KEK label */
+	TO_KEKL_QUERY,	/* Query KEK label */
+	TO_RDC,		/* Read device characteristics */
+	TO_SIZE,	/* #entries in tape_op_t */
+};
+
+/* Forward declaration */
+struct tape_device;
+
+/* tape_request->status can be: */
+enum tape_request_status {
+	TAPE_REQUEST_INIT,	/* request is ready to be processed */
+	TAPE_REQUEST_QUEUED,	/* request is queued to be processed */
+	TAPE_REQUEST_IN_IO,	/* request is currently in IO */
+	TAPE_REQUEST_DONE,	/* request is completed. */
+	TAPE_REQUEST_CANCEL,	/* request should be canceled. */
+	TAPE_REQUEST_LONG_BUSY, /* request has to be restarted after long busy */
+};
+
+/* Tape CCW request */
+struct tape_request {
+	struct list_head list;		/* list head for request queueing. */
+	struct tape_device *device;	/* tape device of this request */
+	struct ccw1 *cpaddr;		/* address of the channel program. */
+	void *cpdata;			/* pointer to ccw data. */
+	enum tape_request_status status;/* status of this request */
+	int options;			/* options for execution. */
+	int retries;			/* retry counter for error recovery. */
+	int rescnt;			/* residual count from devstat. */
+
+	/* Callback for delivering final status. */
+	void (*callback)(struct tape_request *, void *);
+	void *callback_data;
+
+	enum tape_op op;
+	int rc;
+};
+
+/* Function type for magnetic tape commands */
+typedef int (*tape_mtop_fn)(struct tape_device *, int);
+
+/* Size of the array containing the mtops for a discipline */
+#define TAPE_NR_MTOPS (MTMKPART+1)
+
+/* Tape Discipline */
+struct tape_discipline {
+	struct module *owner;
+	int  (*setup_device)(struct tape_device *);
+	void (*cleanup_device)(struct tape_device *);
+	int (*irq)(struct tape_device *, struct tape_request *, struct irb *);
+	struct tape_request *(*read_block)(struct tape_device *, size_t);
+	struct tape_request *(*write_block)(struct tape_device *, size_t);
+	void (*process_eov)(struct tape_device*);
+#ifdef CONFIG_S390_TAPE_BLOCK
+	/* Block device stuff. */
+	struct tape_request *(*bread)(struct tape_device *, struct request *);
+	void (*check_locate)(struct tape_device *, struct tape_request *);
+	void (*free_bread)(struct tape_request *);
+#endif
+	/* ioctl function for additional ioctls. */
+	int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long);
+	/* Array of tape commands with TAPE_NR_MTOPS entries */
+	tape_mtop_fn *mtop_array;
+};
+
+/*
+ * The discipline irq function either returns an error code (<0) which
+ * means that the request has failed with an error or one of the following:
+ */
+#define TAPE_IO_SUCCESS		0	/* request successful */
+#define TAPE_IO_PENDING		1	/* request still running */
+#define TAPE_IO_RETRY		2	/* retry to current request */
+#define TAPE_IO_STOP		3	/* stop the running request */
+#define TAPE_IO_LONG_BUSY	4	/* delay the running request */
+
+/* Char Frontend Data */
+struct tape_char_data {
+	struct idal_buffer *idal_buf;	/* idal buffer for user char data */
+	int block_size;			/*   of size block_size. */
+};
+
+#ifdef CONFIG_S390_TAPE_BLOCK
+/* Block Frontend Data */
+struct tape_blk_data
+{
+	struct tape_device *	device;
+	/* Block device request queue. */
+	struct request_queue *	request_queue;
+	spinlock_t		request_queue_lock;
+
+	/* Task to move entries from block request to CCS request queue. */
+	struct work_struct	requeue_task;
+	atomic_t		requeue_scheduled;
+
+	/* Current position on the tape. */
+	long			block_position;
+	int			medium_changed;
+	struct gendisk *	disk;
+};
+#endif
+
+/* Tape Info */
+struct tape_device {
+	/* entry in tape_device_list */
+	struct list_head		node;
+
+	int				cdev_id;
+	struct ccw_device *		cdev;
+	struct tape_class_device *	nt;
+	struct tape_class_device *	rt;
+
+	/* Device mutex to serialize tape commands. */
+	struct mutex			mutex;
+
+	/* Device discipline information. */
+	struct tape_discipline *	discipline;
+	void *				discdata;
+
+	/* Generic status flags */
+	long				tape_generic_status;
+
+	/* Device state information. */
+	wait_queue_head_t		state_change_wq;
+	enum tape_state			tape_state;
+	enum tape_medium_state		medium_state;
+	unsigned char *			modeset_byte;
+
+	/* Reference count. */
+	atomic_t			ref_count;
+
+	/* Request queue. */
+	struct list_head		req_queue;
+
+	/* Request wait queue. */
+	wait_queue_head_t		wait_queue;
+
+	/* Each tape device has (currently) two minor numbers. */
+	int				first_minor;
+
+	/* Number of tapemarks required for correct termination. */
+	int				required_tapemarks;
+
+	/* Block ID of the BOF */
+	unsigned int			bof;
+
+	/* Character device frontend data */
+	struct tape_char_data		char_data;
+#ifdef CONFIG_S390_TAPE_BLOCK
+	/* Block dev frontend data */
+	struct tape_blk_data		blk_data;
+#endif
+
+	/* Function to start or stop the next request later. */
+	struct delayed_work		tape_dnr;
+
+	/* Timer for long busy */
+	struct timer_list		lb_timeout;
+
+};
+
+/* Externals from tape_core.c */
+extern struct tape_request *tape_alloc_request(int cplength, int datasize);
+extern void tape_free_request(struct tape_request *);
+extern int tape_do_io(struct tape_device *, struct tape_request *);
+extern int tape_do_io_async(struct tape_device *, struct tape_request *);
+extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
+extern int tape_cancel_io(struct tape_device *, struct tape_request *);
+void tape_hotplug_event(struct tape_device *, int major, int action);
+
+static inline int
+tape_do_io_free(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	rc = tape_do_io(device, request);
+	tape_free_request(request);
+	return rc;
+}
+
+static inline void
+tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
+{
+	request->callback = (void *) tape_free_request;
+	request->callback_data = NULL;
+	tape_do_io_async(device, request);
+}
+
+extern int tape_oper_handler(int irq, int status);
+extern void tape_noper_handler(int irq, int status);
+extern int tape_open(struct tape_device *);
+extern int tape_release(struct tape_device *);
+extern int tape_mtop(struct tape_device *, int, int);
+extern void tape_state_set(struct tape_device *, enum tape_state);
+
+extern int tape_generic_online(struct tape_device *, struct tape_discipline *);
+extern int tape_generic_offline(struct ccw_device *);
+extern int tape_generic_pm_suspend(struct ccw_device *);
+
+/* Externals from tape_devmap.c */
+extern int tape_generic_probe(struct ccw_device *);
+extern void tape_generic_remove(struct ccw_device *);
+
+extern struct tape_device *tape_find_device(int devindex);
+extern struct tape_device *tape_get_device(struct tape_device *);
+extern void tape_put_device(struct tape_device *);
+
+/* Externals from tape_char.c */
+extern int tapechar_init(void);
+extern void tapechar_exit(void);
+extern int  tapechar_setup_device(struct tape_device *);
+extern void tapechar_cleanup_device(struct tape_device *);
+
+/* Externals from tape_block.c */
+#ifdef CONFIG_S390_TAPE_BLOCK
+extern int tapeblock_init (void);
+extern void tapeblock_exit(void);
+extern int tapeblock_setup_device(struct tape_device *);
+extern void tapeblock_cleanup_device(struct tape_device *);
+#else
+static inline int tapeblock_init (void) {return 0;}
+static inline void tapeblock_exit (void) {;}
+static inline int tapeblock_setup_device(struct tape_device *t) {return 0;}
+static inline void tapeblock_cleanup_device (struct tape_device *t) {;}
+#endif
+
+/* tape initialisation functions */
+#ifdef CONFIG_PROC_FS
+extern void tape_proc_init (void);
+extern void tape_proc_cleanup (void);
+#else
+static inline void tape_proc_init (void) {;}
+static inline void tape_proc_cleanup (void) {;}
+#endif
+
+/* a function for dumping device sense info */
+extern void tape_dump_sense_dbf(struct tape_device *, struct tape_request *,
+				struct irb *);
+
+/* functions for handling the status of a device */
+extern void tape_med_state_set(struct tape_device *, enum tape_medium_state);
+
+/* The debug area */
+extern debug_info_t *TAPE_DBF_AREA;
+
+/* functions for building ccws */
+static inline struct ccw1 *
+tape_ccw_cc(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags = CCW_FLAG_CC;
+	ccw->count = memsize;
+	ccw->cda = (__u32)(addr_t) cda;
+	return ccw + 1;
+}
+
+static inline struct ccw1 *
+tape_ccw_end(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags = 0;
+	ccw->count = memsize;
+	ccw->cda = (__u32)(addr_t) cda;
+	return ccw + 1;
+}
+
+static inline struct ccw1 *
+tape_ccw_cmd(struct ccw1 *ccw, __u8 cmd_code)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags = 0;
+	ccw->count = 0;
+	ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
+	return ccw + 1;
+}
+
+static inline struct ccw1 *
+tape_ccw_repeat(struct ccw1 *ccw, __u8 cmd_code, int count)
+{
+	while (count-- > 0) {
+		ccw->cmd_code = cmd_code;
+		ccw->flags = CCW_FLAG_CC;
+		ccw->count = 0;
+		ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
+		ccw++;
+	}
+	return ccw;
+}
+
+static inline struct ccw1 *
+tape_ccw_cc_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags    = CCW_FLAG_CC;
+	idal_buffer_set_cda(idal, ccw);
+	return ccw++;
+}
+
+static inline struct ccw1 *
+tape_ccw_end_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags    = 0;
+	idal_buffer_set_cda(idal, ccw);
+	return ccw++;
+}
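+
+/*
+ * Typical use of these helpers (sketch, not part of this header): a
+ * discipline builds its channel program in request->cpaddr, e.g. for a
+ * forward-space-file operation something along the lines of
+ *
+ *	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+ *	ccw = tape_ccw_repeat(ccw, FORWARD_SPACE_FILE, mt_count);
+ *	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+ *
+ * where each helper fills one CCW (or, for _repeat, several) and returns
+ * the position of the next one. The command codes are defined in
+ * tape_std.h.
+ */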
+
+/* Global vars */
+extern const char *tape_state_verbose[];
+extern const char *tape_op_verbose[];
+
+#endif /* _TAPE_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_34xx.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_34xx.c
new file mode 100644
index 0000000..934ef33
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_34xx.c
@@ -0,0 +1,1371 @@
+/*
+ *  drivers/s390/char/tape_34xx.c
+ *    tape device discipline for 3480/3490 tapes.
+ *
+ *    Copyright IBM Corp. 2001, 2009
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "tape_34xx"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#define TAPE_DBF_AREA	tape_34xx_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+
+/*
+ * Pointer to debug area.
+ */
+debug_info_t *TAPE_DBF_AREA = NULL;
+EXPORT_SYMBOL(TAPE_DBF_AREA);
+
+#define TAPE34XX_FMT_3480	0
+#define TAPE34XX_FMT_3480_2_XF	1
+#define TAPE34XX_FMT_3480_XF	2
+
+struct tape_34xx_block_id {
+	unsigned int	wrap		: 1;
+	unsigned int	segment		: 7;
+	unsigned int	format		: 2;
+	unsigned int	block		: 22;
+};
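+
+/*
+ * The bit fields above add up to 32 bits and evidently mirror the block
+ * ID word reported by the 3480/3490 hardware (see the sbid cache below).
+ */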
+
+/*
+ * A list of block IDs is kept to speed up seeking to blocks.
+ */
+struct tape_34xx_sbid {
+	struct list_head		list;
+	struct tape_34xx_block_id	bid;
+};
+
+static void tape_34xx_delete_sbid_from(struct tape_device *, int);
+
+/*
+ * Medium sense for 34xx tapes. There is no 'real' medium sense call.
+ * So we just do a normal sense.
+ */
+static void __tape_34xx_medium_sense(struct tape_request *request)
+{
+	struct tape_device *device = request->device;
+	unsigned char *sense;
+
+	if (request->rc == 0) {
+		sense = request->cpdata;
+
+		/*
+		 * This isn't quite correct. But since INTERVENTION_REQUIRED
+		 * means that the drive is 'neither ready nor on-line' it is
+		 * only slightly inaccurate to say there is no tape loaded if
+		 * the drive isn't online...
+		 */
+		if (sense[0] & SENSE_INTERVENTION_REQUIRED)
+			tape_med_state_set(device, MS_UNLOADED);
+		else
+			tape_med_state_set(device, MS_LOADED);
+
+		if (sense[1] & SENSE_WRITE_PROTECT)
+			device->tape_generic_status |= GMT_WR_PROT(~0);
+		else
+			device->tape_generic_status &= ~GMT_WR_PROT(~0);
+	} else
+		DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
+			request->rc);
+	tape_free_request(request);
+}
+
+static int tape_34xx_medium_sense(struct tape_device *device)
+{
+	struct tape_request *request;
+	int rc;
+
+	request = tape_alloc_request(1, 32);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "MSEN fail\n");
+		return PTR_ERR(request);
+	}
+
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+	rc = tape_do_io_interruptible(device, request);
+	__tape_34xx_medium_sense(request);
+	return rc;
+}
+
+static void tape_34xx_medium_sense_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(1, 32);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "MSEN fail\n");
+		return;
+	}
+
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+	request->callback = (void *) __tape_34xx_medium_sense;
+	request->callback_data = NULL;
+	tape_do_io_async(device, request);
+}
+
+struct tape_34xx_work {
+	struct tape_device	*device;
+	enum tape_op		 op;
+	struct work_struct	 work;
+};
+
+/*
+ * These functions are currently used only to schedule a medium_sense for
+ * later execution. This is because we get an interrupt whenever a medium
+ * is inserted but cannot call tape_do_io* from an interrupt context.
+ * Maybe that's useful for other actions we want to start from the
+ * interrupt handler.
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
+ */
+static void
+tape_34xx_work_handler(struct work_struct *work)
+{
+	struct tape_34xx_work *p =
+		container_of(work, struct tape_34xx_work, work);
+	struct tape_device *device = p->device;
+
+	switch(p->op) {
+		case TO_MSEN:
+			tape_34xx_medium_sense_async(device);
+			break;
+		default:
+			DBF_EVENT(3, "T34XX: internal error: unknown work\n");
+	}
+	tape_put_device(device);
+	kfree(p);
+}
+
+static int
+tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
+{
+	struct tape_34xx_work *p;
+
+	if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
+		return -ENOMEM;
+
+	INIT_WORK(&p->work, tape_34xx_work_handler);
+
+	p->device = tape_get_device(device);
+	p->op     = op;
+
+	schedule_work(&p->work);
+	return 0;
+}
+
+/*
+ * Done Handler is called when dev stat = DEVICE-END (successful operation)
+ */
+static inline int
+tape_34xx_done(struct tape_request *request)
+{
+	DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
+
+	switch (request->op) {
+		case TO_DSE:
+		case TO_RUN:
+		case TO_WRI:
+		case TO_WTM:
+		case TO_ASSIGN:
+		case TO_UNASSIGN:
+			tape_34xx_delete_sbid_from(request->device, 0);
+			break;
+		default:
+			;
+	}
+	return TAPE_IO_SUCCESS;
+}
+
+static inline int
+tape_34xx_erp_failed(struct tape_request *request, int rc)
+{
+	DBF_EVENT(3, "Error recovery failed for %s (RC=%d)\n",
+		  tape_op_verbose[request->op], rc);
+	return rc;
+}
+
+static inline int
+tape_34xx_erp_succeeded(struct tape_request *request)
+{
+	DBF_EVENT(3, "Error Recovery successful for %s\n",
+		  tape_op_verbose[request->op]);
+	return tape_34xx_done(request);
+}
+
+static inline int
+tape_34xx_erp_retry(struct tape_request *request)
+{
+	DBF_EVENT(3, "xerp retr %s\n", tape_op_verbose[request->op]);
+	return TAPE_IO_RETRY;
+}
+
+/*
+ * This function is called, when no request is outstanding and we get an
+ * interrupt
+ */
+static int
+tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
+{
+	if (irb->scsw.cmd.dstat == 0x85) { /* READY */
+		/* A medium was inserted in the drive. */
+		DBF_EVENT(6, "xuud med\n");
+		tape_34xx_delete_sbid_from(device, 0);
+		tape_34xx_schedule_work(device, TO_MSEN);
+	} else {
+		DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
+		tape_dump_sense_dbf(device, NULL, irb);
+	}
+	return TAPE_IO_SUCCESS;
+}
+
+/*
+ * Read Opposite Error Recovery Function:
+ * Used, when Read Forward does not work
+ */
+static int
+tape_34xx_erp_read_opposite(struct tape_device *device,
+			    struct tape_request *request)
+{
+	if (request->op == TO_RFO) {
+		/*
+		 * We did read forward, but the data could not be read
+		 * *correctly*. We transform the request to a read backward
+		 * and try again.
+		 */
+		tape_std_read_backward(device, request);
+		return tape_34xx_erp_retry(request);
+	}
+
+	/*
+	 * We tried to read forward and backward, but had no
+	 * success -> the request failed.
+	 */
+	return tape_34xx_erp_failed(request, -EIO);
+}
+
+static int
+tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request,
+		  struct irb *irb, int no)
+{
+	if (request->op != TO_ASSIGN) {
+		dev_err(&device->cdev->dev, "An unexpected condition %d "
+			"occurred in tape error recovery\n", no);
+		tape_dump_sense_dbf(device, request, irb);
+	}
+	return tape_34xx_erp_failed(request, -EIO);
+}
+
+/*
+ * Handle data overrun between cu and drive. The channel speed might
+ * be too slow.
+ */
+static int
+tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request,
+		      struct irb *irb)
+{
+	if (irb->ecw[3] == 0x40) {
+		dev_warn (&device->cdev->dev, "A data overrun occurred between"
+			" the control unit and tape unit\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	}
+	return tape_34xx_erp_bug(device, request, irb, -1);
+}
+
+/*
+ * Handle record sequence error.
+ */
+static int
+tape_34xx_erp_sequence(struct tape_device *device,
+		       struct tape_request *request, struct irb *irb)
+{
+	if (irb->ecw[3] == 0x41) {
+		/*
+		 * cu detected incorrect block-id sequence on tape.
+		 */
+		dev_warn (&device->cdev->dev, "The block ID sequence on the "
+			"tape is incorrect\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	}
+	/*
+	 * Record sequence error bit is set, but erpa does not
+	 * show record sequence error.
+	 */
+	return tape_34xx_erp_bug(device, request, irb, -2);
+}
+
+/*
+ * This function analyses the tape's sense-data in case of a unit-check.
+ * If possible, it tries to recover from the error. Else the user is
+ * informed about the problem.
+ */
+static int
+tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
+		     struct irb *irb)
+{
+	int inhibit_cu_recovery;
+	__u8* sense;
+
+	inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0;
+	sense = irb->ecw;
+
+#ifdef CONFIG_S390_TAPE_BLOCK
+	if (request->op == TO_BLOCK) {
+		/*
+		 * Recovery for block device requests. Set the block_position
+		 * to something invalid and retry.
+		 */
+		device->blk_data.block_position = -1;
+		if (request->retries-- <= 0)
+			return tape_34xx_erp_failed(request, -EIO);
+		else
+			return tape_34xx_erp_retry(request);
+	}
+#endif
+
+	if (
+		sense[0] & SENSE_COMMAND_REJECT &&
+		sense[1] & SENSE_WRITE_PROTECT
+	) {
+		if (
+			request->op == TO_DSE ||
+			request->op == TO_WRI ||
+			request->op == TO_WTM
+		) {
+			/* medium is write protected */
+			return tape_34xx_erp_failed(request, -EACCES);
+		} else {
+			return tape_34xx_erp_bug(device, request, irb, -3);
+		}
+	}
+
+	/*
+	 * Special cases for various tape-states when reaching
+	 * end of recorded area
+	 *
+	 * FIXME: Maybe a special case of the special case:
+	 *        sense[0] == SENSE_EQUIPMENT_CHECK &&
+	 *        sense[1] == SENSE_DRIVE_ONLINE    &&
+	 *        sense[3] == 0x47 (Volume Fenced)
+	 *
+	 *        This was caused by continued FSF or FSR after an
+	 *        'End Of Data'.
+	 */
+	if ((
+		sense[0] == SENSE_DATA_CHECK      ||
+		sense[0] == SENSE_EQUIPMENT_CHECK ||
+		sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK
+	) && (
+		sense[1] == SENSE_DRIVE_ONLINE ||
+		sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE
+	)) {
+		switch (request->op) {
+		/*
+		 * sense[0] == SENSE_DATA_CHECK   &&
+		 * sense[1] == SENSE_DRIVE_ONLINE
+		 * sense[3] == 0x36 (End Of Data)
+		 *
+		 * Further seeks might return a 'Volume Fenced'.
+		 */
+		case TO_FSF:
+		case TO_FSB:
+			/* Trying to seek beyond end of recorded area */
+			return tape_34xx_erp_failed(request, -ENOSPC);
+		case TO_BSB:
+			return tape_34xx_erp_retry(request);
+
+		/*
+		 * sense[0] == SENSE_DATA_CHECK   &&
+		 * sense[1] == SENSE_DRIVE_ONLINE &&
+		 * sense[3] == 0x36 (End Of Data)
+		 */
+		case TO_LBL:
+			/* Block could not be located. */
+			tape_34xx_delete_sbid_from(device, 0);
+			return tape_34xx_erp_failed(request, -EIO);
+
+		case TO_RFO:
+			/* Read beyond end of recorded area -> 0 bytes read */
+			return tape_34xx_erp_failed(request, 0);
+
+		/*
+		 * sense[0] == SENSE_EQUIPMENT_CHECK &&
+		 * sense[1] == SENSE_DRIVE_ONLINE    &&
+		 * sense[3] == 0x38 (Physical End Of Volume)
+		 */
+		case TO_WRI:
+			/* Writing at physical end of volume */
+			return tape_34xx_erp_failed(request, -ENOSPC);
+		default:
+			return tape_34xx_erp_failed(request, 0);
+		}
+	}
+
+	/* Sensing special bits */
+	if (sense[0] & SENSE_BUS_OUT_CHECK)
+		return tape_34xx_erp_retry(request);
+
+	if (sense[0] & SENSE_DATA_CHECK) {
+		/*
+		 * hardware failure, damaged tape or improper
+		 * operating conditions
+		 */
+		switch (sense[3]) {
+		case 0x23:
+			/* a read data check occurred */
+			if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
+			    inhibit_cu_recovery)
+				// data check is not permanent, may be
+				// recovered. We always use async-mode with
+				// cu-recovery, so this should *never* happen.
+				return tape_34xx_erp_bug(device, request,
+							 irb, -4);
+
+			/* data check is permanent, CU recovery has failed */
+			dev_warn (&device->cdev->dev, "A read error occurred "
+				"that cannot be recovered\n");
+			return tape_34xx_erp_failed(request, -EIO);
+		case 0x25:
+			// a write data check occurred
+			if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
+			    inhibit_cu_recovery)
+				// data check is not permanent, may be
+				// recovered. We always use async-mode with
+				// cu-recovery, so this should *never* happen.
+				return tape_34xx_erp_bug(device, request,
+							 irb, -5);
+
+			// data check is permanent, cu-recovery has failed
+			dev_warn (&device->cdev->dev, "A write error on the "
+				"tape cannot be recovered\n");
+			return tape_34xx_erp_failed(request, -EIO);
+		case 0x26:
+			/* Data Check (read opposite) occurred. */
+			return tape_34xx_erp_read_opposite(device, request);
+		case 0x28:
+			/* ID-Mark at tape start couldn't be written */
+			dev_warn (&device->cdev->dev, "Writing the ID-mark "
+				"failed\n");
+			return tape_34xx_erp_failed(request, -EIO);
+		case 0x31:
+			/* Tape void. Tried to read beyond end of device. */
+			dev_warn (&device->cdev->dev, "Reading the tape beyond"
+				" the end of the recorded area failed\n");
+			return tape_34xx_erp_failed(request, -ENOSPC);
+		case 0x41:
+			/* Record sequence error. */
+			dev_warn (&device->cdev->dev, "The tape contains an "
+				"incorrect block ID sequence\n");
+			return tape_34xx_erp_failed(request, -EIO);
+		default:
+			/* all data checks for 3480 should result in one of
+			 * the above erpa-codes. For 3490, other data-check
+			 * conditions do exist. */
+			if (device->cdev->id.driver_info == tape_3480)
+				return tape_34xx_erp_bug(device, request,
+							 irb, -6);
+		}
+	}
+
+	if (sense[0] & SENSE_OVERRUN)
+		return tape_34xx_erp_overrun(device, request, irb);
+
+	if (sense[1] & SENSE_RECORD_SEQUENCE_ERR)
+		return tape_34xx_erp_sequence(device, request, irb);
+
+	/* Sensing erpa codes */
+	switch (sense[3]) {
+	case 0x00:
+		/* Unit check with erpa code 0. Report and ignore. */
+		return TAPE_IO_SUCCESS;
+	case 0x21:
+		/*
+		 * Data streaming not operational. CU will switch to
+		 * interlock mode. Reissue the command.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x22:
+		/*
+		 * Path equipment check. Might be drive adapter error, buffer
+		 * error on the lower interface, internal path not usable,
+		 * or error during cartridge load.
+		 */
+		dev_warn (&device->cdev->dev, "A path equipment check occurred"
+			" for the tape device\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x24:
+		/*
+		 * Load display check. A load display command was issued,
+		 * but the drive is displaying a drive check message. Can
+		 * be treated as "device end".
+		 */
+		return tape_34xx_erp_succeeded(request);
+	case 0x27:
+		/*
+		 * Command reject. May indicate illegal channel program or
+		 * buffer over/underrun. Since all channel programs are
+		 * issued by this driver and ought to be correct, we assume
+		 * an over/underrun situation and retry the channel program.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x29:
+		/*
+		 * Function incompatible. Either the tape is IDRC compressed
+		 * but the hardware is not capable of IDRC, or a perform
+		 * subsystem function was issued while the CU is not on-line.
+		 */
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x2a:
+		/*
+		 * Unsolicited environmental data. An internal counter
+		 * overflowed; we can ignore this and reissue the command.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x2b:
+		/*
+		 * Environmental data present. Indicates either unload
+		 * completed ok or read buffered log command completed ok.
+		 */
+		if (request->op == TO_RUN) {
+			/* Rewind unload completed ok. */
+			tape_med_state_set(device, MS_UNLOADED);
+			return tape_34xx_erp_succeeded(request);
+		}
+		/* tape_34xx doesn't use read buffered log commands. */
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x2c:
+		/*
+		 * Permanent equipment check. CU has tried recovery, but
+		 * did not succeed.
+		 */
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x2d:
+		/* Data security erase failure. */
+		if (request->op == TO_DSE)
+			return tape_34xx_erp_failed(request, -EIO);
+		/* Data security erase failure, but no such command issued. */
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x2e:
+		/*
+		 * Not capable. This indicates either that the drive fails
+		 * reading the format ID mark or that the format specified
+		 * is not supported by the drive.
+		 */
+		dev_warn (&device->cdev->dev, "The tape unit cannot process "
+			"the tape format\n");
+		return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
+	case 0x30:
+		/* The medium is write protected. */
+		dev_warn (&device->cdev->dev, "The tape medium is write-"
+			"protected\n");
+		return tape_34xx_erp_failed(request, -EACCES);
+	case 0x32:
+		// Tension loss. We cannot recover this, it's an I/O error.
+		dev_warn (&device->cdev->dev, "The tape does not have the "
+			"required tape tension\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x33:
+		/*
+		 * Load Failure. The cartridge was not inserted correctly or
+		 * the tape is not threaded correctly.
+		 */
+		dev_warn (&device->cdev->dev, "The tape unit failed to load"
+			" the cartridge\n");
+		tape_34xx_delete_sbid_from(device, 0);
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x34:
+		/*
+		 * Unload failure. The drive cannot maintain tape tension
+		 * and control tape movement during an unload operation.
+		 */
+		dev_warn (&device->cdev->dev, "Automatic unloading of the tape"
+			" cartridge failed\n");
+		if (request->op == TO_RUN)
+			return tape_34xx_erp_failed(request, -EIO);
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x35:
+		/*
+		 * Drive equipment check. One of the following:
+		 * - cu cannot recover from a drive detected error
+		 * - a check code message is shown on drive display
+		 * - the cartridge loader does not respond correctly
+		 * - a failure occurs during an index, load, or unload cycle
+		 */
+		dev_warn (&device->cdev->dev, "An equipment check has occurred"
+			" on the tape unit\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x36:
+		if (device->cdev->id.driver_info == tape_3490)
+			/* End of data. */
+			return tape_34xx_erp_failed(request, -EIO);
+		/* This erpa is reserved for 3480. */
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x37:
+		/*
+		 * Tape length error. The tape is shorter than reported in
+		 * the beginning-of-tape data.
+		 */
+		dev_warn (&device->cdev->dev, "The tape information states an"
+			" incorrect length\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x38:
+		/*
+		 * Physical end of tape. A read/write operation reached
+		 * the physical end of tape.
+		 */
+		if (request->op == TO_WRI ||
+		    request->op == TO_DSE ||
+		    request->op == TO_WTM)
+			return tape_34xx_erp_failed(request, -ENOSPC);
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x39:
+		/* Backward at Beginning of tape. */
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x3a:
+		/* Drive switched to not ready. */
+		dev_warn (&device->cdev->dev, "The tape unit is not ready\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x3b:
+		/* Manual rewind or unload. This causes an I/O error. */
+		dev_warn (&device->cdev->dev, "The tape medium has been "
+			"rewound or unloaded manually\n");
+		tape_34xx_delete_sbid_from(device, 0);
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x42:
+		/*
+		 * Degraded mode. A condition that can cause degraded
+		 * performance is detected.
+		 */
+		dev_warn (&device->cdev->dev, "The tape subsystem is running "
+			"in degraded mode\n");
+		return tape_34xx_erp_retry(request);
+	case 0x43:
+		/* Drive not ready. */
+		tape_34xx_delete_sbid_from(device, 0);
+		tape_med_state_set(device, MS_UNLOADED);
+		/* Some commands are successful even in this case. */
+		if (sense[1] & SENSE_DRIVE_ONLINE) {
+			switch (request->op) {
+			case TO_ASSIGN:
+			case TO_UNASSIGN:
+			case TO_DIS:
+			case TO_NOP:
+				return tape_34xx_done(request);
+			default:
+				break;
+			}
+		}
+		return tape_34xx_erp_failed(request, -ENOMEDIUM);
+	case 0x44:
+		/* Locate Block unsuccessful. */
+		if (request->op != TO_BLOCK && request->op != TO_LBL)
+			/* No locate block was issued. */
+			return tape_34xx_erp_bug(device, request,
+						 irb, sense[3]);
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x45:
+		/* The drive is assigned to a different channel path. */
+		dev_warn (&device->cdev->dev, "The tape unit is already "
+			"assigned\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x46:
+		/*
+		 * Drive not on-line. Drive may be switched offline,
+		 * the power supply may be switched off or
+		 * the drive address may not be set correctly.
+		 */
+		dev_warn (&device->cdev->dev, "The tape unit is not online\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x47:
+		/* Volume fenced. CU reports volume integrity is lost. */
+		dev_warn (&device->cdev->dev, "The control unit has fenced "
+			"access to the tape volume\n");
+		tape_34xx_delete_sbid_from(device, 0);
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x48:
+		/* Log sense data and retry request. */
+		return tape_34xx_erp_retry(request);
+	case 0x49:
+		/* Bus out check. A parity check error on the bus was found. */
+		dev_warn (&device->cdev->dev, "A parity error occurred on the "
+			"tape bus\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x4a:
+		/* Control unit erp failed. */
+		dev_warn (&device->cdev->dev, "I/O error recovery failed on "
+			"the tape control unit\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x4b:
+		/*
+		 * CU and drive incompatible. The drive requests micro-program
+		 * patches, which are not available on the CU.
+		 */
+		dev_warn (&device->cdev->dev, "The tape unit requires a "
+			"firmware update\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x4c:
+		/*
+		 * Recovered Check-One failure. The CU developed a hardware
+		 * error but was able to recover.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x4d:
+		if (device->cdev->id.driver_info == tape_3490)
+			/*
+			 * Resetting event received. Since the driver does
+			 * not support resetting event recovery (which has to
+			 * be handled by the I/O Layer), retry our command.
+			 */
+			return tape_34xx_erp_retry(request);
+		/* This erpa is reserved for 3480. */
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x4e:
+		if (device->cdev->id.driver_info == tape_3490) {
+			/*
+			 * Maximum block size exceeded. This indicates that
+			 * the block to be written is larger than allowed for
+			 * buffered mode.
+			 */
+			dev_warn (&device->cdev->dev, "The maximum block size"
+				" for buffered mode is exceeded\n");
+			return tape_34xx_erp_failed(request, -ENOBUFS);
+		}
+		/* This erpa is reserved for 3480. */
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x50:
+		/*
+		 * Read buffered log (Overflow). CU is running in extended
+		 * buffered log mode, and a counter overflows. This should
+		 * never happen, since we're never running in extended
+		 * buffered log mode.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x51:
+		/*
+		 * Read buffered log (EOV). EOF processing occurs while the
+		 * CU is in extended buffered log mode. This should never
+		 * happen, since we're never running in extended buffered
+		 * log mode.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x52:
+		/* End of Volume complete. Rewind unload completed ok. */
+		if (request->op == TO_RUN) {
+			tape_med_state_set(device, MS_UNLOADED);
+			tape_34xx_delete_sbid_from(device, 0);
+			return tape_34xx_erp_succeeded(request);
+		}
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x53:
+		/* Global command intercept. */
+		return tape_34xx_erp_retry(request);
+	case 0x54:
+		/* Channel interface recovery (temporary). */
+		return tape_34xx_erp_retry(request);
+	case 0x55:
+		/* Channel interface recovery (permanent). */
+		dev_warn (&device->cdev->dev, "A channel interface error cannot be"
+			" recovered\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x56:
+		/* Channel protocol error. */
+		dev_warn (&device->cdev->dev, "A channel protocol error "
+			"occurred\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x57:
+		if (device->cdev->id.driver_info == tape_3480) {
+			/* Attention intercept. */
+			return tape_34xx_erp_retry(request);
+		} else {
+			/* Global status intercept. */
+			return tape_34xx_erp_retry(request);
+		}
+	case 0x5a:
+		/*
+		 * Tape length incompatible. The tape inserted is too long,
+		 * which could cause damage to the tape or the drive.
+		 */
+		dev_warn (&device->cdev->dev, "The tape unit does not support "
+			"the tape length\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x5b:
+		/* Format 3480 XF incompatible */
+		if (sense[1] & SENSE_BEGINNING_OF_TAPE)
+			/* The tape will get overwritten. */
+			return tape_34xx_erp_retry(request);
+		dev_warn (&device->cdev->dev, "The tape unit does not support"
+			" format 3480 XF\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x5c:
+		/* Format 3480-2 XF incompatible */
+		dev_warn (&device->cdev->dev, "The tape unit does not support tape "
+			"format 3480-2 XF\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x5d:
+		/* Tape length violation. */
+		dev_warn (&device->cdev->dev, "The tape unit does not support"
+			" the current tape length\n");
+		return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
+	case 0x5e:
+		/* Compaction algorithm incompatible. */
+		dev_warn (&device->cdev->dev, "The tape unit does not support"
+			" the compaction algorithm\n");
+		return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
+
+		/* The following erpas should have been covered earlier. */
+	case 0x23: /* Read data check. */
+	case 0x25: /* Write data check. */
+	case 0x26: /* Data check (read opposite). */
+	case 0x28: /* Write id mark check. */
+	case 0x31: /* Tape void. */
+	case 0x40: /* Overrun error. */
+	case 0x41: /* Record sequence error. */
+		/* All other erpas are reserved for future use. */
+	default:
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	}
+}
+
+/*
+ * 3480/3490 interrupt handler
+ */
+static int
+tape_34xx_irq(struct tape_device *device, struct tape_request *request,
+	      struct irb *irb)
+{
+	if (request == NULL)
+		return tape_34xx_unsolicited_irq(device, irb);
+
+	if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
+	    (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
+	    (request->op == TO_WRI)) {
+		/* Write at end of volume */
+		return tape_34xx_erp_failed(request, -ENOSPC);
+	}
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+		return tape_34xx_unit_check(device, request, irb);
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+		/*
+		 * A unit exception occurs on skipping over a tapemark block.
+		 */
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
+			if (request->op == TO_BSB || request->op == TO_FSB)
+				request->rescnt++;
+			else
+				DBF_EVENT(5, "Unit Exception!\n");
+		}
+		return tape_34xx_done(request);
+	}
+
+	DBF_EVENT(6, "xunknownirq\n");
+	tape_dump_sense_dbf(device, request, irb);
+	return TAPE_IO_STOP;
+}
+
+/*
+ * ioctl_overload
+ */
+static int
+tape_34xx_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
+{
+	if (cmd == TAPE390_DISPLAY) {
+		struct display_struct disp;
+
+		if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)) != 0)
+			return -EFAULT;
+
+		return tape_std_display(device, &disp);
+	} else
+		return -EINVAL;
+}
+
+static inline void
+tape_34xx_append_new_sbid(struct tape_34xx_block_id bid, struct list_head *l)
+{
+	struct tape_34xx_sbid *	new_sbid;
+
+	new_sbid = kmalloc(sizeof(*new_sbid), GFP_ATOMIC);
+	if (!new_sbid)
+		return;
+
+	new_sbid->bid = bid;
+	list_add(&new_sbid->list, l);
+}
+
+/*
+ * Build up the search block ID list. The block ID consists of a logical
+ * block number and a hardware specific part. The hardware specific part
+ * helps the tape drive to speed up searching for a specific block.
+ */
+static void
+tape_34xx_add_sbid(struct tape_device *device, struct tape_34xx_block_id bid)
+{
+	struct list_head *	sbid_list;
+	struct tape_34xx_sbid *	sbid;
+	struct list_head *	l;
+
+	/*
+	 * Immediately return if there is no list at all or the block to add
+	 * is located in segment 1 of wrap 0, because this position is used
+	 * if no hardware position data is supplied.
+	 */
+	sbid_list = (struct list_head *) device->discdata;
+	if (!sbid_list || (bid.segment < 2 && bid.wrap == 0))
+		return;
+
+	/*
+	 * Search the position where to insert the new entry. Hardware
+	 * acceleration uses only the segment and wrap number. So we
+	 * need only one entry for a specific wrap/segment combination.
+	 * If there is a block with a lower number but the same hard-
+	 * ware position data we just update the block number in the
+	 * existing entry.
+	 */
+	list_for_each(l, sbid_list) {
+		sbid = list_entry(l, struct tape_34xx_sbid, list);
+
+		if (
+			(sbid->bid.segment == bid.segment) &&
+			(sbid->bid.wrap    == bid.wrap)
+		) {
+			if (bid.block < sbid->bid.block)
+				sbid->bid = bid;
+			else
+				return;
+			break;
+		}
+
+		/* Sort in according to logical block number. */
+		if (bid.block < sbid->bid.block) {
+			tape_34xx_append_new_sbid(bid, l->prev);
+			break;
+		}
+	}
+	/* List empty or new block bigger than last entry. */
+	if (l == sbid_list)
+		tape_34xx_append_new_sbid(bid, l->prev);
+
+	DBF_LH(4, "Current list is:\n");
+	list_for_each(l, sbid_list) {
+		sbid = list_entry(l, struct tape_34xx_sbid, list);
+		DBF_LH(4, "%d:%03d@%05d\n",
+			sbid->bid.wrap,
+			sbid->bid.segment,
+			sbid->bid.block
+		);
+	}
+}
+
+/*
+ * Delete all entries from the search block ID list that belong to tape blocks
+ * equal or higher than the given number.
+ */
+static void
+tape_34xx_delete_sbid_from(struct tape_device *device, int from)
+{
+	struct list_head *	sbid_list;
+	struct tape_34xx_sbid *	sbid;
+	struct list_head *	l;
+	struct list_head *	n;
+
+	sbid_list = (struct list_head *) device->discdata;
+	if (!sbid_list)
+		return;
+
+	list_for_each_safe(l, n, sbid_list) {
+		sbid = list_entry(l, struct tape_34xx_sbid, list);
+		if (sbid->bid.block >= from) {
+			DBF_LH(4, "Delete sbid %d:%03d@%05d\n",
+				sbid->bid.wrap,
+				sbid->bid.segment,
+				sbid->bid.block
+			);
+			list_del(l);
+			kfree(sbid);
+		}
+	}
+}
+
+/*
+ * Merge hardware position data into a block id.
+ */
+static void
+tape_34xx_merge_sbid(
+	struct tape_device *		device,
+	struct tape_34xx_block_id *	bid
+) {
+	struct tape_34xx_sbid *	sbid;
+	struct tape_34xx_sbid *	sbid_to_use;
+	struct list_head *	sbid_list;
+	struct list_head *	l;
+
+	sbid_list = (struct list_head *) device->discdata;
+	bid->wrap    = 0;
+	bid->segment = 1;
+
+	if (!sbid_list || list_empty(sbid_list))
+		return;
+
+	sbid_to_use = NULL;
+	list_for_each(l, sbid_list) {
+		sbid = list_entry(l, struct tape_34xx_sbid, list);
+
+		if (sbid->bid.block >= bid->block)
+			break;
+		sbid_to_use = sbid;
+	}
+	if (sbid_to_use) {
+		bid->wrap    = sbid_to_use->bid.wrap;
+		bid->segment = sbid_to_use->bid.segment;
+		DBF_LH(4, "Use %d:%03d@%05d for %05d\n",
+			sbid_to_use->bid.wrap,
+			sbid_to_use->bid.segment,
+			sbid_to_use->bid.block,
+			bid->block
+		);
+	}
+}
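+
+/*
+ * Worked example (illustrative only, not part of the driver logic; the
+ * block numbers and positions below are made up): if blocks 100, 2500 and
+ * 7000 were seen with hardware positions wrap:segment 0:002, 0:005 and
+ * 1:002, tape_34xx_add_sbid keeps the list sorted by logical block number:
+ *
+ *	0:002@00100 -> 0:005@02500 -> 1:002@07000
+ *
+ * A locate request for block 5000 is then merged with the last entry below
+ * it (0:005@02500), so the drive can start its search in that segment.
+ */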
+
+static int
+tape_34xx_setup_device(struct tape_device * device)
+{
+	int			rc;
+	struct list_head *	discdata;
+
+	DBF_EVENT(6, "34xx device setup\n");
+	if ((rc = tape_std_assign(device)) == 0) {
+		if ((rc = tape_34xx_medium_sense(device)) != 0) {
+			DBF_LH(3, "34xx medium sense returned %d\n", rc);
+		}
+	}
+	discdata = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+	if (discdata) {
+		INIT_LIST_HEAD(discdata);
+		device->discdata = discdata;
+	}
+
+	return rc;
+}
+
+static void
+tape_34xx_cleanup_device(struct tape_device *device)
+{
+	tape_std_unassign(device);
+
+	if (device->discdata) {
+		tape_34xx_delete_sbid_from(device, 0);
+		kfree(device->discdata);
+		device->discdata = NULL;
+	}
+}
+
+
+/*
+ * MTTELL: Tell block. Return the block number relative to the current file.
+ */
+static int
+tape_34xx_mttell(struct tape_device *device, int mt_count)
+{
+	struct {
+		struct tape_34xx_block_id	cbid;
+		struct tape_34xx_block_id	dbid;
+	} __attribute__ ((packed)) block_id;
+	int rc;
+
+	rc = tape_std_read_block_id(device, (__u64 *) &block_id);
+	if (rc)
+		return rc;
+
+	tape_34xx_add_sbid(device, block_id.cbid);
+	return block_id.cbid.block;
+}
+
+/*
+ * MTSEEK: seek to the specified block.
+ */
+static int
+tape_34xx_mtseek(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct tape_34xx_block_id *	bid;
+
+	if (mt_count > 0x3fffff) {
+		DBF_EXCEPTION(6, "xsee parm\n");
+		return -EINVAL;
+	}
+	request = tape_alloc_request(3, 4);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+
+	/* setup ccws */
+	request->op = TO_LBL;
+	bid         = (struct tape_34xx_block_id *) request->cpdata;
+	bid->format = (*device->modeset_byte & 0x08) ?
+			TAPE34XX_FMT_3480_XF : TAPE34XX_FMT_3480;
+	bid->block  = mt_count;
+	tape_34xx_merge_sbid(device, bid);
+
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+#ifdef CONFIG_S390_TAPE_BLOCK
+/*
+ * Tape block read for 34xx.
+ */
+static struct tape_request *
+tape_34xx_bread(struct tape_device *device, struct request *req)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int count = 0;
+	unsigned off;
+	char *dst;
+	struct bio_vec *bv;
+	struct req_iterator iter;
+	struct tape_34xx_block_id *	start_block;
+
+	DBF_EVENT(6, "xBREDid:");
+
+	/* Count the number of blocks for the request. */
+	rq_for_each_segment(bv, req, iter)
+		count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
+
+	/* Allocate the ccw request. */
+	request = tape_alloc_request(3+count+1, 8);
+	if (IS_ERR(request))
+		return request;
+
+	/* Setup ccws. */
+	request->op = TO_BLOCK;
+	start_block = (struct tape_34xx_block_id *) request->cpdata;
+	start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
+	DBF_EVENT(6, "start_block = %i\n", start_block->block);
+
+	ccw = request->cpaddr;
+	ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
+
+	/*
+	 * We always setup a nop after the mode set ccw. This slot is
+	 * used in tape_std_check_locate to insert a locate ccw if the
+	 * current tape position doesn't match the start block to be read.
+	 * The second nop will be filled with a read block id which is in
+	 * turn used by tape_34xx_free_bread to populate the segment bid
+	 * table.
+	 */
+	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
+	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
+
+	rq_for_each_segment(bv, req, iter) {
+		dst = kmap(bv->bv_page) + bv->bv_offset;
+		for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
+			ccw->flags = CCW_FLAG_CC;
+			ccw->cmd_code = READ_FORWARD;
+			ccw->count = TAPEBLOCK_HSEC_SIZE;
+			set_normalized_cda(ccw, (void*) __pa(dst));
+			ccw++;
+			dst += TAPEBLOCK_HSEC_SIZE;
+		}
+	}
+
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+	DBF_EVENT(6, "xBREDccwg\n");
+	return request;
+}
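+
+/*
+ * Sketch of the channel program built above (illustrative only):
+ *
+ *	MODE_SET_DB -> NOP (locate slot) -> NOP (read block id slot)
+ *	-> READ_FORWARD x count -> NOP (end)
+ *
+ * tape_34xx_check_locate may later replace the two NOP slots with LOCATE
+ * and READ_BLOCK_ID ccws, and tape_34xx_free_bread evaluates the returned
+ * block id to update the search block ID list.
+ */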
+
+static void
+tape_34xx_free_bread (struct tape_request *request)
+{
+	struct ccw1* ccw;
+
+	ccw = request->cpaddr;
+	if ((ccw + 2)->cmd_code == READ_BLOCK_ID) {
+		struct {
+			struct tape_34xx_block_id	cbid;
+			struct tape_34xx_block_id	dbid;
+		} __attribute__ ((packed)) *rbi_data;
+
+		rbi_data = request->cpdata;
+
+		if (request->device)
+			tape_34xx_add_sbid(request->device, rbi_data->cbid);
+	}
+
+	/* Last ccw is a nop and doesn't need clear_normalized_cda */
+	for (; ccw->flags & CCW_FLAG_CC; ccw++)
+		if (ccw->cmd_code == READ_FORWARD)
+			clear_normalized_cda(ccw);
+	tape_free_request(request);
+}
+
+/*
+ * check_locate is called just before the tape request is passed to
+ * the common io layer for execution. It has to check the current
+ * tape position and insert a locate ccw if it doesn't match the
+ * start block for the request.
+ */
+static void
+tape_34xx_check_locate(struct tape_device *device, struct tape_request *request)
+{
+	struct tape_34xx_block_id *	start_block;
+
+	start_block = (struct tape_34xx_block_id *) request->cpdata;
+	if (start_block->block == device->blk_data.block_position)
+		return;
+
+	DBF_LH(4, "Block seek(%06d+%06d)\n", start_block->block, device->bof);
+	start_block->wrap    = 0;
+	start_block->segment = 1;
+	start_block->format  = (*device->modeset_byte & 0x08) ?
+				TAPE34XX_FMT_3480_XF :
+				TAPE34XX_FMT_3480;
+	start_block->block   = start_block->block + device->bof;
+	tape_34xx_merge_sbid(device, start_block);
+	tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
+	tape_ccw_cc(request->cpaddr + 2, READ_BLOCK_ID, 8, request->cpdata);
+}
+#endif
+
+/*
+ * List of 3480/3490 magnetic tape commands.
+ */
+static tape_mtop_fn tape_34xx_mtop[TAPE_NR_MTOPS] = {
+	[MTRESET]	 = tape_std_mtreset,
+	[MTFSF]		 = tape_std_mtfsf,
+	[MTBSF]		 = tape_std_mtbsf,
+	[MTFSR]		 = tape_std_mtfsr,
+	[MTBSR]		 = tape_std_mtbsr,
+	[MTWEOF]	 = tape_std_mtweof,
+	[MTREW]		 = tape_std_mtrew,
+	[MTOFFL]	 = tape_std_mtoffl,
+	[MTNOP]		 = tape_std_mtnop,
+	[MTRETEN]	 = tape_std_mtreten,
+	[MTBSFM]	 = tape_std_mtbsfm,
+	[MTFSFM]	 = tape_std_mtfsfm,
+	[MTEOM]		 = tape_std_mteom,
+	[MTERASE]	 = tape_std_mterase,
+	[MTRAS1]	 = NULL,
+	[MTRAS2]	 = NULL,
+	[MTRAS3]	 = NULL,
+	[MTSETBLK]	 = tape_std_mtsetblk,
+	[MTSETDENSITY]	 = NULL,
+	[MTSEEK]	 = tape_34xx_mtseek,
+	[MTTELL]	 = tape_34xx_mttell,
+	[MTSETDRVBUFFER] = NULL,
+	[MTFSS]		 = NULL,
+	[MTBSS]		 = NULL,
+	[MTWSM]		 = NULL,
+	[MTLOCK]	 = NULL,
+	[MTUNLOCK]	 = NULL,
+	[MTLOAD]	 = tape_std_mtload,
+	[MTUNLOAD]	 = tape_std_mtunload,
+	[MTCOMPRESSION]	 = tape_std_mtcompression,
+	[MTSETPART]	 = NULL,
+	[MTMKPART]	 = NULL
+};
+
+/*
+ * Tape discipline structure for 3480 and 3490.
+ */
+static struct tape_discipline tape_discipline_34xx = {
+	.owner = THIS_MODULE,
+	.setup_device = tape_34xx_setup_device,
+	.cleanup_device = tape_34xx_cleanup_device,
+	.process_eov = tape_std_process_eov,
+	.irq = tape_34xx_irq,
+	.read_block = tape_std_read_block,
+	.write_block = tape_std_write_block,
+#ifdef CONFIG_S390_TAPE_BLOCK
+	.bread = tape_34xx_bread,
+	.free_bread = tape_34xx_free_bread,
+	.check_locate = tape_34xx_check_locate,
+#endif
+	.ioctl_fn = tape_34xx_ioctl,
+	.mtop_array = tape_34xx_mtop
+};
+
+static struct ccw_device_id tape_34xx_ids[] = {
+	{ CCW_DEVICE_DEVTYPE(0x3480, 0, 0x3480, 0), .driver_info = tape_3480},
+	{ CCW_DEVICE_DEVTYPE(0x3490, 0, 0x3490, 0), .driver_info = tape_3490},
+	{ /* end of list */ },
+};
+
+static int
+tape_34xx_online(struct ccw_device *cdev)
+{
+	return tape_generic_online(
+		dev_get_drvdata(&cdev->dev),
+		&tape_discipline_34xx
+	);
+}
+
+static struct ccw_driver tape_34xx_driver = {
+	.driver = {
+		.name = "tape_34xx",
+		.owner = THIS_MODULE,
+	},
+	.ids = tape_34xx_ids,
+	.probe = tape_generic_probe,
+	.remove = tape_generic_remove,
+	.set_online = tape_34xx_online,
+	.set_offline = tape_generic_offline,
+	.freeze = tape_generic_pm_suspend,
+	.int_class = IOINT_TAP,
+};
+
+static int
+tape_34xx_init (void)
+{
+	int rc;
+
+	TAPE_DBF_AREA = debug_register("tape_34xx", 2, 2, 4 * sizeof(long));
+	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
+#ifdef DBF_LIKE_HELL
+	debug_set_level(TAPE_DBF_AREA, 6);
+#endif
+
+	DBF_EVENT(3, "34xx init\n");
+	/* Register driver for 3480/3490 tapes. */
+	rc = ccw_driver_register(&tape_34xx_driver);
+	if (rc)
+		DBF_EVENT(3, "34xx init failed\n");
+	else
+		DBF_EVENT(3, "34xx registered\n");
+	return rc;
+}
+
+static void
+tape_34xx_exit(void)
+{
+	ccw_driver_unregister(&tape_34xx_driver);
+
+	debug_unregister(TAPE_DBF_AREA);
+}
+
+MODULE_DEVICE_TABLE(ccw, tape_34xx_ids);
+MODULE_AUTHOR("(C) 2001-2002 IBM Deutschland Entwicklung GmbH");
+MODULE_DESCRIPTION("Linux on zSeries channel attached 3480 tape device driver");
+MODULE_LICENSE("GPL");
+
+module_init(tape_34xx_init);
+module_exit(tape_34xx_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_3590.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_3590.c
new file mode 100644
index 0000000..49c6aab
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_3590.c
@@ -0,0 +1,1812 @@
+/*
+ *  drivers/s390/char/tape_3590.c
+ *    tape device discipline for 3590 tapes.
+ *
+ *    Copyright IBM Corp. 2001, 2009
+ *    Author(s): Stefan Bader <shbader@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "tape_3590"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <asm/ebcdic.h>
+
+#define TAPE_DBF_AREA	tape_3590_dbf
+#define BUFSIZE 512	/* size of buffers for dynamic generated messages */
+
+#include "tape.h"
+#include "tape_std.h"
+#include "tape_3590.h"
+
+static struct workqueue_struct *tape_3590_wq;
+
+/*
+ * Pointer to debug area.
+ */
+debug_info_t *TAPE_DBF_AREA = NULL;
+EXPORT_SYMBOL(TAPE_DBF_AREA);
+
+/*******************************************************************
+ * Error Recovery functions:
+ * - Read Opposite:		 implemented
+ * - Read Device (buffered) log: BRA
+ * - Read Library log:		 BRA
+ * - Swap Devices:		 BRA
+ * - Long Busy:			 implemented
+ * - Special Intercept:		 BRA
+ * - Read Alternate:		 implemented
+ *******************************************************************/
+
+static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
+	[0x00] = "",
+	[0x10] = "Lost Sense",
+	[0x11] = "Assigned Elsewhere",
+	[0x12] = "Allegiance Reset",
+	[0x13] = "Shared Access Violation",
+	[0x20] = "Command Reject",
+	[0x21] = "Configuration Error",
+	[0x22] = "Protection Exception",
+	[0x23] = "Write Protect",
+	[0x24] = "Write Length",
+	[0x25] = "Read-Only Format",
+	[0x31] = "Beginning of Partition",
+	[0x33] = "End of Partition",
+	[0x34] = "End of Data",
+	[0x35] = "Block not found",
+	[0x40] = "Device Intervention",
+	[0x41] = "Loader Intervention",
+	[0x42] = "Library Intervention",
+	[0x50] = "Write Error",
+	[0x51] = "Erase Error",
+	[0x52] = "Formatting Error",
+	[0x53] = "Read Error",
+	[0x54] = "Unsupported Format",
+	[0x55] = "No Formatting",
+	[0x56] = "Positioning lost",
+	[0x57] = "Read Length",
+	[0x60] = "Unsupported Medium",
+	[0x61] = "Medium Length Error",
+	[0x62] = "Medium removed",
+	[0x64] = "Load Check",
+	[0x65] = "Unload Check",
+	[0x70] = "Equipment Check",
+	[0x71] = "Bus out Check",
+	[0x72] = "Protocol Error",
+	[0x73] = "Interface Error",
+	[0x74] = "Overrun",
+	[0x75] = "Halt Signal",
+	[0x90] = "Device fenced",
+	[0x91] = "Device Path fenced",
+	[0xa0] = "Volume misplaced",
+	[0xa1] = "Volume inaccessible",
+	[0xa2] = "Volume in input",
+	[0xa3] = "Volume ejected",
+	[0xa4] = "All categories reserved",
+	[0xa5] = "Duplicate Volume",
+	[0xa6] = "Library Manager Offline",
+	[0xa7] = "Library Output Station full",
+	[0xa8] = "Vision System non-operational",
+	[0xa9] = "Library Manager Equipment Check",
+	[0xaa] = "Library Equipment Check",
+	[0xab] = "All Library Cells full",
+	[0xac] = "No Cleaner Volumes in Library",
+	[0xad] = "I/O Station door open",
+	[0xae] = "Subsystem environmental alert",
+};
+
+static int crypt_supported(struct tape_device *device)
+{
+	return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device));
+}
+
+static int crypt_enabled(struct tape_device *device)
+{
+	return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device));
+}
+
+static void ext_to_int_kekl(struct tape390_kekl *in,
+			    struct tape3592_kekl *out)
+{
+	int i;
+
+	memset(out, 0, sizeof(*out));
+	if (in->type == TAPE390_KEKL_TYPE_HASH)
+		out->flags |= 0x40;
+	if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH)
+		out->flags |= 0x80;
+	strncpy(out->label, in->label, 64);
+	for (i = strlen(in->label); i < sizeof(out->label); i++)
+		out->label[i] = ' ';
+	ASCEBC(out->label, sizeof(out->label));
+}
+
+static void int_to_ext_kekl(struct tape3592_kekl *in,
+			    struct tape390_kekl *out)
+{
+	memset(out, 0, sizeof(*out));
+	if (in->flags & 0x40)
+		out->type = TAPE390_KEKL_TYPE_HASH;
+	else
+		out->type = TAPE390_KEKL_TYPE_LABEL;
+	if (in->flags & 0x80)
+		out->type_on_tape = TAPE390_KEKL_TYPE_HASH;
+	else
+		out->type_on_tape = TAPE390_KEKL_TYPE_LABEL;
+	memcpy(out->label, in->label, sizeof(in->label));
+	EBCASC(out->label, sizeof(in->label));
+	strim(out->label);
+}
+
+static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
+				 struct tape390_kekl_pair *out)
+{
+	if (in->count == 0) {
+		out->kekl[0].type = TAPE390_KEKL_TYPE_NONE;
+		out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE;
+		out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
+		out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
+	} else if (in->count == 1) {
+		int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
+		out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
+		out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
+	} else if (in->count == 2) {
+		int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
+		int_to_ext_kekl(&in->kekl[1], &out->kekl[1]);
+	} else {
+		printk("Invalid KEKL number: %d\n", in->count);
+		BUG();
+	}
+}
+
+static int check_ext_kekl(struct tape390_kekl *kekl)
+{
+	if (kekl->type == TAPE390_KEKL_TYPE_NONE)
+		goto invalid;
+	if (kekl->type > TAPE390_KEKL_TYPE_HASH)
+		goto invalid;
+	if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE)
+		goto invalid;
+	if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH)
+		goto invalid;
+	if ((kekl->type == TAPE390_KEKL_TYPE_HASH) &&
+	    (kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL))
+		goto invalid;
+
+	return 0;
+invalid:
+	return -EINVAL;
+}
+
+static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls)
+{
+	if (check_ext_kekl(&kekls->kekl[0]))
+		goto invalid;
+	if (check_ext_kekl(&kekls->kekl[1]))
+		goto invalid;
+
+	return 0;
+invalid:
+	return -EINVAL;
+}
+
+/*
+ * Query KEKLs
+ */
+static int tape_3592_kekl_query(struct tape_device *device,
+				struct tape390_kekl_pair *ext_kekls)
+{
+	struct tape_request *request;
+	struct tape3592_kekl_query_order *order;
+	struct tape3592_kekl_query_data *int_kekls;
+	int rc;
+
+	DBF_EVENT(6, "tape3592_kekl_query\n");
+	int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA);
+	if (!int_kekls)
+		return -ENOMEM;
+	request = tape_alloc_request(2, sizeof(*order));
+	if (IS_ERR(request)) {
+		rc = PTR_ERR(request);
+		goto fail_malloc;
+	}
+	order = request->cpdata;
+	memset(order, 0, sizeof(*order));
+	order->code = 0xe2;
+	order->max_count = 2;
+	request->op = TO_KEKL_QUERY;
+	tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
+	tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls),
+		     int_kekls);
+	rc = tape_do_io(device, request);
+	if (rc)
+		goto fail_request;
+	int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls);
+
+	rc = 0;
+fail_request:
+	tape_free_request(request);
+fail_malloc:
+	kfree(int_kekls);
+	return rc;
+}
+
+/*
+ * IOCTL: Query KEKLs
+ */
+static int tape_3592_ioctl_kekl_query(struct tape_device *device,
+				      unsigned long arg)
+{
+	int rc;
+	struct tape390_kekl_pair *ext_kekls;
+
+	DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n");
+	if (!crypt_supported(device))
+		return -ENOSYS;
+	if (!crypt_enabled(device))
+		return -EUNATCH;
+	ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
+	if (!ext_kekls)
+		return -ENOMEM;
+	rc = tape_3592_kekl_query(device, ext_kekls);
+	if (rc != 0)
+		goto fail;
+	if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) {
+		rc = -EFAULT;
+		goto fail;
+	}
+	rc = 0;
+fail:
+	kfree(ext_kekls);
+	return rc;
+}
+
+static int tape_3590_mttell(struct tape_device *device, int mt_count);
+
+/*
+ * Set KEKLs
+ */
+static int tape_3592_kekl_set(struct tape_device *device,
+			      struct tape390_kekl_pair *ext_kekls)
+{
+	struct tape_request *request;
+	struct tape3592_kekl_set_order *order;
+
+	DBF_EVENT(6, "tape3592_kekl_set\n");
+	if (check_ext_kekl_pair(ext_kekls)) {
+		DBF_EVENT(6, "invalid kekls\n");
+		return -EINVAL;
+	}
+	if (tape_3590_mttell(device, 0) != 0)
+		return -EBADSLT;
+	request = tape_alloc_request(1, sizeof(*order));
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	order = request->cpdata;
+	memset(order, 0, sizeof(*order));
+	order->code = 0xe3;
+	order->kekls.count = 2;
+	ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]);
+	ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]);
+	request->op = TO_KEKL_SET;
+	tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
+
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * IOCTL: Set KEKLs
+ */
+static int tape_3592_ioctl_kekl_set(struct tape_device *device,
+				    unsigned long arg)
+{
+	int rc;
+	struct tape390_kekl_pair *ext_kekls;
+
+	DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n");
+	if (!crypt_supported(device))
+		return -ENOSYS;
+	if (!crypt_enabled(device))
+		return -EUNATCH;
+	ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
+	if (!ext_kekls)
+		return -ENOMEM;
+	if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) {
+		rc = -EFAULT;
+		goto out;
+	}
+	rc = tape_3592_kekl_set(device, ext_kekls);
+out:
+	kfree(ext_kekls);
+	return rc;
+}
+
+/*
+ * Enable encryption
+ */
+static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device)
+{
+	struct tape_request *request;
+	char *data;
+
+	DBF_EVENT(6, "tape_3592_enable_crypt\n");
+	if (!crypt_supported(device))
+		return ERR_PTR(-ENOSYS);
+	request = tape_alloc_request(2, 72);
+	if (IS_ERR(request))
+		return request;
+	data = request->cpdata;
+	memset(data, 0, 72);
+
+	data[0]       = 0x05;
+	data[36 + 0]  = 0x03;
+	data[36 + 1]  = 0x03;
+	data[36 + 4]  = 0x40;
+	data[36 + 6]  = 0x01;
+	data[36 + 14] = 0x2f;
+	data[36 + 18] = 0xc3;
+	data[36 + 35] = 0x72;
+	request->op = TO_CRYPT_ON;
+	tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
+	tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
+	return request;
+}
+
+static int tape_3592_enable_crypt(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_enable_crypt(device);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	return tape_do_io_free(device, request);
+}
+
+static void tape_3592_enable_crypt_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_enable_crypt(device);
+	if (!IS_ERR(request))
+		tape_do_io_async_free(device, request);
+}
+
+/*
+ * Disable encryption
+ */
+static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device)
+{
+	struct tape_request *request;
+	char *data;
+
+	DBF_EVENT(6, "tape_3592_disable_crypt\n");
+	if (!crypt_supported(device))
+		return ERR_PTR(-ENOSYS);
+	request = tape_alloc_request(2, 72);
+	if (IS_ERR(request))
+		return request;
+	data = request->cpdata;
+	memset(data, 0, 72);
+
+	data[0]       = 0x05;
+	data[36 + 0]  = 0x03;
+	data[36 + 1]  = 0x03;
+	data[36 + 35] = 0x32;
+
+	request->op = TO_CRYPT_OFF;
+	tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
+	tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
+
+	return request;
+}
+
+static int tape_3592_disable_crypt(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_disable_crypt(device);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	return tape_do_io_free(device, request);
+}
+
+static void tape_3592_disable_crypt_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_disable_crypt(device);
+	if (!IS_ERR(request))
+		tape_do_io_async_free(device, request);
+}
+
+/*
+ * IOCTL: Set encryption status
+ */
+static int tape_3592_ioctl_crypt_set(struct tape_device *device,
+				     unsigned long arg)
+{
+	struct tape390_crypt_info info;
+
+	DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n");
+	if (!crypt_supported(device))
+		return -ENOSYS;
+	if (copy_from_user(&info, (char __user *)arg, sizeof(info)))
+		return -EFAULT;
+	if (info.status & ~TAPE390_CRYPT_ON_MASK)
+		return -EINVAL;
+	if (info.status & TAPE390_CRYPT_ON_MASK)
+		return tape_3592_enable_crypt(device);
+	else
+		return tape_3592_disable_crypt(device);
+}
+
+static int tape_3590_sense_medium(struct tape_device *device);
+
+/*
+ * IOCTL: Query encryption status
+ */
+static int tape_3592_ioctl_crypt_query(struct tape_device *device,
+				       unsigned long arg)
+{
+	DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n");
+	if (!crypt_supported(device))
+		return -ENOSYS;
+	tape_3590_sense_medium(device);
+	if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device),
+		sizeof(TAPE_3590_CRYPT_INFO(device))))
+		return -EFAULT;
+	else
+		return 0;
+}
+
+/*
+ * 3590 IOCTL Overload
+ */
+static int
+tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case TAPE390_DISPLAY: {
+		struct display_struct disp;
+
+		if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)))
+			return -EFAULT;
+
+		return tape_std_display(device, &disp);
+	}
+	case TAPE390_KEKL_SET:
+		return tape_3592_ioctl_kekl_set(device, arg);
+	case TAPE390_KEKL_QUERY:
+		return tape_3592_ioctl_kekl_query(device, arg);
+	case TAPE390_CRYPT_SET:
+		return tape_3592_ioctl_crypt_set(device, arg);
+	case TAPE390_CRYPT_QUERY:
+		return tape_3592_ioctl_crypt_query(device, arg);
+	default:
+		return -EINVAL;	/* no additional ioctls */
+	}
+}
+
+/*
+ * SENSE Medium: Get Sense data about medium state
+ */
+static int tape_3590_sense_medium(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(1, 128);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
+	return tape_do_io_free(device, request);
+}
+
+static void tape_3590_sense_medium_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(1, 128);
+	if (IS_ERR(request))
+		return;
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
+	tape_do_io_async_free(device, request);
+}
+
+/*
+ * MTTELL: Tell block. Return the block number relative to the current file.
+ */
+static int
+tape_3590_mttell(struct tape_device *device, int mt_count)
+{
+	__u64 block_id;
+	int rc;
+
+	rc = tape_std_read_block_id(device, &block_id);
+	if (rc)
+		return rc;
+	return block_id >> 32;
+}
+
+/*
+ * MTSEEK: seek to the specified block.
+ */
+static int
+tape_3590_mtseek(struct tape_device *device, int count)
+{
+	struct tape_request *request;
+
+	DBF_EVENT(6, "xsee id: %x\n", count);
+	request = tape_alloc_request(3, 4);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_LBL;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	*(__u32 *) request->cpdata = count;
+	tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * Read Opposite Error Recovery Function:
+ * Used, when Read Forward does not work
+ */
+static void
+tape_3590_read_opposite(struct tape_device *device,
+			struct tape_request *request)
+{
+	struct tape_3590_disc_data *data;
+
+	/*
+	 * We have allocated 4 ccws in tape_std_read, so we can now
+	 * transform the request to a read backward, followed by a
+	 * forward space block.
+	 */
+	request->op = TO_RBA;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	data = device->discdata;
+	tape_ccw_cc_idal(request->cpaddr + 1, data->read_back_op,
+			 device->char_data.idal_buf);
+	tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
+	tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
+	DBF_EVENT(6, "xrop ccwg\n");
+}
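+
+/*
+ * The transformed channel program then looks like this (illustrative only):
+ *
+ *	MODE_SET_DB -> READ_BACKWARD/READ_PREVIOUS -> FORSPACEBLOCK -> NOP
+ *
+ * i.e. the block is read in the opposite direction and the tape is spaced
+ * forward again, so the position matches that of a successful read forward.
+ */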
+
+/*
+ * Read Attention Msg
+ * This should be done after an interrupt with attention bit (0x80)
+ * in device state.
+ *
+ * After a "read attention message" request there are two possible
+ * results:
+ *
+ * 1. A unit check is presented, when attention sense is present (e.g. when
+ * a medium has been unloaded). The attention sense comes then
+ * together with the unit check. The recovery action is either "retry"
+ * (in case there is an attention message pending) or "permanent error".
+ *
+ * 2. The attention msg is written to the "read subsystem data" buffer.
+ * In this case we probably should print it to the console.
+ */
+static void tape_3590_read_attmsg_async(struct tape_device *device)
+{
+	struct tape_request *request;
+	char *buf;
+
+	request = tape_alloc_request(3, 4096);
+	if (IS_ERR(request))
+		return;
+	request->op = TO_READ_ATTMSG;
+	buf = request->cpdata;
+	buf[0] = PREP_RD_SS_DATA;
+	buf[6] = RD_ATTMSG;	/* read att msg */
+	tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf);
+	tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+	tape_do_io_async_free(device, request);
+}
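+
+/*
+ * Buffer layout used above (illustrative): the first 12 bytes hold the
+ * "prepare to read subsystem data" order (buf[0] = PREP_RD_SS_DATA,
+ * buf[6] = RD_ATTMSG); the remaining 4096 - 12 bytes receive the
+ * attention message read by the READ_SS_DATA ccw.
+ */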
+
+/*
+ * These functions are used to schedule follow-up actions from within an
+ * interrupt context (like unsolicited interrupts).
+ * Note: the work handler is called from the tape_3590_wq workqueue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur, e.g. in case of a deferred cc=1 (see __tape_do_irq).
+ */
+struct work_handler_data {
+	struct tape_device *device;
+	enum tape_op        op;
+	struct work_struct  work;
+};
+
+static void
+tape_3590_work_handler(struct work_struct *work)
+{
+	struct work_handler_data *p =
+		container_of(work, struct work_handler_data, work);
+
+	switch (p->op) {
+	case TO_MSEN:
+		tape_3590_sense_medium_async(p->device);
+		break;
+	case TO_READ_ATTMSG:
+		tape_3590_read_attmsg_async(p->device);
+		break;
+	case TO_CRYPT_ON:
+		tape_3592_enable_crypt_async(p->device);
+		break;
+	case TO_CRYPT_OFF:
+		tape_3592_disable_crypt_async(p->device);
+		break;
+	default:
+		DBF_EVENT(3, "T3590: work handler undefined for "
+			  "operation 0x%02x\n", p->op);
+	}
+	tape_put_device(p->device);
+	kfree(p);
+}
+
+static int
+tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
+{
+	struct work_handler_data *p;
+
+	if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
+		return -ENOMEM;
+
+	INIT_WORK(&p->work, tape_3590_work_handler);
+
+	p->device = tape_get_device(device);
+	p->op = op;
+
+	queue_work(tape_3590_wq, &p->work);
+	return 0;
+}
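+
+/*
+ * Typical use (illustrative): an interrupt handler must not start
+ * synchronous tape I/O, so it defers e.g. a medium sense to process
+ * context:
+ *
+ *	tape_3590_schedule_work(device, TO_MSEN);
+ *
+ * The work handler then issues the command asynchronously via
+ * tape_3590_sense_medium_async().
+ */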
+
+#ifdef CONFIG_S390_TAPE_BLOCK
+/*
+ * Tape Block READ
+ */
+static struct tape_request *
+tape_3590_bread(struct tape_device *device, struct request *req)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int count = 0, start_block;
+	unsigned off;
+	char *dst;
+	struct bio_vec *bv;
+	struct req_iterator iter;
+
+	DBF_EVENT(6, "xBREDid:");
+	start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
+	DBF_EVENT(6, "start_block = %i\n", start_block);
+
+	rq_for_each_segment(bv, req, iter)
+		count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
+
+	request = tape_alloc_request(2 + count + 1, 4);
+	if (IS_ERR(request))
+		return request;
+	request->op = TO_BLOCK;
+	*(__u32 *) request->cpdata = start_block;
+	ccw = request->cpaddr;
+	ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
+
+	/*
+	 * We always setup a nop after the mode set ccw. This slot is
+	 * used in tape_std_check_locate to insert a locate ccw if the
+	 * current tape position doesn't match the start block to be read.
+	 */
+	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
+
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
+			ccw->flags = CCW_FLAG_CC;
+			ccw->cmd_code = READ_FORWARD;
+			ccw->count = TAPEBLOCK_HSEC_SIZE;
+			set_normalized_cda(ccw, (void *) __pa(dst));
+			ccw++;
+			dst += TAPEBLOCK_HSEC_SIZE;
+		}
+		BUG_ON(off > bv->bv_len);
+	}
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+	DBF_EVENT(6, "xBREDccwg\n");
+	return request;
+}
+
+static void
+tape_3590_free_bread(struct tape_request *request)
+{
+	struct ccw1 *ccw;
+
+	/* Last ccw is a nop and doesn't need clear_normalized_cda */
+	for (ccw = request->cpaddr; ccw->flags & CCW_FLAG_CC; ccw++)
+		if (ccw->cmd_code == READ_FORWARD)
+			clear_normalized_cda(ccw);
+	tape_free_request(request);
+}
+
+/*
+ * check_locate is called just before the tape request is passed to
+ * the common io layer for execution. It has to check the current
+ * tape position and insert a locate ccw if it doesn't match the
+ * start block for the request.
+ */
+static void
+tape_3590_check_locate(struct tape_device *device, struct tape_request *request)
+{
+	__u32 *start_block;
+
+	start_block = (__u32 *) request->cpdata;
+	if (*start_block != device->blk_data.block_position) {
+		/* Add the start offset of the file to get the real block. */
+		*start_block += device->bof;
+		tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
+	}
+}
+#endif
+
+static void tape_3590_med_state_set(struct tape_device *device,
+				    struct tape_3590_med_sense *sense)
+{
+	struct tape390_crypt_info *c_info;
+
+	c_info = &TAPE_3590_CRYPT_INFO(device);
+
+	DBF_EVENT(6, "medium state: %x:%x\n", sense->macst, sense->masst);
+	switch (sense->macst) {
+	case 0x04:
+	case 0x05:
+	case 0x06:
+		tape_med_state_set(device, MS_UNLOADED);
+		TAPE_3590_CRYPT_INFO(device).medium_status = 0;
+		return;
+	case 0x08:
+	case 0x09:
+		tape_med_state_set(device, MS_LOADED);
+		break;
+	default:
+		tape_med_state_set(device, MS_UNKNOWN);
+		return;
+	}
+	c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK;
+	if (sense->flags & MSENSE_CRYPT_MASK) {
+		DBF_EVENT(6, "Medium is encrypted (%04x)\n", sense->flags);
+		c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK;
+	} else	{
+		DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags);
+		c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK;
+	}
+}
+
+/*
+ * The done handler is called at device/channel end and wakes up the sleeping
+ * process
+ */
+static int
+tape_3590_done(struct tape_device *device, struct tape_request *request)
+{
+
+	DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
+
+	switch (request->op) {
+	case TO_BSB:
+	case TO_BSF:
+	case TO_DSE:
+	case TO_FSB:
+	case TO_FSF:
+	case TO_LBL:
+	case TO_RFO:
+	case TO_RBA:
+	case TO_REW:
+	case TO_WRI:
+	case TO_WTM:
+	case TO_BLOCK:
+	case TO_LOAD:
+		tape_med_state_set(device, MS_LOADED);
+		break;
+	case TO_RUN:
+		tape_med_state_set(device, MS_UNLOADED);
+		tape_3590_schedule_work(device, TO_CRYPT_OFF);
+		break;
+	case TO_MSEN:
+		tape_3590_med_state_set(device, request->cpdata);
+		break;
+	case TO_CRYPT_ON:
+		TAPE_3590_CRYPT_INFO(device).status
+			|= TAPE390_CRYPT_ON_MASK;
+		*(device->modeset_byte) |= 0x03;
+		break;
+	case TO_CRYPT_OFF:
+		TAPE_3590_CRYPT_INFO(device).status
+			&= ~TAPE390_CRYPT_ON_MASK;
+		*(device->modeset_byte) &= ~0x03;
+		break;
+	case TO_RBI:	/* RBI seems to succeed even without medium loaded. */
+	case TO_NOP:	/* Same for NOP. */
+	case TO_READ_CONFIG:
+	case TO_READ_ATTMSG:
+	case TO_DIS:
+	case TO_ASSIGN:
+	case TO_UNASSIGN:
+	case TO_SIZE:
+	case TO_KEKL_SET:
+	case TO_KEKL_QUERY:
+	case TO_RDC:
+		break;
+	}
+	return TAPE_IO_SUCCESS;
+}
+
+/*
+ * This function is called when error recovery was successful.
+ */
+static inline int
+tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request)
+{
+	DBF_EVENT(3, "Error Recovery successful for %s\n",
+		  tape_op_verbose[request->op]);
+	return tape_3590_done(device, request);
+}
+
+/*
+ * This function is called when error recovery was not successful.
+ */
+static inline int
+tape_3590_erp_failed(struct tape_device *device, struct tape_request *request,
+		     struct irb *irb, int rc)
+{
+	DBF_EVENT(3, "Error Recovery failed for %s\n",
+		  tape_op_verbose[request->op]);
+	tape_dump_sense_dbf(device, request, irb);
+	return rc;
+}
+
+/*
+ * Error Recovery do retry
+ */
+static inline int
+tape_3590_erp_retry(struct tape_device *device, struct tape_request *request,
+		    struct irb *irb)
+{
+	DBF_EVENT(2, "Retry: %s\n", tape_op_verbose[request->op]);
+	tape_dump_sense_dbf(device, request, irb);
+	return TAPE_IO_RETRY;
+}
+
+/*
+ * Handle unsolicited interrupts
+ */
+static int
+tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb)
+{
+	if (irb->scsw.cmd.dstat == DEV_STAT_CHN_END)
+		/* Probably result of halt ssch */
+		return TAPE_IO_PENDING;
+	else if (irb->scsw.cmd.dstat == 0x85)
+		/* Device Ready */
+		DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id);
+	else if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		tape_3590_schedule_work(device, TO_READ_ATTMSG);
+	} else {
+		DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
+		tape_dump_sense_dbf(device, NULL, irb);
+	}
+	/* check medium state */
+	tape_3590_schedule_work(device, TO_MSEN);
+	return TAPE_IO_SUCCESS;
+}
+
+/*
+ * Basic Recovery routine
+ */
+static int
+tape_3590_erp_basic(struct tape_device *device, struct tape_request *request,
+		    struct irb *irb, int rc)
+{
+	struct tape_3590_sense *sense;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+
+	switch (sense->bra) {
+	case SENSE_BRA_PER:
+		return tape_3590_erp_failed(device, request, irb, rc);
+	case SENSE_BRA_CONT:
+		return tape_3590_erp_succeded(device, request);
+	case SENSE_BRA_RE:
+		return tape_3590_erp_retry(device, request, irb);
+	case SENSE_BRA_DRE:
+		return tape_3590_erp_failed(device, request, irb, rc);
+	default:
+		BUG();
+		return TAPE_IO_STOP;
+	}
+}
+
+/*
+ *  RDL: Read Device (buffered) log
+ */
+static int
+tape_3590_erp_read_buf_log(struct tape_device *device,
+			   struct tape_request *request, struct irb *irb)
+{
+	/*
+	 * We just do the basic error recovery at the moment (retry).
+	 * Perhaps in the future we will read the log and dump it somewhere.
+	 */
+	return tape_3590_erp_basic(device, request, irb, -EIO);
+}
+
+/*
+ *  SWAP: Swap Devices
+ */
+static int
+tape_3590_erp_swap(struct tape_device *device, struct tape_request *request,
+		   struct irb *irb)
+{
+	/*
+	 * This error recovery should swap the tapes
+	 * if the original has a problem. The operation
+	 * should proceed with the new tape... this
+	 * should probably be done in user space!
+	 */
+	dev_warn (&device->cdev->dev, "The tape medium must be loaded into a "
+		"different tape unit\n");
+	return tape_3590_erp_basic(device, request, irb, -EIO);
+}
+
+/*
+ *  LBY: Long Busy
+ */
+static int
+tape_3590_erp_long_busy(struct tape_device *device,
+			struct tape_request *request, struct irb *irb)
+{
+	DBF_EVENT(6, "Device is busy\n");
+	return TAPE_IO_LONG_BUSY;
+}
+
+/*
+ *  SPI: Special Intercept
+ */
+static int
+tape_3590_erp_special_interrupt(struct tape_device *device,
+				struct tape_request *request, struct irb *irb)
+{
+	return tape_3590_erp_basic(device, request, irb, -EIO);
+}
+
+/*
+ *  RDA: Read Alternate
+ */
+static int
+tape_3590_erp_read_alternate(struct tape_device *device,
+			     struct tape_request *request, struct irb *irb)
+{
+	struct tape_3590_disc_data *data;
+
+	/*
+	 * The issued Read Backward or Read Previous command is not
+	 * supported by the device.
+	 * The recovery action is to issue the other command:
+	 * Read Previous: if Read Backward is not supported
+	 * Read Backward: if Read Previous is not supported
+	 */
+	data = device->discdata;
+	if (data->read_back_op == READ_PREVIOUS) {
+		DBF_EVENT(2, "(%08x): No support for READ_PREVIOUS command\n",
+			  device->cdev_id);
+		data->read_back_op = READ_BACKWARD;
+	} else {
+		DBF_EVENT(2, "(%08x): No support for READ_BACKWARD command\n",
+			  device->cdev_id);
+		data->read_back_op = READ_PREVIOUS;
+	}
+	tape_3590_read_opposite(device, request);
+	return tape_3590_erp_retry(device, request, irb);
+}
+
+/*
+ * Error Recovery read opposite
+ */
+static int
+tape_3590_erp_read_opposite(struct tape_device *device,
+			    struct tape_request *request, struct irb *irb)
+{
+	switch (request->op) {
+	case TO_RFO:
+		/*
+		 * We did read forward, but the data could not be read.
+		 * We will read backward and then skip forward again.
+		 */
+		tape_3590_read_opposite(device, request);
+		return tape_3590_erp_retry(device, request, irb);
+	case TO_RBA:
+		/* We tried to read forward and backward, but had no success. */
+		return tape_3590_erp_failed(device, request, irb, -EIO);
+	default:
+		return tape_3590_erp_failed(device, request, irb, -EIO);
+	}
+}
+
+/*
+ * Print an MIM (Media Information Message) (message code f0)
+ */
+static void
+tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb)
+{
+	struct tape_3590_sense *sense;
+	char *exception, *service;
+
+	exception = kmalloc(BUFSIZE, GFP_ATOMIC);
+	service = kmalloc(BUFSIZE, GFP_ATOMIC);
+
+	if (!exception || !service)
+		goto out_nomem;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+	/* Exception Message */
+	switch (sense->fmt.f70.emc) {
+	case 0x02:
+		snprintf(exception, BUFSIZE, "Data degraded");
+		break;
+	case 0x03:
+		snprintf(exception, BUFSIZE, "Data degraded in partion %i",
+			sense->fmt.f70.mp);
+		break;
+	case 0x04:
+		snprintf(exception, BUFSIZE, "Medium degraded");
+		break;
+	case 0x05:
+		snprintf(exception, BUFSIZE, "Medium degraded in partition %i",
+			sense->fmt.f70.mp);
+		break;
+	case 0x06:
+		snprintf(exception, BUFSIZE, "Block 0 Error");
+		break;
+	case 0x07:
+		snprintf(exception, BUFSIZE, "Medium Exception 0x%02x",
+			sense->fmt.f70.md);
+		break;
+	default:
+		snprintf(exception, BUFSIZE, "0x%02x",
+			sense->fmt.f70.emc);
+		break;
+	}
+	/* Service Message */
+	switch (sense->fmt.f70.smc) {
+	case 0x02:
+		snprintf(service, BUFSIZE, "Reference Media maintenance "
+			"procedure %i", sense->fmt.f70.md);
+		break;
+	default:
+		snprintf(service, BUFSIZE, "0x%02x",
+			sense->fmt.f70.smc);
+		break;
+	}
+
+	dev_warn (&device->cdev->dev, "Tape media information: exception %s, "
+		"service %s\n", exception, service);
+
+out_nomem:
+	kfree(exception);
+	kfree(service);
+}
+
+/*
+ * Print an I/O Subsystem Service Information Message (message code f1)
+ */
+static void
+tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb)
+{
+	struct tape_3590_sense *sense;
+	char *exception, *service;
+
+	exception = kmalloc(BUFSIZE, GFP_ATOMIC);
+	service = kmalloc(BUFSIZE, GFP_ATOMIC);
+
+	if (!exception || !service)
+		goto out_nomem;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+	/* Exception Message */
+	switch (sense->fmt.f71.emc) {
+	case 0x01:
+		snprintf(exception, BUFSIZE, "Effect of failure is unknown");
+		break;
+	case 0x02:
+		snprintf(exception, BUFSIZE, "CU Exception - no performance "
+			"impact");
+		break;
+	case 0x03:
+		snprintf(exception, BUFSIZE, "CU Exception on channel "
+			"interface 0x%02x", sense->fmt.f71.md[0]);
+		break;
+	case 0x04:
+		snprintf(exception, BUFSIZE, "CU Exception on device path "
+			"0x%02x", sense->fmt.f71.md[0]);
+		break;
+	case 0x05:
+		snprintf(exception, BUFSIZE, "CU Exception on library path "
+			"0x%02x", sense->fmt.f71.md[0]);
+		break;
+	case 0x06:
+		snprintf(exception, BUFSIZE, "CU Exception on node 0x%02x",
+			sense->fmt.f71.md[0]);
+		break;
+	case 0x07:
+		snprintf(exception, BUFSIZE, "CU Exception on partition "
+			"0x%02x", sense->fmt.f71.md[0]);
+		break;
+	default:
+		snprintf(exception, BUFSIZE, "0x%02x",
+			sense->fmt.f71.emc);
+	}
+	/* Service Message */
+	switch (sense->fmt.f71.smc) {
+	case 0x01:
+		snprintf(service, BUFSIZE, "Repair impact is unknown");
+		break;
+	case 0x02:
+		snprintf(service, BUFSIZE, "Repair will not impact cu "
+			"performance");
+		break;
+	case 0x03:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable node "
+				"0x%x on CU", sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"nodes (0x%x-0x%x) on CU", sense->fmt.f71.md[1],
+				sense->fmt.f71.md[2]);
+		break;
+	case 0x04:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"channel path 0x%x on CU",
+				sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable cannel"
+				" paths (0x%x-0x%x) on CU",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x05:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable device"
+				" path 0x%x on CU", sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable device"
+				" paths (0x%x-0x%x) on CU",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x06:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"library path 0x%x on CU",
+				sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"library paths (0x%x-0x%x) on CU",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x07:
+		snprintf(service, BUFSIZE, "Repair will disable access to CU");
+		break;
+	default:
+		snprintf(service, BUFSIZE, "0x%02x",
+			sense->fmt.f71.smc);
+	}
+
+	dev_warn (&device->cdev->dev, "I/O subsystem information: exception"
+		" %s, service %s\n", exception, service);
+out_nomem:
+	kfree(exception);
+	kfree(service);
+}
+
+/*
+ * Print a Device Subsystem Service Information Message (message code f2)
+ */
+static void
+tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb)
+{
+	struct tape_3590_sense *sense;
+	char *exception, *service;
+
+	exception = kmalloc(BUFSIZE, GFP_ATOMIC);
+	service = kmalloc(BUFSIZE, GFP_ATOMIC);
+
+	if (!exception || !service)
+		goto out_nomem;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+	/* Exception Message */
+	switch (sense->fmt.f71.emc) {
+	case 0x01:
+		snprintf(exception, BUFSIZE, "Effect of failure is unknown");
+		break;
+	case 0x02:
+		snprintf(exception, BUFSIZE, "DV Exception - no performance"
+			" impact");
+		break;
+	case 0x03:
+		snprintf(exception, BUFSIZE, "DV Exception on channel "
+			"interface 0x%02x", sense->fmt.f71.md[0]);
+		break;
+	case 0x04:
+		snprintf(exception, BUFSIZE, "DV Exception on loader 0x%02x",
+			sense->fmt.f71.md[0]);
+		break;
+	case 0x05:
+		snprintf(exception, BUFSIZE, "DV Exception on message display"
+			" 0x%02x", sense->fmt.f71.md[0]);
+		break;
+	case 0x06:
+		snprintf(exception, BUFSIZE, "DV Exception in tape path");
+		break;
+	case 0x07:
+		snprintf(exception, BUFSIZE, "DV Exception in drive");
+		break;
+	default:
+		snprintf(exception, BUFSIZE, "0x%02x",
+			sense->fmt.f71.emc);
+	}
+	/* Service Message */
+	switch (sense->fmt.f71.smc) {
+	case 0x01:
+		snprintf(service, BUFSIZE, "Repair impact is unknown");
+		break;
+	case 0x02:
+		snprintf(service, BUFSIZE, "Repair will not impact device "
+			"performance");
+		break;
+	case 0x03:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"channel path 0x%x on DV",
+				sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"channel path (0x%x-0x%x) on DV",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x04:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"interface 0x%x on DV", sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"interfaces (0x%x-0x%x) on DV",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x05:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable loader"
+				" 0x%x on DV", sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable loader"
+				" (0x%x-0x%x) on DV",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x07:
+		snprintf(service, BUFSIZE, "Repair will disable access to DV");
+		break;
+	case 0x08:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"message display 0x%x on DV",
+				sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"message displays (0x%x-0x%x) on DV",
+				 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x09:
+		snprintf(service, BUFSIZE, "Clean DV");
+		break;
+	default:
+		snprintf(service, BUFSIZE, "0x%02x",
+			sense->fmt.f71.smc);
+	}
+
+	dev_warn (&device->cdev->dev, "Device subsystem information: exception"
+		" %s, service %s\n", exception, service);
+out_nomem:
+	kfree(exception);
+	kfree(service);
+}
+
+/*
+ * Print standard ERA Message
+ */
+static void
+tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
+{
+	struct tape_3590_sense *sense;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+	if (sense->mc == 0)
+		return;
+	if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) {
+		if (tape_3590_msg[sense->mc] != NULL)
+			dev_warn (&device->cdev->dev, "The tape unit has "
+				"issued sense message %s\n",
+				tape_3590_msg[sense->mc]);
+		else
+			dev_warn (&device->cdev->dev, "The tape unit has "
+				"issued an unknown sense message code 0x%x\n",
+				sense->mc);
+		return;
+	}
+	if (sense->mc == 0xf0) {
+		/* Standard Media Information Message */
+		dev_warn (&device->cdev->dev, "MIM SEV=%i, MC=%02x, ES=%x/%x, "
+			"RC=%02x-%04x-%02x\n", sense->fmt.f70.sev, sense->mc,
+			sense->fmt.f70.emc, sense->fmt.f70.smc,
+			sense->fmt.f70.refcode, sense->fmt.f70.mid,
+			sense->fmt.f70.fid);
+		tape_3590_print_mim_msg_f0(device, irb);
+		return;
+	}
+	if (sense->mc == 0xf1) {
+		/* Standard I/O Subsystem Service Information Message */
+		dev_warn (&device->cdev->dev, "IOSIM SEV=%i, DEVTYPE=3590/%02x,"
+			" MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
+			sense->fmt.f71.sev, device->cdev->id.dev_model,
+			sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc,
+			sense->fmt.f71.refcode1, sense->fmt.f71.refcode2,
+			sense->fmt.f71.refcode3);
+		tape_3590_print_io_sim_msg_f1(device, irb);
+		return;
+	}
+	if (sense->mc == 0xf2) {
+		/* Standard Device Service Information Message */
+		dev_warn (&device->cdev->dev, "DEVSIM SEV=%i, DEVTYPE=3590/%02x"
+			", MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
+			sense->fmt.f71.sev, device->cdev->id.dev_model,
+			sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc,
+			sense->fmt.f71.refcode1, sense->fmt.f71.refcode2,
+			sense->fmt.f71.refcode3);
+		tape_3590_print_dev_sim_msg_f2(device, irb);
+		return;
+	}
+	if (sense->mc == 0xf3) {
+		/* Standard Library Service Information Message */
+		return;
+	}
+	dev_warn (&device->cdev->dev, "The tape unit has issued an unknown "
+		"sense message code %x\n", sense->mc);
+}
+
+static int tape_3590_crypt_error(struct tape_device *device,
+				 struct tape_request *request, struct irb *irb)
+{
+	u8 cu_rc;
+	u16 ekm_rc2;
+	char *sense;
+
+	sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data;
+	cu_rc = sense[0];
+	ekm_rc2 = *((u16*) &sense[10]);
+	if ((cu_rc == 0) && (ekm_rc2 == 0xee31))
+		/* key not defined on EKM */
+		return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED);
+	if ((cu_rc == 1) || (cu_rc == 2))
+		/* No connection to EKM */
+		return tape_3590_erp_basic(device, request, irb, -ENOTCONN);
+
+	dev_err (&device->cdev->dev, "The tape unit failed to obtain the "
+		"encryption key from EKM\n");
+
+	return tape_3590_erp_basic(device, request, irb, -ENOKEY);
+}
+
+/*
+ *  3590 error Recovery routine:
+ *  If possible, it tries to recover from the error. If this is not possible,
+ *  inform the user about the problem.
+ */
+static int
+tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
+		     struct irb *irb)
+{
+	struct tape_3590_sense *sense;
+
+#ifdef CONFIG_S390_TAPE_BLOCK
+	if (request->op == TO_BLOCK) {
+		/*
+		 * Recovery for block device requests. Set the block_position
+		 * to something invalid and retry.
+		 */
+		device->blk_data.block_position = -1;
+		if (request->retries-- <= 0)
+			return tape_3590_erp_failed(device, request, irb, -EIO);
+		else
+			return tape_3590_erp_retry(device, request, irb);
+	}
+#endif
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+
+	DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc);
+
+	/*
+	 * First check all RC-QRCs that need special handling; each case
+	 * returns the matching error recovery action. Anything not listed
+	 * falls through to the default basic error recovery.
+	 */
+	switch (sense->rc_rqc) {
+
+	case 0x1110:
+		tape_3590_print_era_msg(device, irb);
+		return tape_3590_erp_read_buf_log(device, request, irb);
+
+	case 0x2011:
+		tape_3590_print_era_msg(device, irb);
+		return tape_3590_erp_read_alternate(device, request, irb);
+
+	case 0x2230:
+	case 0x2231:
+		tape_3590_print_era_msg(device, irb);
+		return tape_3590_erp_special_interrupt(device, request, irb);
+	case 0x2240:
+		return tape_3590_crypt_error(device, request, irb);
+
+	case 0x3010:
+		DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n",
+			  device->cdev_id);
+		return tape_3590_erp_basic(device, request, irb, -ENOSPC);
+	case 0x3012:
+		DBF_EVENT(2, "(%08x): Forward at End of Partition\n",
+			  device->cdev_id);
+		return tape_3590_erp_basic(device, request, irb, -ENOSPC);
+	case 0x3020:
+		DBF_EVENT(2, "(%08x): End of Data Mark\n", device->cdev_id);
+		return tape_3590_erp_basic(device, request, irb, -ENOSPC);
+
+	case 0x3122:
+		DBF_EVENT(2, "(%08x): Rewind Unload initiated\n",
+			  device->cdev_id);
+		return tape_3590_erp_basic(device, request, irb, -EIO);
+	case 0x3123:
+		DBF_EVENT(2, "(%08x): Rewind Unload complete\n",
+			  device->cdev_id);
+		tape_med_state_set(device, MS_UNLOADED);
+		tape_3590_schedule_work(device, TO_CRYPT_OFF);
+		return tape_3590_erp_basic(device, request, irb, 0);
+
+	case 0x4010:
+		/*
+		 * Print an additional message since the default message
+		 * "device intervention" is not very meaningful.
+		 */
+		tape_med_state_set(device, MS_UNLOADED);
+		tape_3590_schedule_work(device, TO_CRYPT_OFF);
+		return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
+	case 0x4012:		/* Device Long Busy */
+		/* XXX: Also use long busy handling here? */
+		DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id);
+		tape_3590_print_era_msg(device, irb);
+		return tape_3590_erp_basic(device, request, irb, -EBUSY);
+	case 0x4014:
+		DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id);
+		return tape_3590_erp_long_busy(device, request, irb);
+
+	case 0x5010:
+		if (sense->rac == 0xd0) {
+			/* Swap */
+			tape_3590_print_era_msg(device, irb);
+			return tape_3590_erp_swap(device, request, irb);
+		}
+		if (sense->rac == 0x26) {
+			/* Read Opposite */
+			tape_3590_print_era_msg(device, irb);
+			return tape_3590_erp_read_opposite(device, request,
+							   irb);
+		}
+		return tape_3590_erp_basic(device, request, irb, -EIO);
+	case 0x5020:
+	case 0x5021:
+	case 0x5022:
+	case 0x5040:
+	case 0x5041:
+	case 0x5042:
+		tape_3590_print_era_msg(device, irb);
+		return tape_3590_erp_swap(device, request, irb);
+
+	case 0x5110:
+	case 0x5111:
+		return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE);
+
+	case 0x5120:
+	case 0x1120:
+		tape_med_state_set(device, MS_UNLOADED);
+		tape_3590_schedule_work(device, TO_CRYPT_OFF);
+		return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
+
+	case 0x6020:
+		return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE);
+
+	case 0x8011:
+		return tape_3590_erp_basic(device, request, irb, -EPERM);
+	case 0x8013:
+		dev_warn (&device->cdev->dev, "A different host has privileged"
+			" access to the tape unit\n");
+		return tape_3590_erp_basic(device, request, irb, -EPERM);
+	default:
+		return tape_3590_erp_basic(device, request, irb, -EIO);
+	}
+}
+
+/*
+ * 3590 interrupt handler:
+ */
+static int
+tape_3590_irq(struct tape_device *device, struct tape_request *request,
+	      struct irb *irb)
+{
+	if (request == NULL)
+		return tape_3590_unsolicited_irq(device, irb);
+
+	if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
+	    (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
+	    (request->op == TO_WRI)) {
+		/* Write at end of volume */
+		DBF_EVENT(2, "End of volume\n");
+		return tape_3590_erp_failed(device, request, irb, -ENOSPC);
+	}
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+		return tape_3590_unit_check(device, request, irb);
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+		if (irb->scsw.cmd.dstat == DEV_STAT_UNIT_EXCEP) {
+			if (request->op == TO_FSB || request->op == TO_BSB)
+				request->rescnt++;
+			else
+				DBF_EVENT(5, "Unit Exception!\n");
+		}
+
+		return tape_3590_done(device, request);
+	}
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) {
+		DBF_EVENT(2, "cannel end\n");
+		return TAPE_IO_PENDING;
+	}
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		DBF_EVENT(2, "Unit Attention when busy..\n");
+		return TAPE_IO_PENDING;
+	}
+
+	DBF_EVENT(6, "xunknownirq\n");
+	tape_dump_sense_dbf(device, request, irb);
+	return TAPE_IO_STOP;
+}
+
+
+static int tape_3590_read_dev_chars(struct tape_device *device,
+				    struct tape_3590_rdc_data *rdc_data)
+{
+	int rc;
+	struct tape_request *request;
+
+	request = tape_alloc_request(1, sizeof(*rdc_data));
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_RDC;
+	tape_ccw_end(request->cpaddr, CCW_CMD_RDC, sizeof(*rdc_data),
+		     request->cpdata);
+	rc = tape_do_io(device, request);
+	if (rc == 0)
+		memcpy(rdc_data, request->cpdata, sizeof(*rdc_data));
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * Setup device function
+ */
+static int
+tape_3590_setup_device(struct tape_device *device)
+{
+	int rc;
+	struct tape_3590_disc_data *data;
+	struct tape_3590_rdc_data *rdc_data;
+
+	DBF_EVENT(6, "3590 device setup\n");
+	data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA);
+	if (data == NULL)
+		return -ENOMEM;
+	data->read_back_op = READ_PREVIOUS;
+	device->discdata = data;
+
+	rdc_data = kmalloc(sizeof(*rdc_data), GFP_KERNEL | GFP_DMA);
+	if (!rdc_data) {
+		rc = -ENOMEM;
+		goto fail_kmalloc;
+	}
+	rc = tape_3590_read_dev_chars(device, rdc_data);
+	if (rc) {
+		DBF_LH(3, "Read device characteristics failed!\n");
+		goto fail_rdc_data;
+	}
+	rc = tape_std_assign(device);
+	if (rc)
+		goto fail_rdc_data;
+	if (rdc_data->data[31] == 0x13) {
+		data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK;
+		tape_3592_disable_crypt(device);
+	} else {
+		DBF_EVENT(6, "Device has NO crypto support\n");
+	}
+	/* Try to find out if medium is loaded */
+	rc = tape_3590_sense_medium(device);
+	if (rc) {
+		DBF_LH(3, "3590 medium sense returned %d\n", rc);
+		goto fail_rdc_data;
+	}
+	return 0;
+
+fail_rdc_data:
+	kfree(rdc_data);
+fail_kmalloc:
+	kfree(data);
+	return rc;
+}
+
+/*
+ * Cleanup device function
+ */
+static void
+tape_3590_cleanup_device(struct tape_device *device)
+{
+	flush_workqueue(tape_3590_wq);
+	tape_std_unassign(device);
+
+	kfree(device->discdata);
+	device->discdata = NULL;
+}
+
+/*
+ * List of 3590 magnetic tape commands.
+ */
+static tape_mtop_fn tape_3590_mtop[TAPE_NR_MTOPS] = {
+	[MTRESET]	 = tape_std_mtreset,
+	[MTFSF]		 = tape_std_mtfsf,
+	[MTBSF]		 = tape_std_mtbsf,
+	[MTFSR]		 = tape_std_mtfsr,
+	[MTBSR]		 = tape_std_mtbsr,
+	[MTWEOF]	 = tape_std_mtweof,
+	[MTREW]		 = tape_std_mtrew,
+	[MTOFFL]	 = tape_std_mtoffl,
+	[MTNOP]		 = tape_std_mtnop,
+	[MTRETEN]	 = tape_std_mtreten,
+	[MTBSFM]	 = tape_std_mtbsfm,
+	[MTFSFM]	 = tape_std_mtfsfm,
+	[MTEOM]		 = tape_std_mteom,
+	[MTERASE]	 = tape_std_mterase,
+	[MTRAS1]	 = NULL,
+	[MTRAS2]	 = NULL,
+	[MTRAS3]	 = NULL,
+	[MTSETBLK]	 = tape_std_mtsetblk,
+	[MTSETDENSITY]	 = NULL,
+	[MTSEEK]	 = tape_3590_mtseek,
+	[MTTELL]	 = tape_3590_mttell,
+	[MTSETDRVBUFFER] = NULL,
+	[MTFSS]		 = NULL,
+	[MTBSS]		 = NULL,
+	[MTWSM]		 = NULL,
+	[MTLOCK]	 = NULL,
+	[MTUNLOCK]	 = NULL,
+	[MTLOAD]	 = tape_std_mtload,
+	[MTUNLOAD]	 = tape_std_mtunload,
+	[MTCOMPRESSION]	 = tape_std_mtcompression,
+	[MTSETPART]	 = NULL,
+	[MTMKPART]	 = NULL
+};
+
+/*
+ * Tape discipline structure for 3590.
+ */
+static struct tape_discipline tape_discipline_3590 = {
+	.owner = THIS_MODULE,
+	.setup_device = tape_3590_setup_device,
+	.cleanup_device = tape_3590_cleanup_device,
+	.process_eov = tape_std_process_eov,
+	.irq = tape_3590_irq,
+	.read_block = tape_std_read_block,
+	.write_block = tape_std_write_block,
+#ifdef CONFIG_S390_TAPE_BLOCK
+	.bread = tape_3590_bread,
+	.free_bread = tape_3590_free_bread,
+	.check_locate = tape_3590_check_locate,
+#endif
+	.ioctl_fn = tape_3590_ioctl,
+	.mtop_array = tape_3590_mtop
+};
+
+static struct ccw_device_id tape_3590_ids[] = {
+	{CCW_DEVICE_DEVTYPE(0x3590, 0, 0x3590, 0), .driver_info = tape_3590},
+	{CCW_DEVICE_DEVTYPE(0x3592, 0, 0x3592, 0), .driver_info = tape_3592},
+	{ /* end of list */ }
+};
+
+static int
+tape_3590_online(struct ccw_device *cdev)
+{
+	return tape_generic_online(dev_get_drvdata(&cdev->dev),
+				   &tape_discipline_3590);
+}
+
+static struct ccw_driver tape_3590_driver = {
+	.driver = {
+		.name = "tape_3590",
+		.owner = THIS_MODULE,
+	},
+	.ids = tape_3590_ids,
+	.probe = tape_generic_probe,
+	.remove = tape_generic_remove,
+	.set_offline = tape_generic_offline,
+	.set_online = tape_3590_online,
+	.freeze = tape_generic_pm_suspend,
+	.int_class = IOINT_TAP,
+};
+
+/*
+ * Setup discipline structure.
+ */
+static int
+tape_3590_init(void)
+{
+	int rc;
+
+	TAPE_DBF_AREA = debug_register("tape_3590", 2, 2, 4 * sizeof(long));
+	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
+#ifdef DBF_LIKE_HELL
+	debug_set_level(TAPE_DBF_AREA, 6);
+#endif
+
+	DBF_EVENT(3, "3590 init\n");
+
+	tape_3590_wq = alloc_workqueue("tape_3590", 0, 0);
+	if (!tape_3590_wq)
+		return -ENOMEM;
+
+	/* Register driver for 3590 tapes. */
+	rc = ccw_driver_register(&tape_3590_driver);
+	if (rc) {
+		destroy_workqueue(tape_3590_wq);
+		DBF_EVENT(3, "3590 init failed\n");
+	} else
+		DBF_EVENT(3, "3590 registered\n");
+	return rc;
+}
+
+static void
+tape_3590_exit(void)
+{
+	ccw_driver_unregister(&tape_3590_driver);
+	destroy_workqueue(tape_3590_wq);
+	debug_unregister(TAPE_DBF_AREA);
+}
+
+MODULE_DEVICE_TABLE(ccw, tape_3590_ids);
+MODULE_AUTHOR("(C) 2001,2006 IBM Corporation");
+MODULE_DESCRIPTION("Linux on zSeries channel attached 3590 tape device driver");
+MODULE_LICENSE("GPL");
+
+module_init(tape_3590_init);
+module_exit(tape_3590_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_3590.h b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_3590.h
new file mode 100644
index 0000000..4534055
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_3590.h
@@ -0,0 +1,175 @@
+/*
+ *  drivers/s390/char/tape_3590.h
+ *    tape device discipline for 3590 tapes.
+ *
+ *    Copyright IBM Corp. 2001,2006
+ *    Author(s): Stefan Bader <shbader@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _TAPE_3590_H
+#define _TAPE_3590_H
+
+#define MEDIUM_SENSE	0xc2
+#define READ_PREVIOUS	0x0a
+#define MODE_SENSE	0xcf
+#define PERFORM_SS_FUNC 0x77
+#define READ_SS_DATA	0x3e
+
+#define PREP_RD_SS_DATA 0x18
+#define RD_ATTMSG	0x3
+
+#define SENSE_BRA_PER  0
+#define SENSE_BRA_CONT 1
+#define SENSE_BRA_RE   2
+#define SENSE_BRA_DRE  3
+
+#define SENSE_FMT_LIBRARY	0x23
+#define SENSE_FMT_UNSOLICITED	0x40
+#define SENSE_FMT_COMMAND_REJ	0x41
+#define SENSE_FMT_COMMAND_EXEC0 0x50
+#define SENSE_FMT_COMMAND_EXEC1 0x51
+#define SENSE_FMT_EVENT0	0x60
+#define SENSE_FMT_EVENT1	0x61
+#define SENSE_FMT_MIM		0x70
+#define SENSE_FMT_SIM		0x71
+
+#define MSENSE_UNASSOCIATED	 0x00
+#define MSENSE_ASSOCIATED_MOUNT	 0x01
+#define MSENSE_ASSOCIATED_UMOUNT 0x02
+#define MSENSE_CRYPT_MASK	 0x00000010
+
+#define TAPE_3590_MAX_MSG	 0xb0
+
+/* Datatypes */
+
+struct tape_3590_disc_data {
+	struct tape390_crypt_info crypt_info;
+	int read_back_op;
+};
+
+#define TAPE_3590_CRYPT_INFO(device) \
+	((struct tape_3590_disc_data*)(device->discdata))->crypt_info
+#define TAPE_3590_READ_BACK_OP(device) \
+	((struct tape_3590_disc_data*)(device->discdata))->read_back_op
+
+struct tape_3590_sense {
+
+	unsigned int command_rej:1;
+	unsigned int interv_req:1;
+	unsigned int bus_out_check:1;
+	unsigned int eq_check:1;
+	unsigned int data_check:1;
+	unsigned int overrun:1;
+	unsigned int def_unit_check:1;
+	unsigned int assgnd_elsew:1;
+
+	unsigned int locate_fail:1;
+	unsigned int inst_online:1;
+	unsigned int reserved:1;
+	unsigned int blk_seq_err:1;
+	unsigned int begin_part:1;
+	unsigned int wr_mode:1;
+	unsigned int wr_prot:1;
+	unsigned int not_cap:1;
+
+	unsigned int bra:2;
+	unsigned int lc:3;
+	unsigned int vlf_active:1;
+	unsigned int stm:1;
+	unsigned int med_pos:1;
+
+	unsigned int rac:8;
+
+	unsigned int rc_rqc:16;
+
+	unsigned int mc:8;
+
+	unsigned int sense_fmt:8;
+
+	union {
+		struct {
+			unsigned int emc:4;
+			unsigned int smc:4;
+			unsigned int sev:2;
+			unsigned int reserved:6;
+			unsigned int md:8;
+			unsigned int refcode:8;
+			unsigned int mid:16;
+			unsigned int mp:16;
+			unsigned char volid[6];
+			unsigned int fid:8;
+		} f70;
+		struct {
+			unsigned int emc:4;
+			unsigned int smc:4;
+			unsigned int sev:2;
+			unsigned int reserved1:5;
+			unsigned int mdf:1;
+			unsigned char md[3];
+			unsigned int simid:8;
+			unsigned int uid:16;
+			unsigned int refcode1:16;
+			unsigned int refcode2:16;
+			unsigned int refcode3:16;
+			unsigned int reserved2:8;
+		} f71;
+		unsigned char data[14];
+	} fmt;
+	unsigned char pad[10];
+
+} __attribute__ ((packed));
+
+struct tape_3590_med_sense {
+	unsigned int macst:4;
+	unsigned int masst:4;
+	char pad1[7];
+	unsigned int flags;
+	char pad2[116];
+} __attribute__ ((packed));
+
+struct tape_3590_rdc_data {
+	char data[64];
+} __attribute__ ((packed));
+
+/* Datastructures for 3592 encryption support */
+
+struct tape3592_kekl {
+	__u8 flags;
+	char label[64];
+} __attribute__ ((packed));
+
+struct tape3592_kekl_pair {
+	__u8 count;
+	struct tape3592_kekl kekl[2];
+} __attribute__ ((packed));
+
+struct tape3592_kekl_query_data {
+	__u16 len;
+	__u8  fmt;
+	__u8  mc;
+	__u32 id;
+	__u8  flags;
+	struct tape3592_kekl_pair kekls;
+	char reserved[116];
+} __attribute__ ((packed));
+
+struct tape3592_kekl_query_order {
+	__u8 code;
+	__u8 flags;
+	char reserved1[2];
+	__u8 max_count;
+	char reserved2[35];
+} __attribute__ ((packed));
+
+struct tape3592_kekl_set_order {
+	__u8 code;
+	__u8 flags;
+	char reserved1[2];
+	__u8 op;
+	struct tape3592_kekl_pair kekls;
+	char reserved2[120];
+} __attribute__ ((packed));
+
+#endif /* _TAPE_3590_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_char.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_char.c
new file mode 100644
index 0000000..87cd0ab
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_char.c
@@ -0,0 +1,512 @@
+/*
+ *  drivers/s390/char/tape_char.c
+ *    character device frontend for tape device driver
+ *
+ *  S390 and zSeries version
+ *    Copyright IBM Corp. 2001,2006
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/mtio.h>
+#include <linux/compat.h>
+
+#include <asm/uaccess.h>
+
+#define TAPE_DBF_AREA	tape_core_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+#include "tape_class.h"
+
+#define TAPECHAR_MAJOR		0	/* get dynamic major */
+
+/*
+ * file operation structure for tape character frontend
+ */
+static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
+static int tapechar_open(struct inode *,struct file *);
+static int tapechar_release(struct inode *,struct file *);
+static long tapechar_ioctl(struct file *, unsigned int, unsigned long);
+#ifdef CONFIG_COMPAT
+static long tapechar_compat_ioctl(struct file *, unsigned int, unsigned long);
+#endif
+
+static const struct file_operations tape_fops =
+{
+	.owner = THIS_MODULE,
+	.read = tapechar_read,
+	.write = tapechar_write,
+	.unlocked_ioctl = tapechar_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = tapechar_compat_ioctl,
+#endif
+	.open = tapechar_open,
+	.release = tapechar_release,
+	.llseek = no_llseek,
+};
+
+static int tapechar_major = TAPECHAR_MAJOR;
+
+/*
+ * This function is called for every new tapedevice
+ */
+int
+tapechar_setup_device(struct tape_device * device)
+{
+	char	device_name[20];
+
+	sprintf(device_name, "ntibm%i", device->first_minor / 2);
+	device->nt = register_tape_dev(
+		&device->cdev->dev,
+		MKDEV(tapechar_major, device->first_minor),
+		&tape_fops,
+		device_name,
+		"non-rewinding"
+	);
+	device_name[0] = 'r';
+	device->rt = register_tape_dev(
+		&device->cdev->dev,
+		MKDEV(tapechar_major, device->first_minor + 1),
+		&tape_fops,
+		device_name,
+		"rewinding"
+	);
+
+	return 0;
+}
+
+void
+tapechar_cleanup_device(struct tape_device *device)
+{
+	unregister_tape_dev(&device->cdev->dev, device->rt);
+	device->rt = NULL;
+	unregister_tape_dev(&device->cdev->dev, device->nt);
+	device->nt = NULL;
+}
+
+static int
+tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
+{
+	struct idal_buffer *new;
+
+	if (device->char_data.idal_buf != NULL &&
+	    device->char_data.idal_buf->size == block_size)
+		return 0;
+
+	if (block_size > MAX_BLOCKSIZE) {
+		DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n",
+			block_size, MAX_BLOCKSIZE);
+		return -EINVAL;
+	}
+
+	/* The current idal buffer is not correct. Allocate a new one. */
+	new = idal_buffer_alloc(block_size, 0);
+	if (IS_ERR(new))
+		return -ENOMEM;
+
+	if (device->char_data.idal_buf != NULL)
+		idal_buffer_free(device->char_data.idal_buf);
+
+	device->char_data.idal_buf = new;
+
+	return 0;
+}
+
+/*
+ * Tape device read function
+ */
+static ssize_t
+tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	size_t block_size;
+	int rc;
+
+	DBF_EVENT(6, "TCHAR:read\n");
+	device = (struct tape_device *) filp->private_data;
+
+	/*
+	 * If the tape isn't terminated yet, do it now. Since we are then
+	 * at the end of the tape there is nothing to read anyway, so we
+	 * return immediately.
+	 */
+	if (device->required_tapemarks) {
+		return tape_std_terminate_write(device);
+	}
+
+	/* Find out block size to use */
+	if (device->char_data.block_size != 0) {
+		if (count < device->char_data.block_size) {
+			DBF_EVENT(3, "TCHAR:read smaller than block "
+				  "size was requested\n");
+			return -EINVAL;
+		}
+		block_size = device->char_data.block_size;
+	} else {
+		block_size = count;
+	}
+
+	rc = tapechar_check_idalbuffer(device, block_size);
+	if (rc)
+		return rc;
+
+#ifdef CONFIG_S390_TAPE_BLOCK
+	/* Changes position. */
+	device->blk_data.medium_changed = 1;
+#endif
+
+	DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
+	/* Let the discipline build the ccw chain. */
+	request = device->discipline->read_block(device, block_size);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	/* Execute it. */
+	rc = tape_do_io(device, request);
+	if (rc == 0) {
+		rc = block_size - request->rescnt;
+		DBF_EVENT(6, "TCHAR:rbytes:  %x\n", rc);
+		/* Copy data from idal buffer to user space. */
+		if (idal_buffer_to_user(device->char_data.idal_buf,
+					data, rc) != 0)
+			rc = -EFAULT;
+	}
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * Tape device write function
+ */
+static ssize_t
+tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	size_t block_size;
+	size_t written;
+	int nblocks;
+	int i, rc;
+
+	DBF_EVENT(6, "TCHAR:write\n");
+	device = (struct tape_device *) filp->private_data;
+	/* Find out block size and number of blocks */
+	if (device->char_data.block_size != 0) {
+		if (count < device->char_data.block_size) {
+			DBF_EVENT(3, "TCHAR:write smaller than block "
+				  "size was requested\n");
+			return -EINVAL;
+		}
+		block_size = device->char_data.block_size;
+		nblocks = count / block_size;
+	} else {
+		block_size = count;
+		nblocks = 1;
+	}
+
+	rc = tapechar_check_idalbuffer(device, block_size);
+	if (rc)
+		return rc;
+
+#ifdef CONFIG_S390_TAPE_BLOCK
+	/* Changes position. */
+	device->blk_data.medium_changed = 1;
+#endif
+
+	DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size);
+	DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
+	/* Let the discipline build the ccw chain. */
+	request = device->discipline->write_block(device, block_size);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	rc = 0;
+	written = 0;
+	for (i = 0; i < nblocks; i++) {
+		/* Copy data from user space to idal buffer. */
+		if (idal_buffer_from_user(device->char_data.idal_buf,
+					  data, block_size)) {
+			rc = -EFAULT;
+			break;
+		}
+		rc = tape_do_io(device, request);
+		if (rc)
+			break;
+		DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
+			  block_size - request->rescnt);
+		written += block_size - request->rescnt;
+		if (request->rescnt != 0)
+			break;
+		data += block_size;
+	}
+	tape_free_request(request);
+	if (rc == -ENOSPC) {
+		/*
+		 * Ok, the device has no more space. It has NOT written
+		 * the block.
+		 */
+		if (device->discipline->process_eov)
+			device->discipline->process_eov(device);
+		if (written > 0)
+			rc = 0;
+
+	}
+
+	/*
+	 * After doing a write we always need two tapemarks to correctly
+	 * terminate the tape (one to terminate the file, the second to
+	 * flag the end of recorded data).
+	 * Since process_eov positions the tape in front of the written
+	 * tapemark it doesn't hurt to write two marks again.
+	 */
+	if (!rc)
+		device->required_tapemarks = 2;
+
+	return rc ? rc : written;
+}
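+
+/*
+ * Illustrative user-space sketch (hypothetical device node name, not part
+ * of this driver): once a fixed block size has been set, every write()
+ * must supply whole blocks; the byte count is split into
+ * count / block_size blocks as implemented above.
+ *
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *
+ *	int fd = open("/dev/ntibm0", O_WRONLY);
+ *	char buf[2 * 4096] = { 0 };		// two blocks of 4 KiB each
+ *	ssize_t n = write(fd, buf, sizeof(buf));
+ *	close(fd);
+ */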
+
+/*
+ * Character frontend tape device open function.
+ */
+static int
+tapechar_open (struct inode *inode, struct file *filp)
+{
+	struct tape_device *device;
+	int minor, rc;
+
+	DBF_EVENT(6, "TCHAR:open: %i:%i\n",
+		imajor(filp->f_path.dentry->d_inode),
+		iminor(filp->f_path.dentry->d_inode));
+
+	if (imajor(filp->f_path.dentry->d_inode) != tapechar_major)
+		return -ENODEV;
+
+	minor = iminor(filp->f_path.dentry->d_inode);
+	device = tape_find_device(minor / TAPE_MINORS_PER_DEV);
+	if (IS_ERR(device)) {
+		DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n");
+		return PTR_ERR(device);
+	}
+
+	rc = tape_open(device);
+	if (rc == 0) {
+		filp->private_data = device;
+		nonseekable_open(inode, filp);
+	} else
+		tape_put_device(device);
+
+	return rc;
+}
+
+/*
+ * Character frontend tape device release function.
+ */
+
+static int
+tapechar_release(struct inode *inode, struct file *filp)
+{
+	struct tape_device *device;
+
+	DBF_EVENT(6, "TCHAR:release: %x\n", iminor(inode));
+	device = (struct tape_device *) filp->private_data;
+
+	/*
+	 * If this is the rewinding tape minor then rewind. In that case we
+	 * write all required tapemarks. Otherwise only one to terminate the
+	 * file.
+	 */
+	if ((iminor(inode) & 1) != 0) {
+		if (device->required_tapemarks)
+			tape_std_terminate_write(device);
+		tape_mtop(device, MTREW, 1);
+	} else {
+		if (device->required_tapemarks > 1) {
+			if (tape_mtop(device, MTWEOF, 1) == 0)
+				device->required_tapemarks--;
+		}
+	}
+
+	if (device->char_data.idal_buf != NULL) {
+		idal_buffer_free(device->char_data.idal_buf);
+		device->char_data.idal_buf = NULL;
+	}
+	tape_release(device);
+	filp->private_data = NULL;
+	tape_put_device(device);
+
+	return 0;
+}
+
+/*
+ * Tape device io controls.
+ */
+static int
+__tapechar_ioctl(struct tape_device *device,
+		 unsigned int no, unsigned long data)
+{
+	int rc;
+
+	if (no == MTIOCTOP) {
+		struct mtop op;
+
+		if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0)
+			return -EFAULT;
+		if (op.mt_count < 0)
+			return -EINVAL;
+
+		/*
+		 * Operations that change tape position should write final
+		 * tapemarks.
+		 */
+		switch (op.mt_op) {
+			case MTFSF:
+			case MTBSF:
+			case MTFSR:
+			case MTBSR:
+			case MTREW:
+			case MTOFFL:
+			case MTEOM:
+			case MTRETEN:
+			case MTBSFM:
+			case MTFSFM:
+			case MTSEEK:
+#ifdef CONFIG_S390_TAPE_BLOCK
+				device->blk_data.medium_changed = 1;
+#endif
+				if (device->required_tapemarks)
+					tape_std_terminate_write(device);
+			default:
+				;
+		}
+		rc = tape_mtop(device, op.mt_op, op.mt_count);
+
+		if (op.mt_op == MTWEOF && rc == 0) {
+			if (op.mt_count > device->required_tapemarks)
+				device->required_tapemarks = 0;
+			else
+				device->required_tapemarks -= op.mt_count;
+		}
+		return rc;
+	}
+	if (no == MTIOCPOS) {
+		/* MTIOCPOS: query the tape position. */
+		struct mtpos pos;
+
+		rc = tape_mtop(device, MTTELL, 1);
+		if (rc < 0)
+			return rc;
+		pos.mt_blkno = rc;
+		if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0)
+			return -EFAULT;
+		return 0;
+	}
+	if (no == MTIOCGET) {
+		/* MTIOCGET: query the tape drive status. */
+		struct mtget get;
+
+		memset(&get, 0, sizeof(get));
+		get.mt_type = MT_ISUNKNOWN;
+		get.mt_resid = 0 /* device->devstat.rescnt */;
+		get.mt_dsreg = device->tape_state;
+		/* FIXME: mt_gstat, mt_erreg, mt_fileno */
+		get.mt_gstat = 0;
+		get.mt_erreg = 0;
+		get.mt_fileno = 0;
+		get.mt_gstat  = device->tape_generic_status;
+
+		if (device->medium_state == MS_LOADED) {
+			rc = tape_mtop(device, MTTELL, 1);
+
+			if (rc < 0)
+				return rc;
+
+			if (rc == 0)
+				get.mt_gstat |= GMT_BOT(~0);
+
+			get.mt_blkno = rc;
+		}
+
+		if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0)
+			return -EFAULT;
+
+		return 0;
+	}
+	/* Try the discipline ioctl function. */
+	if (device->discipline->ioctl_fn == NULL)
+		return -EINVAL;
+	return device->discipline->ioctl_fn(device, no, data);
+}
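+
+/*
+ * Illustrative user-space sketch (hypothetical device node name, not part
+ * of this driver): the ioctls handled above follow the standard mtio
+ * interface, e.g. rewinding the tape and querying the drive status:
+ *
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <sys/mtio.h>
+ *
+ *	int fd = open("/dev/ntibm0", O_RDONLY);
+ *	struct mtop op = { .mt_op = MTREW, .mt_count = 1 };
+ *	struct mtget get;
+ *
+ *	ioctl(fd, MTIOCTOP, &op);	// pending tapemarks are written first
+ *	ioctl(fd, MTIOCGET, &get);	// mt_gstat should then report GMT_BOT
+ *	close(fd);
+ */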
+
+static long
+tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
+{
+	struct tape_device *device;
+	long rc;
+
+	DBF_EVENT(6, "TCHAR:ioct\n");
+
+	device = (struct tape_device *) filp->private_data;
+	mutex_lock(&device->mutex);
+	rc = __tapechar_ioctl(device, no, data);
+	mutex_unlock(&device->mutex);
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long
+tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
+{
+	struct tape_device *device = filp->private_data;
+	int rval = -ENOIOCTLCMD;
+	unsigned long argp;
+
+	/* The 'arg' argument of any ioctl function may only be used for
+	 * pointers because of the compat pointer conversion.
+	 * Consider this when adding new ioctls.
+	 */
+	argp = (unsigned long) compat_ptr(data);
+	if (device->discipline->ioctl_fn) {
+		mutex_lock(&device->mutex);
+		rval = device->discipline->ioctl_fn(device, no, argp);
+		mutex_unlock(&device->mutex);
+		if (rval == -EINVAL)
+			rval = -ENOIOCTLCMD;
+	}
+
+	return rval;
+}
+#endif /* CONFIG_COMPAT */
+
+/*
+ * Initialize character device frontend.
+ */
+int
+tapechar_init (void)
+{
+	dev_t	dev;
+
+	if (alloc_chrdev_region(&dev, 0, 256, "tape") != 0)
+		return -1;
+
+	tapechar_major = MAJOR(dev);
+
+	return 0;
+}
+
+/*
+ * cleanup
+ */
+void
+tapechar_exit(void)
+{
+	unregister_chrdev_region(MKDEV(tapechar_major, 0), 256);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_class.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_class.c
new file mode 100644
index 0000000..55343df
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_class.c
@@ -0,0 +1,133 @@
+/*
+ * (C) Copyright IBM Corp. 2004
+ * tape_class.c
+ *
+ * Tape class device support
+ *
+ * Author: Stefan Bader <shbader@de.ibm.com>
+ * Based on simple class device code by Greg K-H
+ */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/slab.h>
+
+#include "tape_class.h"
+
+MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
+MODULE_DESCRIPTION(
+	"(C) Copyright IBM Corp. 2004   All Rights Reserved.\n"
+	"tape_class.c"
+);
+MODULE_LICENSE("GPL");
+
+static struct class *tape_class;
+
+/*
+ * Register a tape device and return a pointer to the tape class device
+ * created by the call.
+ *
+ * device
+ *	The pointer to the struct device of the physical (base) device.
+ * dev
+ *	The intended major/minor number. The major number may be 0 to
+ *	get a dynamic major number.
+ * fops
+ *	The pointer to the driver's file operations for the tape device.
+ * device_name
+ *	The pointer to the logical device name (also used as the kobject
+ *	name of the cdev).
+ * mode_name
+ *	The pointer to the name of the tape mode; a sysfs link with this
+ *	name is created from the physical device to the class device.
+ */
+struct tape_class_device *register_tape_dev(
+	struct device *		device,
+	dev_t			dev,
+	const struct file_operations *fops,
+	char *			device_name,
+	char *			mode_name)
+{
+	struct tape_class_device *	tcd;
+	int		rc;
+	char *		s;
+
+	tcd = kzalloc(sizeof(struct tape_class_device), GFP_KERNEL);
+	if (!tcd)
+		return ERR_PTR(-ENOMEM);
+
+	strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
+	for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
+		*s = '!';
+	strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
+	for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
+		*s = '!';
+
+	tcd->char_device = cdev_alloc();
+	if (!tcd->char_device) {
+		rc = -ENOMEM;
+		goto fail_with_tcd;
+	}
+
+	tcd->char_device->owner = fops->owner;
+	tcd->char_device->ops   = fops;
+	tcd->char_device->dev   = dev;
+
+	rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1);
+	if (rc)
+		goto fail_with_cdev;
+
+	tcd->class_device = device_create(tape_class, device,
+					  tcd->char_device->dev, NULL,
+					  "%s", tcd->device_name);
+	rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0;
+	if (rc)
+		goto fail_with_cdev;
+	rc = sysfs_create_link(
+		&device->kobj,
+		&tcd->class_device->kobj,
+		tcd->mode_name
+	);
+	if (rc)
+		goto fail_with_class_device;
+
+	return tcd;
+
+fail_with_class_device:
+	device_destroy(tape_class, tcd->char_device->dev);
+
+fail_with_cdev:
+	cdev_del(tcd->char_device);
+
+fail_with_tcd:
+	kfree(tcd);
+
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL(register_tape_dev);
+
+void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
+{
+	if (tcd != NULL && !IS_ERR(tcd)) {
+		sysfs_remove_link(&device->kobj, tcd->mode_name);
+		device_destroy(tape_class, tcd->char_device->dev);
+		cdev_del(tcd->char_device);
+		kfree(tcd);
+	}
+}
+EXPORT_SYMBOL(unregister_tape_dev);
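+
+/*
+ * Minimal usage sketch (hypothetical caller; my_fops, major and minor are
+ * assumptions): a frontend registers one class device per minor, checks
+ * the returned pointer with IS_ERR() and unregisters it again on cleanup:
+ *
+ *	struct tape_class_device *tcd;
+ *
+ *	tcd = register_tape_dev(&cdev->dev, MKDEV(major, minor),
+ *				&my_fops, "ntibm0", "non-rewinding");
+ *	if (IS_ERR(tcd))
+ *		return PTR_ERR(tcd);
+ *	...
+ *	unregister_tape_dev(&cdev->dev, tcd);
+ */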
+
+
+static int __init tape_init(void)
+{
+	tape_class = class_create(THIS_MODULE, "tape390");
+
+	return 0;
+}
+
+static void __exit tape_exit(void)
+{
+	class_destroy(tape_class);
+	tape_class = NULL;
+}
+
+postcore_initcall(tape_init);
+module_exit(tape_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_class.h b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_class.h
new file mode 100644
index 0000000..ba2092f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_class.h
@@ -0,0 +1,59 @@
+/*
+ * (C) Copyright IBM Corp. 2004   All Rights Reserved.
+ * tape_class.h
+ *
+ * Tape class device support
+ *
+ * Author: Stefan Bader <shbader@de.ibm.com>
+ * Based on simple class device code by Greg K-H
+ */
+#ifndef __TAPE_CLASS_H__
+#define __TAPE_CLASS_H__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/major.h>
+#include <linux/cdev.h>
+
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+
+#define TAPECLASS_NAME_LEN	32
+
+struct tape_class_device {
+	struct cdev		*char_device;
+	struct device		*class_device;
+	char			device_name[TAPECLASS_NAME_LEN];
+	char			mode_name[TAPECLASS_NAME_LEN];
+};
+
+/*
+ * Register a tape device and return a pointer to the tape class device
+ * created by the call.
+ *
+ * device
+ *	The pointer to the struct device of the physical (base) device.
+ * dev
+ *	The intended major/minor number. The major number may be 0 to
+ *	get a dynamic major number.
+ * fops
+ *	The pointer to the drivers file operations for the tape device.
+ * device_name
+ *	Pointer to the logical device name (will also be used as kobject name
+ *	of the cdev). This can also be called the name of the tape class
+ *	device.
+ * mode_name
+ *	Points to the name of the tape mode. This creates a link with that
+ *	name from the physical device to the logical device (class).
+ */
+struct tape_class_device *register_tape_dev(
+	struct device *		device,
+	dev_t			dev,
+	const struct file_operations *fops,
+	char *			device_name,
+	char *			mode_name
+);
+void unregister_tape_dev(struct device *device, struct tape_class_device *tcd);
+
+#endif /* __TAPE_CLASS_H__ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_core.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_core.c
new file mode 100644
index 0000000..b3a3e8e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_core.c
@@ -0,0 +1,1392 @@
+/*
+ *  drivers/s390/char/tape_core.c
+ *    basic function of the tape device driver
+ *
+ *  S390 and zSeries version
+ *    Copyright IBM Corp. 2001, 2009
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *		 Stefan Bader <shbader@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>	     // for kernel parameters
+#include <linux/kmod.h>	     // for requesting modules
+#include <linux/spinlock.h>  // for locks
+#include <linux/vmalloc.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/types.h>	     // for variable types
+
+#define TAPE_DBF_AREA	tape_core_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+
+#define LONG_BUSY_TIMEOUT 180 /* seconds */
+
+static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
+static void tape_delayed_next_request(struct work_struct *);
+static void tape_long_busy_timeout(unsigned long data);
+
+/*
+ * One list to contain all tape devices of all disciplines, so
+ * we can assign the devices to minor numbers of the same major.
+ * The list is protected by the rwlock tape_device_lock.
+ */
+static LIST_HEAD(tape_device_list);
+static DEFINE_RWLOCK(tape_device_lock);
+
+/*
+ * Pointer to debug area.
+ */
+debug_info_t *TAPE_DBF_AREA = NULL;
+EXPORT_SYMBOL(TAPE_DBF_AREA);
+
+/*
+ * Printable strings for tape enumerations.
+ */
+const char *tape_state_verbose[TS_SIZE] =
+{
+	[TS_UNUSED]   = "UNUSED",
+	[TS_IN_USE]   = "IN_USE",
+	[TS_BLKUSE]   = "BLKUSE",
+	[TS_INIT]     = "INIT  ",
+	[TS_NOT_OPER] = "NOT_OP"
+};
+
+const char *tape_op_verbose[TO_SIZE] =
+{
+	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
+	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
+	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
+	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
+	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
+	[TO_RFO] = "RFO",	[TO_REW] = "REW",
+	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
+	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
+	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
+	[TO_READ_ATTMSG] = "RAT",
+	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
+	[TO_UNASSIGN] = "UAS",  [TO_CRYPT_ON] = "CON",
+	[TO_CRYPT_OFF] = "COF",	[TO_KEKL_SET] = "KLS",
+	[TO_KEKL_QUERY] = "KLQ",[TO_RDC] = "RDC",
+};
+
+static int devid_to_int(struct ccw_dev_id *dev_id)
+{
+	return dev_id->devno + (dev_id->ssid << 16);
+}
+
+/*
+ * Some channel attached tape specific attributes.
+ *
+ * FIXME: In the future the first_minor and blocksize attribute should be
+ *        replaced by a link to the cdev tree.
+ */
+static ssize_t
+tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tape_device *tdev;
+
+	tdev = dev_get_drvdata(dev);
+	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
+}
+
+static
+DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);
+
+static ssize_t
+tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tape_device *tdev;
+
+	tdev = dev_get_drvdata(dev);
+	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
+}
+
+static
+DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);
+
+static ssize_t
+tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tape_device *tdev;
+
+	tdev = dev_get_drvdata(dev);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
+		"OFFLINE" : tape_state_verbose[tdev->tape_state]);
+}
+
+static
+DEVICE_ATTR(state, 0444, tape_state_show, NULL);
+
+static ssize_t
+tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tape_device *tdev;
+	ssize_t rc;
+
+	tdev = dev_get_drvdata(dev);
+	if (tdev->first_minor < 0)
+		return scnprintf(buf, PAGE_SIZE, "N/A\n");
+
+	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
+	if (list_empty(&tdev->req_queue))
+		rc = scnprintf(buf, PAGE_SIZE, "---\n");
+	else {
+		struct tape_request *req;
+
+		req = list_entry(tdev->req_queue.next, struct tape_request,
+			list);
+		rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
+	}
+	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
+	return rc;
+}
+
+static
+DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);
+
+static ssize_t
+tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tape_device *tdev;
+
+	tdev = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
+}
+
+static
+DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);
+
+static struct attribute *tape_attrs[] = {
+	&dev_attr_medium_state.attr,
+	&dev_attr_first_minor.attr,
+	&dev_attr_state.attr,
+	&dev_attr_operation.attr,
+	&dev_attr_blocksize.attr,
+	NULL
+};
+
+static struct attribute_group tape_attr_group = {
+	.attrs = tape_attrs,
+};
+
+/*
+ * Tape state functions
+ */
+void
+tape_state_set(struct tape_device *device, enum tape_state newstate)
+{
+	const char *str;
+
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(3, "ts_set err: not oper\n");
+		return;
+	}
+	DBF_EVENT(4, "ts. dev:	%x\n", device->first_minor);
+	DBF_EVENT(4, "old ts:\t\n");
+	if (device->tape_state < TS_SIZE && device->tape_state >=0 )
+		str = tape_state_verbose[device->tape_state];
+	else
+		str = "UNKNOWN TS";
+	DBF_EVENT(4, "%s\n", str);
+	DBF_EVENT(4, "new ts:\t\n");
+	if (newstate < TS_SIZE && newstate >= 0)
+		str = tape_state_verbose[newstate];
+	else
+		str = "UNKNOWN TS";
+	DBF_EVENT(4, "%s\n", str);
+	device->tape_state = newstate;
+	wake_up(&device->state_change_wq);
+}
+
+struct tape_med_state_work_data {
+	struct tape_device *device;
+	enum tape_medium_state state;
+	struct work_struct  work;
+};
+
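+/*
+ * Medium state changes are signalled from interrupt context; generating
+ * the uevent and the console message may sleep, so both are deferred to
+ * the work item below, which takes its own device reference.
+ */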
+static void
+tape_med_state_work_handler(struct work_struct *work)
+{
+	static char env_state_loaded[] = "MEDIUM_STATE=LOADED";
+	static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED";
+	struct tape_med_state_work_data *p =
+		container_of(work, struct tape_med_state_work_data, work);
+	struct tape_device *device = p->device;
+	char *envp[] = { NULL, NULL };
+
+	switch (p->state) {
+	case MS_UNLOADED:
+		pr_info("%s: The tape cartridge has been successfully "
+			"unloaded\n", dev_name(&device->cdev->dev));
+		envp[0] = env_state_unloaded;
+		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
+		break;
+	case MS_LOADED:
+		pr_info("%s: A tape cartridge has been mounted\n",
+			dev_name(&device->cdev->dev));
+		envp[0] = env_state_loaded;
+		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
+		break;
+	default:
+		break;
+	}
+	tape_put_device(device);
+	kfree(p);
+}
+
+static void
+tape_med_state_work(struct tape_device *device, enum tape_medium_state state)
+{
+	struct tape_med_state_work_data *p;
+
+	p = kzalloc(sizeof(*p), GFP_ATOMIC);
+	if (p) {
+		INIT_WORK(&p->work, tape_med_state_work_handler);
+		p->device = tape_get_device(device);
+		p->state = state;
+		schedule_work(&p->work);
+	}
+}
+
+void
+tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
+{
+	enum tape_medium_state oldstate;
+
+	oldstate = device->medium_state;
+	if (oldstate == newstate)
+		return;
+	device->medium_state = newstate;
+	switch(newstate){
+	case MS_UNLOADED:
+		device->tape_generic_status |= GMT_DR_OPEN(~0);
+		if (oldstate == MS_LOADED)
+			tape_med_state_work(device, MS_UNLOADED);
+		break;
+	case MS_LOADED:
+		device->tape_generic_status &= ~GMT_DR_OPEN(~0);
+		if (oldstate == MS_UNLOADED)
+			tape_med_state_work(device, MS_LOADED);
+		break;
+	default:
+		break;
+	}
+	wake_up(&device->state_change_wq);
+}
+
+/*
+ * Stop running ccw. Has to be called with the device lock held.
+ */
+static int
+__tape_cancel_io(struct tape_device *device, struct tape_request *request)
+{
+	int retries;
+	int rc;
+
+	/* Check if interrupt has already been processed */
+	if (request->callback == NULL)
+		return 0;
+
+	rc = 0;
+	for (retries = 0; retries < 5; retries++) {
+		rc = ccw_device_clear(device->cdev, (long) request);
+
+		switch (rc) {
+			case 0:
+				request->status	= TAPE_REQUEST_DONE;
+				return 0;
+			case -EBUSY:
+				request->status	= TAPE_REQUEST_CANCEL;
+				schedule_delayed_work(&device->tape_dnr, 0);
+				return 0;
+			case -ENODEV:
+				DBF_EXCEPTION(2, "device gone, retry\n");
+				break;
+			case -EIO:
+				DBF_EXCEPTION(2, "I/O error, retry\n");
+				break;
+			default:
+				BUG();
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Add device into the sorted list, giving it the first
+ * available minor number.
+ */
+static int
+tape_assign_minor(struct tape_device *device)
+{
+	struct tape_device *tmp;
+	int minor;
+
+	minor = 0;
+	write_lock(&tape_device_lock);
+	list_for_each_entry(tmp, &tape_device_list, node) {
+		if (minor < tmp->first_minor)
+			break;
+		minor += TAPE_MINORS_PER_DEV;
+	}
+	if (minor >= 256) {
+		write_unlock(&tape_device_lock);
+		return -ENODEV;
+	}
+	device->first_minor = minor;
+	list_add_tail(&device->node, &tmp->node);
+	write_unlock(&tape_device_lock);
+	return 0;
+}
+
+/* remove device from the list */
+static void
+tape_remove_minor(struct tape_device *device)
+{
+	write_lock(&tape_device_lock);
+	list_del_init(&device->node);
+	device->first_minor = -1;
+	write_unlock(&tape_device_lock);
+}
+
+/*
+ * Set a device online.
+ *
+ * This function is called by the common I/O layer to move a device from the
+ * detected but offline into the online state.
+ * If we return an error (RC < 0) the device remains in the offline state. This
+ * can happen if the device is assigned somewhere else, for example.
+ */
+int
+tape_generic_online(struct tape_device *device,
+		   struct tape_discipline *discipline)
+{
+	int rc;
+
+	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);
+
+	if (device->tape_state != TS_INIT) {
+		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
+		return -EINVAL;
+	}
+
+	init_timer(&device->lb_timeout);
+	device->lb_timeout.function = tape_long_busy_timeout;
+
+	/* Let the discipline have a go at the device. */
+	device->discipline = discipline;
+	if (!try_module_get(discipline->owner)) {
+		return -EINVAL;
+	}
+
+	rc = discipline->setup_device(device);
+	if (rc)
+		goto out;
+	rc = tape_assign_minor(device);
+	if (rc)
+		goto out_discipline;
+
+	rc = tapechar_setup_device(device);
+	if (rc)
+		goto out_minor;
+	rc = tapeblock_setup_device(device);
+	if (rc)
+		goto out_char;
+
+	tape_state_set(device, TS_UNUSED);
+
+	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);
+
+	return 0;
+
+out_char:
+	tapechar_cleanup_device(device);
+out_minor:
+	tape_remove_minor(device);
+out_discipline:
+	device->discipline->cleanup_device(device);
+	device->discipline = NULL;
+out:
+	module_put(discipline->owner);
+	return rc;
+}
+
+static void
+tape_cleanup_device(struct tape_device *device)
+{
+	tapeblock_cleanup_device(device);
+	tapechar_cleanup_device(device);
+	device->discipline->cleanup_device(device);
+	module_put(device->discipline->owner);
+	tape_remove_minor(device);
+	tape_med_state_set(device, MS_UNKNOWN);
+}
+
+/*
+ * Suspend device.
+ *
+ * Called by the common I/O layer if the drive should be suspended on user
+ * request. We refuse to suspend if the device is loaded or in use for the
+ * following reason:
+ * While the Linux guest is suspended, it might be logged off which causes
+ * devices to be detached. Tape devices are automatically rewound and unloaded
+ * during DETACH processing (unless the tape device was attached with the
+ * NOASSIGN or MULTIUSER option). After rewind/unload, there is no way to
+ * resume the original state of the tape device, since we would need to
+ * manually re-load the cartridge which was active at suspend time.
+ */
+int tape_generic_pm_suspend(struct ccw_device *cdev)
+{
+	struct tape_device *device;
+
+	device = dev_get_drvdata(&cdev->dev);
+	if (!device) {
+		return -ENODEV;
+	}
+
+	DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n",
+		device->cdev_id, device);
+
+	if (device->medium_state != MS_UNLOADED) {
+		pr_err("A cartridge is loaded in tape device %s, "
+		       "refusing to suspend\n", dev_name(&cdev->dev));
+		return -EBUSY;
+	}
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	switch (device->tape_state) {
+		case TS_INIT:
+		case TS_NOT_OPER:
+		case TS_UNUSED:
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			break;
+		default:
+			pr_err("Tape device %s is busy, refusing to "
+			       "suspend\n", dev_name(&cdev->dev));
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			return -EBUSY;
+	}
+
+	DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id);
+	return 0;
+}
+
+/*
+ * Set device offline.
+ *
+ * Called by the common I/O layer if the drive should be set offline on user
+ * request. We may prevent this by returning an error.
+ * Manual offline is only allowed while the drive is not in use.
+ */
+int
+tape_generic_offline(struct ccw_device *cdev)
+{
+	struct tape_device *device;
+
+	device = dev_get_drvdata(&cdev->dev);
+	if (!device) {
+		return -ENODEV;
+	}
+
+	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
+		device->cdev_id, device);
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	switch (device->tape_state) {
+		case TS_INIT:
+		case TS_NOT_OPER:
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			break;
+		case TS_UNUSED:
+			tape_state_set(device, TS_INIT);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			tape_cleanup_device(device);
+			break;
+		default:
+			DBF_EVENT(3, "(%08x): Set offline failed "
+				"- drive in use.\n",
+				device->cdev_id);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			return -EBUSY;
+	}
+
+	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
+	return 0;
+}
+
+/*
+ * Allocate memory for a new device structure.
+ */
+static struct tape_device *
+tape_alloc_device(void)
+{
+	struct tape_device *device;
+
+	device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
+	if (device == NULL) {
+		DBF_EXCEPTION(2, "ti:no mem\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
+	if (device->modeset_byte == NULL) {
+		DBF_EXCEPTION(2, "ti:no mem\n");
+		kfree(device);
+		return ERR_PTR(-ENOMEM);
+	}
+	mutex_init(&device->mutex);
+	INIT_LIST_HEAD(&device->req_queue);
+	INIT_LIST_HEAD(&device->node);
+	init_waitqueue_head(&device->state_change_wq);
+	init_waitqueue_head(&device->wait_queue);
+	device->tape_state = TS_INIT;
+	device->medium_state = MS_UNKNOWN;
+	*device->modeset_byte = 0;
+	device->first_minor = -1;
+	atomic_set(&device->ref_count, 1);
+	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);
+
+	return device;
+}
+
+/*
+ * Get a reference to an existing device structure. This will automatically
+ * increment the reference count.
+ */
+struct tape_device *
+tape_get_device(struct tape_device *device)
+{
+	int count;
+
+	count = atomic_inc_return(&device->ref_count);
+	DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
+	return device;
+}
+
+/*
+ * Decrease the reference counter of a device structure. If the
+ * reference counter reaches zero, the device structure is freed.
+ * The caller must not use the device pointer afterwards and should
+ * clear any reference to it.
+ */
+void
+tape_put_device(struct tape_device *device)
+{
+	int count;
+
+	count = atomic_dec_return(&device->ref_count);
+	DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
+	BUG_ON(count < 0);
+	if (count == 0) {
+		kfree(device->modeset_byte);
+		kfree(device);
+	}
+}
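+
+/*
+ * Reference counting sketch (illustrative only): every reference taken
+ * via tape_get_device() or tape_find_device() must be dropped again with
+ * tape_put_device() once the caller is done with the pointer:
+ *
+ *	struct tape_device *device;
+ *
+ *	device = tape_find_device(0);	// takes a reference
+ *	if (IS_ERR(device))
+ *		return PTR_ERR(device);
+ *	// ... use device ...
+ *	tape_put_device(device);	// may free the structure
+ *	device = NULL;			// do not reuse the stale pointer
+ */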
+
+/*
+ * Find tape device by a device index.
+ */
+struct tape_device *
+tape_find_device(int devindex)
+{
+	struct tape_device *device, *tmp;
+
+	device = ERR_PTR(-ENODEV);
+	read_lock(&tape_device_lock);
+	list_for_each_entry(tmp, &tape_device_list, node) {
+		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
+			device = tape_get_device(tmp);
+			break;
+		}
+	}
+	read_unlock(&tape_device_lock);
+	return device;
+}
+
+/*
+ * Driverfs tape probe function.
+ */
+int
+tape_generic_probe(struct ccw_device *cdev)
+{
+	struct tape_device *device;
+	int ret;
+	struct ccw_dev_id dev_id;
+
+	device = tape_alloc_device();
+	if (IS_ERR(device))
+		return -ENODEV;
+	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
+				     CCWDEV_DO_MULTIPATH);
+	ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
+	if (ret) {
+		tape_put_device(device);
+		return ret;
+	}
+	dev_set_drvdata(&cdev->dev, device);
+	cdev->handler = __tape_do_irq;
+	device->cdev = cdev;
+	ccw_device_get_id(cdev, &dev_id);
+	device->cdev_id = devid_to_int(&dev_id);
+	return ret;
+}
+
+static void
+__tape_discard_requests(struct tape_device *device)
+{
+	struct tape_request *	request;
+	struct list_head *	l, *n;
+
+	list_for_each_safe(l, n, &device->req_queue) {
+		request = list_entry(l, struct tape_request, list);
+		if (request->status == TAPE_REQUEST_IN_IO)
+			request->status = TAPE_REQUEST_DONE;
+		list_del(&request->list);
+
+		/* Decrease ref_count for removed request. */
+		request->device = NULL;
+		tape_put_device(device);
+		request->rc = -EIO;
+		if (request->callback != NULL)
+			request->callback(request, request->callback_data);
+	}
+}
+
+/*
+ * Driverfs tape remove function.
+ *
+ * This function is called whenever the common I/O layer detects the device
+ * gone. This can happen at any time and we cannot refuse.
+ */
+void
+tape_generic_remove(struct ccw_device *cdev)
+{
+	struct tape_device *	device;
+
+	device = dev_get_drvdata(&cdev->dev);
+	if (!device) {
+		return;
+	}
+	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	switch (device->tape_state) {
+		case TS_INIT:
+			tape_state_set(device, TS_NOT_OPER);
+		case TS_NOT_OPER:
+			/*
+			 * Nothing to do.
+			 */
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			break;
+		case TS_UNUSED:
+			/*
+			 * Need only to release the device.
+			 */
+			tape_state_set(device, TS_NOT_OPER);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			tape_cleanup_device(device);
+			break;
+		default:
+			/*
+			 * There may be requests on the queue. We will not get
+			 * an interrupt for a request that was running. So we
+			 * just post them all as I/O errors.
+			 */
+			DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
+				device->cdev_id);
+			pr_warning("%s: A tape unit was detached while in "
+				   "use\n", dev_name(&device->cdev->dev));
+			tape_state_set(device, TS_NOT_OPER);
+			__tape_discard_requests(device);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			tape_cleanup_device(device);
+	}
+
+	device = dev_get_drvdata(&cdev->dev);
+	if (device) {
+		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
+		dev_set_drvdata(&cdev->dev, NULL);
+		tape_put_device(device);
+	}
+}
+
+/*
+ * Allocate a new tape ccw request
+ */
+struct tape_request *
+tape_alloc_request(int cplength, int datasize)
+{
+	struct tape_request *request;
+
+	BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
+
+	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);
+
+	request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
+	if (request == NULL) {
+		DBF_EXCEPTION(1, "cqra nomem\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	/* allocate channel program */
+	if (cplength > 0) {
+		request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
+					  GFP_ATOMIC | GFP_DMA);
+		if (request->cpaddr == NULL) {
+			DBF_EXCEPTION(1, "cqra nomem\n");
+			kfree(request);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+	/* alloc small kernel buffer */
+	if (datasize > 0) {
+		request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
+		if (request->cpdata == NULL) {
+			DBF_EXCEPTION(1, "cqra nomem\n");
+			kfree(request->cpaddr);
+			kfree(request);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
+		request->cpdata);
+
+	return request;
+}
+
+/*
+ * Free tape ccw request
+ */
+void
+tape_free_request (struct tape_request * request)
+{
+	DBF_LH(6, "Free request %p\n", request);
+
+	if (request->device)
+		tape_put_device(request->device);
+	kfree(request->cpdata);
+	kfree(request->cpaddr);
+	kfree(request);
+}
+
+static int
+__tape_start_io(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+#ifdef CONFIG_S390_TAPE_BLOCK
+	if (request->op == TO_BLOCK)
+		device->discipline->check_locate(device, request);
+#endif
+	rc = ccw_device_start(
+		device->cdev,
+		request->cpaddr,
+		(unsigned long) request,
+		0x00,
+		request->options
+	);
+	if (rc == 0) {
+		request->status = TAPE_REQUEST_IN_IO;
+	} else if (rc == -EBUSY) {
+		/* The common I/O subsystem is currently busy. Retry later. */
+		request->status = TAPE_REQUEST_QUEUED;
+		schedule_delayed_work(&device->tape_dnr, 0);
+		rc = 0;
+	} else {
+		/* Start failed. Remove request and indicate failure. */
+		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
+	}
+	return rc;
+}
+
+static void
+__tape_start_next_request(struct tape_device *device)
+{
+	struct list_head *l, *n;
+	struct tape_request *request;
+	int rc;
+
+	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
+	/*
+	 * Try to start each request on the request queue until one is
+	 * started successfully.
+	 */
+	list_for_each_safe(l, n, &device->req_queue) {
+		request = list_entry(l, struct tape_request, list);
+
+		/*
+		 * Avoid race condition if bottom-half was triggered more than
+		 * once.
+		 */
+		if (request->status == TAPE_REQUEST_IN_IO)
+			return;
+		/*
+		 * Request has already been stopped. We have to wait until
+		 * the request is removed from the queue in the interrupt
+		 * handling.
+		 */
+		if (request->status == TAPE_REQUEST_DONE)
+			return;
+
+		/*
+		 * We wanted to cancel the request but the common I/O layer
+		 * was busy at that time. This can only happen if this
+	 * function is called by tape_delayed_next_request.
+		 * Otherwise we start the next request on the queue.
+		 */
+		if (request->status == TAPE_REQUEST_CANCEL) {
+			rc = __tape_cancel_io(device, request);
+		} else {
+			rc = __tape_start_io(device, request);
+		}
+		if (rc == 0)
+			return;
+
+		/* Set ending status. */
+		request->rc = rc;
+		request->status = TAPE_REQUEST_DONE;
+
+		/* Remove from request queue. */
+		list_del(&request->list);
+
+		/* Do callback. */
+		if (request->callback != NULL)
+			request->callback(request, request->callback_data);
+	}
+}
+
+static void
+tape_delayed_next_request(struct work_struct *work)
+{
+	struct tape_device *device =
+		container_of(work, struct tape_device, tape_dnr.work);
+
+	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	__tape_start_next_request(device);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+}
+
+static void tape_long_busy_timeout(unsigned long data)
+{
+	struct tape_request *request;
+	struct tape_device *device;
+
+	device = (struct tape_device *) data;
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	request = list_entry(device->req_queue.next, struct tape_request, list);
+	BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
+	DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
+	__tape_start_next_request(device);
+	device->lb_timeout.data = 0UL;
+	tape_put_device(device);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+}
+
+static void
+__tape_end_request(
+	struct tape_device *	device,
+	struct tape_request *	request,
+	int			rc)
+{
+	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
+	if (request) {
+		request->rc = rc;
+		request->status = TAPE_REQUEST_DONE;
+
+		/* Remove from request queue. */
+		list_del(&request->list);
+
+		/* Do callback. */
+		if (request->callback != NULL)
+			request->callback(request, request->callback_data);
+	}
+
+	/* Start next request. */
+	if (!list_empty(&device->req_queue))
+		__tape_start_next_request(device);
+}
+
+/*
+ * Write sense data to dbf
+ */
+void
+tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
+		    struct irb *irb)
+{
+	unsigned int *sptr;
+	const char* op;
+
+	if (request != NULL)
+		op = tape_op_verbose[request->op];
+	else
+		op = "---";
+	DBF_EVENT(3, "DSTAT : %02x   CSTAT: %02x\n",
+		  irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
+	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
+	sptr = (unsigned int *) irb->ecw;
+	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
+	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
+	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
+	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
+}
+
+/*
+ * I/O helper function. Adds the request to the request queue
+ * and starts it if the tape is idle. Has to be called with
+ * the device lock held.
+ */
+static int
+__tape_start_request(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	switch (request->op) {
+		case TO_MSEN:
+		case TO_ASSIGN:
+		case TO_UNASSIGN:
+		case TO_READ_ATTMSG:
+		case TO_RDC:
+			if (device->tape_state == TS_INIT)
+				break;
+			if (device->tape_state == TS_UNUSED)
+				break;
+		default:
+			if (device->tape_state == TS_BLKUSE)
+				break;
+			if (device->tape_state != TS_IN_USE)
+				return -ENODEV;
+	}
+
+	/* Increase use count of device for the added request. */
+	request->device = tape_get_device(device);
+
+	if (list_empty(&device->req_queue)) {
+		/* No other requests are on the queue. Start this one. */
+		rc = __tape_start_io(device, request);
+		if (rc)
+			return rc;
+
+		DBF_LH(5, "Request %p added for execution.\n", request);
+		list_add(&request->list, &device->req_queue);
+	} else {
+		DBF_LH(5, "Request %p add to queue.\n", request);
+		request->status = TAPE_REQUEST_QUEUED;
+		list_add_tail(&request->list, &device->req_queue);
+	}
+	return 0;
+}
+
+/*
+ * Add the request to the request queue, try to start it if the
+ * tape is idle. Return without waiting for end of i/o.
+ */
+int
+tape_do_io_async(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Add request to request queue and try to start it. */
+	rc = __tape_start_request(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
+/*
+ * tape_do_io/__tape_wake_up
+ * Add the request to the request queue, try to start it if the
+ * tape is idle and wait uninterruptibly for its completion.
+ */
+static void
+__tape_wake_up(struct tape_request *request, void *data)
+{
+	request->callback = NULL;
+	wake_up((wait_queue_head_t *) data);
+}
+
+int
+tape_do_io(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Setup callback */
+	request->callback = __tape_wake_up;
+	request->callback_data = &device->wait_queue;
+	/* Add request to request queue and try to start it. */
+	rc = __tape_start_request(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	if (rc)
+		return rc;
+	/* Request added to the queue. Wait for its completion. */
+	wait_event(device->wait_queue, (request->callback == NULL));
+	/* Get rc from request */
+	return request->rc;
+}
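+
+/*
+ * Illustrative sketch (not part of the original driver code): the typical
+ * life cycle of a tape_request as used by the disciplines in this patch --
+ * allocate it, build the channel program, run it synchronously, free it.
+ * TO_NOP, NOP and tape_ccw_end() come from tape.h / tape_std.h further
+ * down in this commit.
+ *
+ *	static int example_nop(struct tape_device *device)
+ *	{
+ *		struct tape_request *request;
+ *		int rc;
+ *
+ *		request = tape_alloc_request(1, 0);
+ *		if (IS_ERR(request))
+ *			return PTR_ERR(request);
+ *		request->op = TO_NOP;
+ *		tape_ccw_end(request->cpaddr, NOP, 0, NULL);
+ *		rc = tape_do_io(device, request);
+ *		tape_free_request(request);
+ *		return rc;
+ *	}
+ */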
+
+/*
+ * tape_do_io_interruptible/__tape_wake_up_interruptible
+ * Add the request to the request queue, try to start it if the
+ * tape is idle and wait interruptibly for its completion.
+ */
+static void
+__tape_wake_up_interruptible(struct tape_request *request, void *data)
+{
+	request->callback = NULL;
+	wake_up_interruptible((wait_queue_head_t *) data);
+}
+
+int
+tape_do_io_interruptible(struct tape_device *device,
+			 struct tape_request *request)
+{
+	int rc;
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Setup callback */
+	request->callback = __tape_wake_up_interruptible;
+	request->callback_data = &device->wait_queue;
+	rc = __tape_start_request(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	if (rc)
+		return rc;
+	/* Request added to the queue. Wait for its completion. */
+	rc = wait_event_interruptible(device->wait_queue,
+				      (request->callback == NULL));
+	if (rc != -ERESTARTSYS)
+		/* Request finished normally. */
+		return request->rc;
+
+	/* Interrupted by a signal. We have to stop the current request. */
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = __tape_cancel_io(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	if (rc == 0) {
+		/* Wait for the interrupt that acknowledges the halt. */
+		do {
+			rc = wait_event_interruptible(
+				device->wait_queue,
+				(request->callback == NULL)
+			);
+		} while (rc == -ERESTARTSYS);
+
+		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
+		rc = -ERESTARTSYS;
+	}
+	return rc;
+}
+
+/*
+ * Stop running ccw.
+ */
+int
+tape_cancel_io(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = __tape_cancel_io(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
+/*
+ * Tape interrupt routine, called from the ccw_device layer
+ */
+static void
+__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	int rc;
+
+	device = dev_get_drvdata(&cdev->dev);
+	if (device == NULL) {
+		return;
+	}
+	request = (struct tape_request *) intparm;
+
+	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);
+
+	/* On special conditions irb is an error pointer */
+	if (IS_ERR(irb)) {
+		/* FIXME: What to do with the request? */
+		switch (PTR_ERR(irb)) {
+			case -ETIMEDOUT:
+				DBF_LH(1, "(%08x): Request timed out\n",
+				       device->cdev_id);
+			case -EIO:
+				__tape_end_request(device, request, -EIO);
+				break;
+			default:
+				DBF_LH(1, "(%08x): Unexpected i/o error %li\n",
+				       device->cdev_id,	PTR_ERR(irb));
+		}
+		return;
+	}
+
+	/*
+	 * If the condition code is not zero and the start function bit is
+	 * still set, this is a deferred error and the last start I/O did
+	 * not succeed. At this point the condition that caused the deferred
+	 * error might still apply. So we just schedule the request to be
+	 * started later.
+	 */
+	if (irb->scsw.cmd.cc != 0 &&
+	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
+	    (request->status == TAPE_REQUEST_IN_IO)) {
+		DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
+			device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
+		request->status = TAPE_REQUEST_QUEUED;
+		schedule_delayed_work(&device->tape_dnr, HZ);
+		return;
+	}
+
+	/* May be an unsolicited irq */
+	if(request != NULL)
+		request->rescnt = irb->scsw.cmd.count;
+	else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
+		 !list_empty(&device->req_queue)) {
+		/* Not Ready to Ready after long busy ? */
+		struct tape_request *req;
+		req = list_entry(device->req_queue.next,
+				 struct tape_request, list);
+		if (req->status == TAPE_REQUEST_LONG_BUSY) {
+			DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
+			if (del_timer(&device->lb_timeout)) {
+				device->lb_timeout.data = 0UL;
+				tape_put_device(device);
+				__tape_start_next_request(device);
+			}
+			return;
+		}
+	}
+	if (irb->scsw.cmd.dstat != 0x0c) {
+		/* Set the 'ONLINE' flag depending on sense byte 1 */
+		if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
+			device->tape_generic_status |= GMT_ONLINE(~0);
+		else
+			device->tape_generic_status &= ~GMT_ONLINE(~0);
+
+		/*
+		 * Any request that does not come back with channel end
+		 * and device end is unusual. Log the sense data.
+		 */
+		DBF_EVENT(3,"-- Tape Interrupthandler --\n");
+		tape_dump_sense_dbf(device, request, irb);
+	} else {
+		/* Upon normal completion the device _is_ online */
+		device->tape_generic_status |= GMT_ONLINE(~0);
+	}
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(6, "tape:device is not operational\n");
+		return;
+	}
+
+	/*
+	 * Requests that were canceled still come back with an interrupt.
+	 * To detect these requests the state is set to TAPE_REQUEST_DONE.
+	 */
+	if(request != NULL && request->status == TAPE_REQUEST_DONE) {
+		__tape_end_request(device, request, -EIO);
+		return;
+	}
+
+	rc = device->discipline->irq(device, request, irb);
+	/*
+	 * rc < 0 : request finished unsuccessfully.
+	 * rc == TAPE_IO_SUCCESS: request finished successfully.
+	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
+	 * rc == TAPE_IO_RETRY: request finished but needs another go.
+	 * rc == TAPE_IO_STOP: request needs to get terminated.
+	 */
+	switch (rc) {
+		case TAPE_IO_SUCCESS:
+			/* Upon normal completion the device _is_ online */
+			device->tape_generic_status |= GMT_ONLINE(~0);
+			__tape_end_request(device, request, rc);
+			break;
+		case TAPE_IO_PENDING:
+			break;
+		case TAPE_IO_LONG_BUSY:
+			device->lb_timeout.data =
+				(unsigned long) tape_get_device(device);
+			device->lb_timeout.expires = jiffies +
+				LONG_BUSY_TIMEOUT * HZ;
+			DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
+			add_timer(&device->lb_timeout);
+			request->status = TAPE_REQUEST_LONG_BUSY;
+			break;
+		case TAPE_IO_RETRY:
+			rc = __tape_start_io(device, request);
+			if (rc)
+				__tape_end_request(device, request, rc);
+			break;
+		case TAPE_IO_STOP:
+			rc = __tape_cancel_io(device, request);
+			if (rc)
+				__tape_end_request(device, request, rc);
+			break;
+		default:
+			if (rc > 0) {
+				DBF_EVENT(6, "xunknownrc\n");
+				__tape_end_request(device, request, -EIO);
+			} else {
+				__tape_end_request(device, request, rc);
+			}
+			break;
+	}
+}
+
+/*
+ * Tape device open function used by tape_char & tape_block frontends.
+ */
+int
+tape_open(struct tape_device *device)
+{
+	int rc;
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(6, "TAPE:nodev\n");
+		rc = -ENODEV;
+	} else if (device->tape_state == TS_IN_USE) {
+		DBF_EVENT(6, "TAPE:dbusy\n");
+		rc = -EBUSY;
+	} else if (device->tape_state == TS_BLKUSE) {
+		DBF_EVENT(6, "TAPE:dbusy\n");
+		rc = -EBUSY;
+	} else if (device->discipline != NULL &&
+		   !try_module_get(device->discipline->owner)) {
+		DBF_EVENT(6, "TAPE:nodisc\n");
+		rc = -ENODEV;
+	} else {
+		tape_state_set(device, TS_IN_USE);
+		rc = 0;
+	}
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
+/*
+ * Tape device release function used by tape_char & tape_block frontends.
+ */
+int
+tape_release(struct tape_device *device)
+{
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	if (device->tape_state == TS_IN_USE)
+		tape_state_set(device, TS_UNUSED);
+	module_put(device->discipline->owner);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return 0;
+}
+
+/*
+ * Execute a magnetic tape command a number of times.
+ */
+int
+tape_mtop(struct tape_device *device, int mt_op, int mt_count)
+{
+	tape_mtop_fn fn;
+	int rc;
+
+	DBF_EVENT(6, "TAPE:mtio\n");
+	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
+	DBF_EVENT(6, "TAPE:arg:	 %x\n", mt_count);
+
+	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
+		return -EINVAL;
+	fn = device->discipline->mtop_array[mt_op];
+	if (fn == NULL)
+		return -EINVAL;
+
+	/* We assume that the backends can handle count up to 500. */
+	if (mt_op == MTBSR  || mt_op == MTFSR  || mt_op == MTFSF  ||
+	    mt_op == MTBSF  || mt_op == MTFSFM || mt_op == MTBSFM) {
+		rc = 0;
+		for (; mt_count > 500; mt_count -= 500)
+			if ((rc = fn(device, 500)) != 0)
+				break;
+		if (rc == 0)
+			rc = fn(device, mt_count);
+	} else
+		rc = fn(device, mt_count);
+	return rc;
+
+}
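+
+/*
+ * Worked example (illustrative only): a space-type operation is split
+ * into chunks of at most 500.  tape_mtop(device, MTFSR, 1200) issues
+ * fn(device, 500), fn(device, 500) and finally fn(device, 200); the
+ * loop stops early and returns the error code if any chunk fails.
+ */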
+
+/*
+ * Tape init function.
+ */
+static int
+tape_init (void)
+{
+	TAPE_DBF_AREA = debug_register ( "tape", 2, 2, 4*sizeof(long));
+	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
+#ifdef DBF_LIKE_HELL
+	debug_set_level(TAPE_DBF_AREA, 6);
+#endif
+	DBF_EVENT(3, "tape init\n");
+	tape_proc_init();
+	tapechar_init ();
+	tapeblock_init ();
+	return 0;
+}
+
+/*
+ * Tape exit function.
+ */
+static void
+tape_exit(void)
+{
+	DBF_EVENT(6, "tape exit\n");
+
+	/* Get rid of the frontends */
+	tapechar_exit();
+	tapeblock_exit();
+	tape_proc_cleanup();
+	debug_unregister (TAPE_DBF_AREA);
+}
+
+MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
+	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
+MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
+MODULE_LICENSE("GPL");
+
+module_init(tape_init);
+module_exit(tape_exit);
+
+EXPORT_SYMBOL(tape_generic_remove);
+EXPORT_SYMBOL(tape_generic_probe);
+EXPORT_SYMBOL(tape_generic_online);
+EXPORT_SYMBOL(tape_generic_offline);
+EXPORT_SYMBOL(tape_generic_pm_suspend);
+EXPORT_SYMBOL(tape_put_device);
+EXPORT_SYMBOL(tape_get_device);
+EXPORT_SYMBOL(tape_state_verbose);
+EXPORT_SYMBOL(tape_op_verbose);
+EXPORT_SYMBOL(tape_state_set);
+EXPORT_SYMBOL(tape_med_state_set);
+EXPORT_SYMBOL(tape_alloc_request);
+EXPORT_SYMBOL(tape_free_request);
+EXPORT_SYMBOL(tape_dump_sense_dbf);
+EXPORT_SYMBOL(tape_do_io);
+EXPORT_SYMBOL(tape_do_io_async);
+EXPORT_SYMBOL(tape_do_io_interruptible);
+EXPORT_SYMBOL(tape_cancel_io);
+EXPORT_SYMBOL(tape_mtop);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_proc.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_proc.c
new file mode 100644
index 0000000..0ceb379
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_proc.c
@@ -0,0 +1,144 @@
+/*
+ *  drivers/s390/char/tape.c
+ *    tape device driver for S/390 and zSeries tapes.
+ *
+ *  S390 and zSeries version
+ *    Copyright (C) 2001 IBM Corporation
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *
+ * PROCFS Functions
+ */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+
+#define TAPE_DBF_AREA	tape_core_dbf
+
+#include "tape.h"
+
+static const char *tape_med_st_verbose[MS_SIZE] =
+{
+	[MS_UNKNOWN] = "UNKNOWN ",
+	[MS_LOADED] = "LOADED  ",
+	[MS_UNLOADED] = "UNLOADED"
+};
+
+/* our proc tapedevices entry */
+static struct proc_dir_entry *tape_proc_devices;
+
+/*
+ * Show function for /proc/tapedevices
+ */
+static int tape_proc_show(struct seq_file *m, void *v)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	const char *str;
+	unsigned long n;
+
+	n = (unsigned long) v - 1;
+	if (!n) {
+		seq_printf(m, "TapeNo\tBusID      CuType/Model\t"
+			"DevType/Model\tBlkSize\tState\tOp\tMedState\n");
+	}
+	device = tape_find_device(n);
+	if (IS_ERR(device))
+		return 0;
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	seq_printf(m, "%d\t", (int) n);
+	seq_printf(m, "%-10.10s ", dev_name(&device->cdev->dev));
+	seq_printf(m, "%04X/", device->cdev->id.cu_type);
+	seq_printf(m, "%02X\t", device->cdev->id.cu_model);
+	seq_printf(m, "%04X/", device->cdev->id.dev_type);
+	seq_printf(m, "%02X\t\t", device->cdev->id.dev_model);
+	if (device->char_data.block_size == 0)
+		seq_printf(m, "auto\t");
+	else
+		seq_printf(m, "%i\t", device->char_data.block_size);
+	if (device->tape_state >= 0 &&
+	    device->tape_state < TS_SIZE)
+		str = tape_state_verbose[device->tape_state];
+	else
+		str = "UNKNOWN";
+	seq_printf(m, "%s\t", str);
+	if (!list_empty(&device->req_queue)) {
+		request = list_entry(device->req_queue.next,
+				     struct tape_request, list);
+		str = tape_op_verbose[request->op];
+	} else
+		str = "---";
+	seq_printf(m, "%s\t", str);
+	seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	tape_put_device(device);
+	return 0;
+}
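+
+/*
+ * Illustrative only: with the seq_printf() calls above, a line of
+ * /proc/tapedevices might look like the following (the bus ID, control
+ * unit/device types and states are made-up example values):
+ *
+ *	TapeNo	BusID      CuType/Model	DevType/Model	BlkSize	State	Op	MedState
+ *	0	0.0.0480   3490/10	3490/40		auto	IN_USE	---	LOADED
+ */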
+
+static void *tape_proc_start(struct seq_file *m, loff_t *pos)
+{
+	if (*pos >= 256 / TAPE_MINORS_PER_DEV)
+		return NULL;
+	return (void *)((unsigned long) *pos + 1);
+}
+
+static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return tape_proc_start(m, pos);
+}
+
+static void tape_proc_stop(struct seq_file *m, void *v)
+{
+}
+
+static const struct seq_operations tape_proc_seq = {
+	.start		= tape_proc_start,
+	.next		= tape_proc_next,
+	.stop		= tape_proc_stop,
+	.show		= tape_proc_show,
+};
+
+static int tape_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &tape_proc_seq);
+}
+
+static const struct file_operations tape_proc_ops =
+{
+	.owner		= THIS_MODULE,
+	.open		= tape_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+/*
+ * Initialize procfs stuff on startup
+ */
+void
+tape_proc_init(void)
+{
+	tape_proc_devices =
+		proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL,
+			    &tape_proc_ops);
+	if (tape_proc_devices == NULL) {
+		return;
+	}
+}
+
+/*
+ * Cleanup all stuff registered to the procfs
+ */
+void
+tape_proc_cleanup(void)
+{
+	if (tape_proc_devices != NULL)
+		remove_proc_entry ("tapedevices", NULL);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_std.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_std.c
new file mode 100644
index 0000000..e765017
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_std.c
@@ -0,0 +1,750 @@
+/*
+ *  drivers/s390/char/tape_std.c
+ *    standard tape device functions for ibm tapes.
+ *
+ *  S390 and zSeries version
+ *    Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *		 Stefan Bader <shbader@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/timer.h>
+
+#include <asm/types.h>
+#include <asm/idals.h>
+#include <asm/ebcdic.h>
+#include <asm/tape390.h>
+
+#define TAPE_DBF_AREA	tape_core_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+
+/*
+ * tape_std_assign
+ */
+static void
+tape_std_assign_timeout(unsigned long data)
+{
+	struct tape_request *	request;
+	struct tape_device *	device;
+	int rc;
+
+	request = (struct tape_request *) data;
+	device = request->device;
+	BUG_ON(!device);
+
+	DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
+			device->cdev_id);
+	rc = tape_cancel_io(device, request);
+	if(rc)
+		DBF_EVENT(3, "(%08x): Assign timeout: Cancel failed with rc = "
+			  "%i\n", device->cdev_id, rc);
+}
+
+int
+tape_std_assign(struct tape_device *device)
+{
+	int                  rc;
+	struct timer_list    timeout;
+	struct tape_request *request;
+
+	request = tape_alloc_request(2, 11);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+
+	request->op = TO_ASSIGN;
+	tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+
+	/*
+	 * The assign command sometimes blocks if the device is assigned
+	 * to another host (actually this shouldn't happen but it does).
+	 * So we set up a timeout for this call.
+	 */
+	init_timer_on_stack(&timeout);
+	timeout.function = tape_std_assign_timeout;
+	timeout.data     = (unsigned long) request;
+	timeout.expires  = jiffies + 2 * HZ;
+	add_timer(&timeout);
+
+	rc = tape_do_io_interruptible(device, request);
+
+	del_timer(&timeout);
+
+	if (rc != 0) {
+		DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
+			device->cdev_id);
+	} else {
+		DBF_EVENT(3, "%08x: Tape assigned\n", device->cdev_id);
+	}
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * tape_std_unassign
+ */
+int
+tape_std_unassign (struct tape_device *device)
+{
+	int                  rc;
+	struct tape_request *request;
+
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(3, "(%08x): Can't unassign device\n",
+			device->cdev_id);
+		return -EIO;
+	}
+
+	request = tape_alloc_request(2, 11);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+
+	request->op = TO_UNASSIGN;
+	tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+
+	if ((rc = tape_do_io(device, request)) != 0) {
+		DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id);
+	} else {
+		DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id);
+	}
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * TAPE390_DISPLAY: Show a string on the tape display.
+ */
+int
+tape_std_display(struct tape_device *device, struct display_struct *disp)
+{
+	struct tape_request *request;
+	int rc;
+
+	request = tape_alloc_request(2, 17);
+	if (IS_ERR(request)) {
+		DBF_EVENT(3, "TAPE: load display failed\n");
+		return PTR_ERR(request);
+	}
+	request->op = TO_DIS;
+
+	*(unsigned char *) request->cpdata = disp->cntrl;
+	DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl);
+	memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8);
+	memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8);
+	ASCEBC(((unsigned char*) request->cpdata) + 1, 16);
+
+	tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+
+	rc = tape_do_io_interruptible(device, request);
+	tape_free_request(request);
+	return rc;
+}
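+
+/*
+ * Illustrative sketch (not part of the original driver): a caller of
+ * tape_std_display() fills in a struct display_struct from
+ * asm/tape390.h.  The field names follow the usage above; the control
+ * byte value and the message texts are made-up example values.
+ *
+ *	struct display_struct disp;
+ *	int rc;
+ *
+ *	memset(&disp, 0, sizeof(disp));
+ *	disp.cntrl = 0x20;
+ *	memcpy(disp.message1, "MOUNT   ", 8);
+ *	memcpy(disp.message2, "TAPE0001", 8);
+ *	rc = tape_std_display(device, &disp);
+ */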
+
+/*
+ * Read block id.
+ */
+int
+tape_std_read_block_id(struct tape_device *device, __u64 *id)
+{
+	struct tape_request *request;
+	int rc;
+
+	request = tape_alloc_request(3, 8);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_RBI;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+	/* execute it */
+	rc = tape_do_io(device, request);
+	if (rc == 0)
+		/* Get result from read buffer. */
+		*id = *(__u64 *) request->cpdata;
+	tape_free_request(request);
+	return rc;
+}
+
+int
+tape_std_terminate_write(struct tape_device *device)
+{
+	int rc;
+
+	if(device->required_tapemarks == 0)
+		return 0;
+
+	DBF_LH(5, "tape%d: terminate write %dxEOF\n", device->first_minor,
+		device->required_tapemarks);
+
+	rc = tape_mtop(device, MTWEOF, device->required_tapemarks);
+	if (rc)
+		return rc;
+
+	device->required_tapemarks = 0;
+	return tape_mtop(device, MTBSR, 1);
+}
+
+/*
+ * MTLOAD: Loads the tape.
+ * The default implementation just waits until the tape medium state changes
+ * to MS_LOADED.
+ */
+int
+tape_std_mtload(struct tape_device *device, int count)
+{
+	return wait_event_interruptible(device->state_change_wq,
+		(device->medium_state == MS_LOADED));
+}
+
+/*
+ * MTSETBLK: Set block size.
+ */
+int
+tape_std_mtsetblk(struct tape_device *device, int count)
+{
+	struct idal_buffer *new;
+
+	DBF_LH(6, "tape_std_mtsetblk(%d)\n", count);
+	if (count <= 0) {
+		/*
+		 * Just set block_size to 0. tapechar_read/tapechar_write
+		 * will realloc the idal buffer if a bigger one than the
+		 * current is needed.
+		 */
+		device->char_data.block_size = 0;
+		return 0;
+	}
+	if (device->char_data.idal_buf != NULL &&
+	    device->char_data.idal_buf->size == count)
+		/* We already have an idal buffer of that size. */
+		return 0;
+
+	if (count > MAX_BLOCKSIZE) {
+		DBF_EVENT(3, "Invalid block size (%d > %d) given.\n",
+			count, MAX_BLOCKSIZE);
+		return -EINVAL;
+	}
+
+	/* Allocate a new idal buffer. */
+	new = idal_buffer_alloc(count, 0);
+	if (IS_ERR(new))
+		return -ENOMEM;
+	if (device->char_data.idal_buf != NULL)
+		idal_buffer_free(device->char_data.idal_buf);
+	device->char_data.idal_buf = new;
+	device->char_data.block_size = count;
+
+	DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size);
+
+	return 0;
+}
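+
+/*
+ * Illustrative only: frontends reach this through tape_mtop(), e.g.
+ * tape_mtop(device, MTSETBLK, 4096) switches to fixed 4K blocks and
+ * tape_mtop(device, MTSETBLK, 0) returns to variable ("auto") mode.
+ */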
+
+/*
+ * MTRESET: Set block size to 0.
+ */
+int
+tape_std_mtreset(struct tape_device *device, int count)
+{
+	DBF_EVENT(6, "TCHAR:devreset:\n");
+	device->char_data.block_size = 0;
+	return 0;
+}
+
+/*
+ * MTFSF: Forward space over 'count' file marks. The tape is positioned
+ * at the EOT (End of Tape) side of the file mark.
+ */
+int
+tape_std_mtfsf(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSF;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTFSR: Forward space over 'count' tape blocks (blocksize is set
+ * via MTSETBLK).
+ */
+int
+tape_std_mtfsr(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSB;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	rc = tape_do_io(device, request);
+	if (rc == 0 && request->rescnt > 0) {
+		DBF_LH(3, "FSR over tapemark\n");
+		rc = 1;
+	}
+	tape_free_request(request);
+
+	return rc;
+}
+
+/*
+ * MTBSR: Backward space over 'count' tape blocks.
+ * (blocksize is set via MTSETBLK).
+ */
+int
+tape_std_mtbsr(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_BSB;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	rc = tape_do_io(device, request);
+	if (rc == 0 && request->rescnt > 0) {
+		DBF_LH(3, "BSR over tapemark\n");
+		rc = 1;
+	}
+	tape_free_request(request);
+
+	return rc;
+}
+
+/*
+ * MTWEOF: Write 'count' file marks at the current position.
+ */
+int
+tape_std_mtweof(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_WTM;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTBSFM: Backward space over 'count' file marks.
+ * The tape is positioned at the BOT (Begin Of Tape) side of the
+ * last skipped file mark.
+ */
+int
+tape_std_mtbsfm(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_BSF;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTBSF: Backward space over 'count' file marks. The tape is positioned at
+ * the EOT (End of Tape) side of the last skipped file mark.
+ */
+int
+tape_std_mtbsf(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_BSF;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+	/* execute it */
+	rc = tape_do_io_free(device, request);
+	if (rc == 0) {
+		rc = tape_mtop(device, MTFSR, 1);
+		if (rc > 0)
+			rc = 0;
+	}
+	return rc;
+}
+
+/*
+ * MTFSFM: Forward space over 'count' file marks.
+ * The tape is positioned at the BOT (Begin Of Tape) side
+ * of the last skipped file mark.
+ */
+int
+tape_std_mtfsfm(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSF;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+	/* execute it */
+	rc = tape_do_io_free(device, request);
+	if (rc == 0) {
+		rc = tape_mtop(device, MTBSR, 1);
+		if (rc > 0)
+			rc = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * MTREW: Rewind the tape.
+ */
+int
+tape_std_mtrew(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(3, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_REW;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+		    device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTOFFL: Rewind the tape and put the drive off-line.
+ * Implement 'rewind unload'
+ */
+int
+tape_std_mtoffl(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(3, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_RUN;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTNOP: 'No operation'.
+ */
+int
+tape_std_mtnop(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_NOP;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTEOM: positions at the end of the portion of the tape already used
+ * for recording data. MTEOM positions after the last file mark, ready for
+ * appending another file.
+ */
+int
+tape_std_mteom(struct tape_device *device, int mt_count)
+{
+	int rc;
+
+	/*
+	 * Seek from the beginning of tape (rewind).
+	 */
+	if ((rc = tape_mtop(device, MTREW, 1)) < 0)
+		return rc;
+
+	/*
+	 * The logical end of volume is given by two sequential tapemarks.
+	 * Look for this by skipping to the next file (over one tapemark)
+	 * and then test for another one (fsr returns 1 if a tapemark was
+	 * encountered).
+	 */
+	do {
+		if ((rc = tape_mtop(device, MTFSF, 1)) < 0)
+			return rc;
+		if ((rc = tape_mtop(device, MTFSR, 1)) < 0)
+			return rc;
+	} while (rc == 0);
+
+	return tape_mtop(device, MTBSR, 1);
+}
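+
+/*
+ * Worked example (illustrative only): for a tape laid out as
+ * <file 0> TM <file 1> TM TM, the loop above runs as follows after the
+ * rewind: MTFSF skips the first tapemark, MTFSR reads a block of file 1
+ * (rc == 0); MTFSF skips the next tapemark, MTFSR then immediately hits
+ * the second tapemark of the double pair (rc == 1) and the loop ends.
+ * The final MTBSR backs up over that tapemark, leaving the tape
+ * positioned for appending a new file.
+ */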
+
+/*
+ * MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind.
+ */
+int
+tape_std_mtreten(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(4, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSF;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1,FORSPACEFILE, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
+	tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
+	/* execute it, MTRETEN rc gets ignored */
+	tape_do_io_interruptible(device, request);
+	tape_free_request(request);
+	return tape_mtop(device, MTREW, 1);
+}
+
+/*
+ * MTERASE: erases the tape.
+ */
+int
+tape_std_mterase(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(6, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_DSE;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 4, REWIND, 0, NULL);
+	tape_ccw_end(request->cpaddr + 5, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTUNLOAD: Rewind the tape and unload it.
+ */
+int
+tape_std_mtunload(struct tape_device *device, int mt_count)
+{
+	return tape_mtop(device, MTOFFL, mt_count);
+}
+
+/*
+ * MTCOMPRESSION: enable or disable compression.
+ * Sets the IDRC on/off.
+ */
+int
+tape_std_mtcompression(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	if (mt_count < 0 || mt_count > 1) {
+		DBF_EXCEPTION(6, "xcom parm\n");
+		return -EINVAL;
+	}
+	request = tape_alloc_request(2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_NOP;
+	/* setup ccws */
+	if (mt_count == 0)
+		*device->modeset_byte &= ~0x08;
+	else
+		*device->modeset_byte |= 0x08;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * Read Block
+ */
+struct tape_request *
+tape_std_read_block(struct tape_device *device, size_t count)
+{
+	struct tape_request *request;
+
+	/*
+	 * We have to alloc 4 ccws in order to be able to transform the request
+	 * into a read backward request in the error case.
+	 */
+	request = tape_alloc_request(4, 0);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "xrbl fail");
+		return request;
+	}
+	request->op = TO_RFO;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD,
+			  device->char_data.idal_buf);
+	DBF_EVENT(6, "xrbl ccwg\n");
+	return request;
+}
+
+/*
+ * Read Block backward transformation function.
+ */
+void
+tape_std_read_backward(struct tape_device *device, struct tape_request *request)
+{
+	/*
+	 * We have allocated 4 ccws in tape_std_read_block, so we can now
+	 * transform the request to a read backward, followed by a
+	 * forward space block.
+	 */
+	request->op = TO_RBA;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD,
+			 device->char_data.idal_buf);
+	tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
+	tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
+	DBF_EVENT(6, "xrop ccwg");
+}
+
+/*
+ * Write Block
+ */
+struct tape_request *
+tape_std_write_block(struct tape_device *device, size_t count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(2, 0);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "xwbl fail\n");
+		return request;
+	}
+	request->op = TO_WRI;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD,
+			  device->char_data.idal_buf);
+	DBF_EVENT(6, "xwbl ccwg\n");
+	return request;
+}
+
+/*
+ * This routine is called by the frontend after an ENOSPC on write
+ */
+void
+tape_std_process_eov(struct tape_device *device)
+{
+	/*
+	 * End of volume: We have to backspace the last written record, then
+	 * we TRY to write a tapemark and then backspace over the written TM
+	 */
+	if (tape_mtop(device, MTBSR, 1) == 0 &&
+	    tape_mtop(device, MTWEOF, 1) == 0) {
+		tape_mtop(device, MTBSR, 1);
+	}
+}
+
+EXPORT_SYMBOL(tape_std_assign);
+EXPORT_SYMBOL(tape_std_unassign);
+EXPORT_SYMBOL(tape_std_display);
+EXPORT_SYMBOL(tape_std_read_block_id);
+EXPORT_SYMBOL(tape_std_mtload);
+EXPORT_SYMBOL(tape_std_mtsetblk);
+EXPORT_SYMBOL(tape_std_mtreset);
+EXPORT_SYMBOL(tape_std_mtfsf);
+EXPORT_SYMBOL(tape_std_mtfsr);
+EXPORT_SYMBOL(tape_std_mtbsr);
+EXPORT_SYMBOL(tape_std_mtweof);
+EXPORT_SYMBOL(tape_std_mtbsfm);
+EXPORT_SYMBOL(tape_std_mtbsf);
+EXPORT_SYMBOL(tape_std_mtfsfm);
+EXPORT_SYMBOL(tape_std_mtrew);
+EXPORT_SYMBOL(tape_std_mtoffl);
+EXPORT_SYMBOL(tape_std_mtnop);
+EXPORT_SYMBOL(tape_std_mteom);
+EXPORT_SYMBOL(tape_std_mtreten);
+EXPORT_SYMBOL(tape_std_mterase);
+EXPORT_SYMBOL(tape_std_mtunload);
+EXPORT_SYMBOL(tape_std_mtcompression);
+EXPORT_SYMBOL(tape_std_read_block);
+EXPORT_SYMBOL(tape_std_read_backward);
+EXPORT_SYMBOL(tape_std_write_block);
+EXPORT_SYMBOL(tape_std_process_eov);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_std.h b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_std.h
new file mode 100644
index 0000000..1fc9523
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/tape_std.h
@@ -0,0 +1,159 @@
+/*
+ *  drivers/s390/char/tape_std.h
+ *    standard tape device functions for ibm tapes.
+ *
+ *    Copyright (C) IBM Corp. 2001,2006
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _TAPE_STD_H
+#define _TAPE_STD_H
+
+#include <asm/tape390.h>
+
+/*
+ * Biggest block size to handle. Currently 64K because we only build
+ * channel programs without data chaining.
+ */
+#define MAX_BLOCKSIZE   65535
+
+/*
+ * The CCW commands for the Tape type of command.
+ */
+#define INVALID_00		0x00	/* Invalid cmd */
+#define BACKSPACEBLOCK		0x27	/* Back Space block */
+#define BACKSPACEFILE		0x2f	/* Back Space file */
+#define DATA_SEC_ERASE		0x97	/* Data security erase */
+#define ERASE_GAP		0x17	/* Erase Gap */
+#define FORSPACEBLOCK		0x37	/* Forward space block */
+#define FORSPACEFILE		0x3F	/* Forward Space file */
+#define FORCE_STREAM_CNT	0xEB	/* Forced streaming count # */
+#define NOP			0x03	/* No operation	*/
+#define READ_FORWARD		0x02	/* Read forward */
+#define REWIND			0x07	/* Rewind */
+#define REWIND_UNLOAD		0x0F	/* Rewind and Unload */
+#define SENSE			0x04	/* Sense */
+#define NEW_MODE_SET		0xEB	/* Guess it is Mode set */
+#define WRITE_CMD		0x01	/* Write */
+#define WRITETAPEMARK		0x1F	/* Write Tape Mark */
+
+#define ASSIGN			0xB7	/* 3420 REJECT,3480 OK	*/
+#define CONTROL_ACCESS		0xE3	/* Set high speed */
+#define DIAG_MODE_SET		0x0B	/* 3420 NOP, 3480 REJECT */
+#define LOAD_DISPLAY		0x9F	/* 3420 REJECT,3480 OK */
+#define LOCATE			0x4F	/* 3420 REJ, 3480 NOP */
+#define LOOP_WRITE_TO_READ	0x8B	/* 3480 REJECT */
+#define MODE_SET_DB		0xDB	/* 3420 REJECT,3480 OK */
+#define MODE_SET_C3		0xC3	/* for 3420 */
+#define MODE_SET_CB		0xCB	/* for 3420 */
+#define MODE_SET_D3		0xD3	/* for 3420 */
+#define READ_BACKWARD		0x0C	/* */
+#define READ_BLOCK_ID		0x22	/* 3420 REJECT,3480 OK */
+#define READ_BUFFER		0x12	/* 3420 REJECT,3480 OK */
+#define READ_BUFF_LOG		0x24	/* 3420 REJECT,3480 OK */
+#define RELEASE			0xD4	/* 3420 NOP, 3480 REJECT */
+#define REQ_TRK_IN_ERROR	0x1B	/* 3420 NOP, 3480 REJECT */
+#define RESERVE			0xF4	/* 3420 NOP, 3480 REJECT */
+#define SENSE_GROUP_ID		0x34	/* 3420 REJECT,3480 OK */
+#define SENSE_ID		0xE4	/* 3420 REJECT,3480 OK */
+#define READ_DEV_CHAR		0x64	/* Read device characteristics */
+#define SET_DIAGNOSE		0x4B	/* 3420 NOP, 3480 REJECT */
+#define SET_GROUP_ID		0xAF	/* 3420 REJECT,3480 OK */
+#define SET_TAPE_WRITE_IMMED	0xC3	/* for 3480 */
+#define SUSPEND			0x5B	/* 3420 REJ, 3480 NOP */
+#define SYNC			0x43	/* Synchronize (flush buffer) */
+#define UNASSIGN		0xC7	/* 3420 REJECT,3480 OK */
+#define PERF_SUBSYS_FUNC	0x77	/* 3490 CMD */
+#define READ_CONFIG_DATA	0xFA	/* 3490 CMD */
+#define READ_MESSAGE_ID		0x4E	/* 3490 CMD */
+#define READ_SUBSYS_DATA	0x3E	/* 3490 CMD */
+#define SET_INTERFACE_ID	0x73	/* 3490 CMD */
+
+#define SENSE_COMMAND_REJECT		0x80
+#define SENSE_INTERVENTION_REQUIRED	0x40
+#define SENSE_BUS_OUT_CHECK		0x20
+#define SENSE_EQUIPMENT_CHECK		0x10
+#define SENSE_DATA_CHECK		0x08
+#define SENSE_OVERRUN			0x04
+#define SENSE_DEFERRED_UNIT_CHECK	0x02
+#define SENSE_ASSIGNED_ELSEWHERE	0x01
+
+#define SENSE_LOCATE_FAILURE		0x80
+#define SENSE_DRIVE_ONLINE		0x40
+#define SENSE_RESERVED			0x20
+#define SENSE_RECORD_SEQUENCE_ERR	0x10
+#define SENSE_BEGINNING_OF_TAPE		0x08
+#define SENSE_WRITE_MODE		0x04
+#define SENSE_WRITE_PROTECT		0x02
+#define SENSE_NOT_CAPABLE		0x01
+
+#define SENSE_CHANNEL_ADAPTER_CODE	0xE0
+#define SENSE_CHANNEL_ADAPTER_LOC	0x10
+#define SENSE_REPORTING_CU		0x08
+#define SENSE_AUTOMATIC_LOADER		0x04
+#define SENSE_TAPE_SYNC_MODE		0x02
+#define SENSE_TAPE_POSITIONING		0x01
+
+/* discipline functions */
+struct tape_request *tape_std_read_block(struct tape_device *, size_t);
+void tape_std_read_backward(struct tape_device *device,
+			    struct tape_request *request);
+struct tape_request *tape_std_write_block(struct tape_device *, size_t);
+struct tape_request *tape_std_bread(struct tape_device *, struct request *);
+void tape_std_free_bread(struct tape_request *);
+void tape_std_check_locate(struct tape_device *, struct tape_request *);
+struct tape_request *tape_std_bwrite(struct request *,
+				     struct tape_device *, int);
+
+/* Some non-mtop commands. */
+int tape_std_assign(struct tape_device *);
+int tape_std_unassign(struct tape_device *);
+int tape_std_read_block_id(struct tape_device *device, __u64 *id);
+int tape_std_display(struct tape_device *, struct display_struct *disp);
+int tape_std_terminate_write(struct tape_device *);
+
+/* Standard magnetic tape commands. */
+int tape_std_mtbsf(struct tape_device *, int);
+int tape_std_mtbsfm(struct tape_device *, int);
+int tape_std_mtbsr(struct tape_device *, int);
+int tape_std_mtcompression(struct tape_device *, int);
+int tape_std_mteom(struct tape_device *, int);
+int tape_std_mterase(struct tape_device *, int);
+int tape_std_mtfsf(struct tape_device *, int);
+int tape_std_mtfsfm(struct tape_device *, int);
+int tape_std_mtfsr(struct tape_device *, int);
+int tape_std_mtload(struct tape_device *, int);
+int tape_std_mtnop(struct tape_device *, int);
+int tape_std_mtoffl(struct tape_device *, int);
+int tape_std_mtreset(struct tape_device *, int);
+int tape_std_mtreten(struct tape_device *, int);
+int tape_std_mtrew(struct tape_device *, int);
+int tape_std_mtsetblk(struct tape_device *, int);
+int tape_std_mtunload(struct tape_device *, int);
+int tape_std_mtweof(struct tape_device *, int);
+
+/* Event handlers */
+void tape_std_default_handler(struct tape_device *);
+void tape_std_unexpect_uchk_handler(struct tape_device *);
+void tape_std_irq(struct tape_device *);
+void tape_std_process_eov(struct tape_device *);
+
+// the error recovery stuff:
+void tape_std_error_recovery(struct tape_device *);
+void tape_std_error_recovery_has_failed(struct tape_device *,int error_id);
+void tape_std_error_recovery_succeded(struct tape_device *);
+void tape_std_error_recovery_do_retry(struct tape_device *);
+void tape_std_error_recovery_read_opposite(struct tape_device *);
+void tape_std_error_recovery_HWBUG(struct tape_device *, int condno);
+
+/* S390 tape types */
+enum s390_tape_type {
+        tape_3480,
+        tape_3490,
+        tape_3590,
+        tape_3592,
+};
+
+#endif // _TAPE_STD_H
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/tty3270.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/tty3270.c
new file mode 100644
index 0000000..b43445a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/tty3270.c
@@ -0,0 +1,1820 @@
+/*
+ *  drivers/s390/char/tty3270.c
+ *    IBM/3270 Driver - tty functions.
+ *
+ *  Author(s):
+ *    Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *    Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *	-- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/compat.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/ebcdic.h>
+#include <asm/uaccess.h>
+
+#include "raw3270.h"
+#include "tty3270.h"
+#include "keyboard.h"
+
+#define TTY3270_CHAR_BUF_SIZE 256
+#define TTY3270_OUTPUT_BUFFER_SIZE 1024
+#define TTY3270_STRING_PAGES 5
+
+struct tty_driver *tty3270_driver;
+static int tty3270_max_index;
+
+static struct raw3270_fn tty3270_fn;
+
+struct tty3270_cell {
+	unsigned char character;
+	unsigned char highlight;
+	unsigned char f_color;
+};
+
+struct tty3270_line {
+	struct tty3270_cell *cells;
+	int len;
+};
+
+#define ESCAPE_NPAR 8
+
+/*
+ * The main tty view data structure.
+ * FIXME:
+ * 1) describe line orientation & lines list concept against screen
+ * 2) describe conversion of screen to lines
+ * 3) describe line format.
+ */
+struct tty3270 {
+	struct raw3270_view view;
+	struct tty_struct *tty;		/* Pointer to tty structure */
+	void **freemem_pages;		/* Array of pages used for freemem. */
+	struct list_head freemem;	/* List of free memory for strings. */
+
+	/* Output stuff. */
+	struct list_head lines;		/* List of lines. */
+	struct list_head update;	/* List of lines to update. */
+	unsigned char wcc;		/* Write control character. */
+	int nr_lines;			/* # lines in list. */
+	int nr_up;			/* # lines up in history. */
+	unsigned long update_flags;	/* Update indication bits. */
+	struct string *status;		/* Lower right of display. */
+	struct raw3270_request *write;	/* Single write request. */
+	struct timer_list timer;	/* Output delay timer. */
+
+	/* Current tty screen. */
+	unsigned int cx, cy;		/* Current output position. */
+	unsigned int highlight;		/* Blink/reverse/underscore */
+	unsigned int f_color;		/* Foreground color */
+	struct tty3270_line *screen;
+
+	/* Input stuff. */
+	struct string *prompt;		/* Output string for input area. */
+	struct string *input;		/* Input string for read request. */
+	struct raw3270_request *read;	/* Single read request. */
+	struct raw3270_request *kreset;	/* Single keyboard reset request. */
+	unsigned char inattr;		/* Visible/invisible input. */
+	int throttle, attn;		/* tty throttle/unthrottle. */
+	struct tasklet_struct readlet;	/* Tasklet to issue read request. */
+	struct kbd_data *kbd;		/* key_maps stuff. */
+
+	/* Escape sequence parsing. */
+	int esc_state, esc_ques, esc_npar;
+	int esc_par[ESCAPE_NPAR];
+	unsigned int saved_cx, saved_cy;
+	unsigned int saved_highlight, saved_f_color;
+
+	/* Command recalling. */
+	struct list_head rcl_lines;	/* List of recallable lines. */
+	struct list_head *rcl_walk;	/* Point in rcl_lines list. */
+	int rcl_nr, rcl_max;		/* Number/max number of rcl_lines. */
+
+	/* Character array for put_char/flush_chars. */
+	unsigned int char_count;
+	char char_buf[TTY3270_CHAR_BUF_SIZE];
+};
+
+/* tty3270->update_flags. See tty3270_update for details. */
+#define TTY_UPDATE_ERASE	1	/* Use EWRITEA instead of WRITE. */
+#define TTY_UPDATE_LIST		2	/* Update lines in tty3270->update. */
+#define TTY_UPDATE_INPUT	4	/* Update input line. */
+#define TTY_UPDATE_STATUS	8	/* Update status line. */
+#define TTY_UPDATE_ALL		16	/* Recreate screen. */
+
+static void tty3270_update(struct tty3270 *);
+
+/*
+ * Setup timeout for a device. On timeout trigger an update.
+ */
+static void tty3270_set_timer(struct tty3270 *tp, int expires)
+{
+	if (expires == 0)
+		del_timer(&tp->timer);
+	else
+		mod_timer(&tp->timer, jiffies + expires);
+}
+
+/*
+ * The input line is made up of the last two lines of the screen.
+ */
+static void
+tty3270_update_prompt(struct tty3270 *tp, char *input, int count)
+{
+	struct string *line;
+	unsigned int off;
+
+	line = tp->prompt;
+	if (count != 0)
+		line->string[5] = TF_INMDT;
+	else
+		line->string[5] = tp->inattr;
+	if (count > tp->view.cols * 2 - 11)
+		count = tp->view.cols * 2 - 11;
+	memcpy(line->string + 6, input, count);
+	line->string[6 + count] = TO_IC;
+	/* Clear to end of input line. */
+	if (count < tp->view.cols * 2 - 11) {
+		line->string[7 + count] = TO_RA;
+		line->string[10 + count] = 0;
+		off = tp->view.cols * tp->view.rows - 9;
+		raw3270_buffer_address(tp->view.dev, line->string+count+8, off);
+		line->len = 11 + count;
+	} else
+		line->len = 7 + count;
+	tp->update_flags |= TTY_UPDATE_INPUT;
+}
+
+static void
+tty3270_create_prompt(struct tty3270 *tp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, 0x6e, TO_SF, TF_INPUT,
+		  /* empty input string */
+		  TO_IC, TO_RA, 0, 0, 0 };
+	struct string *line;
+	unsigned int offset;
+
+	line = alloc_string(&tp->freemem,
+			    sizeof(blueprint) + tp->view.cols * 2 - 9);
+	tp->prompt = line;
+	tp->inattr = TF_INPUT;
+	/* Copy blueprint to status line */
+	memcpy(line->string, blueprint, sizeof(blueprint));
+	line->len = sizeof(blueprint);
+	/* Set output offsets. */
+	offset = tp->view.cols * (tp->view.rows - 2);
+	raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
+	offset = tp->view.cols * tp->view.rows - 9;
+	raw3270_buffer_address(tp->view.dev, line->string + 8, offset);
+
+	/* Allocate input string for reading. */
+	tp->input = alloc_string(&tp->freemem, tp->view.cols * 2 - 9 + 6);
+}
+
+/*
+ * The status line is the last line of the screen. It shows the string
+ * "Running"/"Holding" in the lower right corner of the screen.
+ */
+static void
+tty3270_update_status(struct tty3270 * tp)
+{
+	char *str;
+
+	str = (tp->nr_up != 0) ? "History" : "Running";
+	memcpy(tp->status->string + 8, str, 7);
+	codepage_convert(tp->view.ascebc, tp->status->string + 8, 7);
+	tp->update_flags |= TTY_UPDATE_STATUS;
+}
+
+static void
+tty3270_create_status(struct tty3270 * tp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR, TAC_GREEN,
+		  0, 0, 0, 0, 0, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR,
+		  TAC_RESET };
+	struct string *line;
+	unsigned int offset;
+
+	line = alloc_string(&tp->freemem,sizeof(blueprint));
+	tp->status = line;
+	/* Copy blueprint to status line */
+	memcpy(line->string, blueprint, sizeof(blueprint));
+	/* Set address to start of status string (= last 9 characters). */
+	offset = tp->view.cols * tp->view.rows - 9;
+	raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
+}
+
+/*
+ * Set output offsets to 3270 datastream fragment of a tty string.
+ * (TO_SBA offset at the start and TO_RA offset at the end of the string)
+ */
+static void
+tty3270_update_string(struct tty3270 *tp, struct string *line, int nr)
+{
+	unsigned char *cp;
+
+	raw3270_buffer_address(tp->view.dev, line->string + 1,
+			       tp->view.cols * nr);
+	cp = line->string + line->len - 4;
+	if (*cp == TO_RA)
+		raw3270_buffer_address(tp->view.dev, cp + 1,
+				       tp->view.cols * (nr + 1));
+}
+
+/*
+ * Rebuild update list to print all lines.
+ */
+static void
+tty3270_rebuild_update(struct tty3270 *tp)
+{
+	struct string *s, *n;
+	int line, nr_up;
+
+	/* 
+	 * Throw away update list and create a new one,
+	 * containing all lines that will fit on the screen.
+	 */
+	list_for_each_entry_safe(s, n, &tp->update, update)
+		list_del_init(&s->update);
+	line = tp->view.rows - 3;
+	nr_up = tp->nr_up;
+	list_for_each_entry_reverse(s, &tp->lines, list) {
+		if (nr_up > 0) {
+			nr_up--;
+			continue;
+		}
+		tty3270_update_string(tp, s, line);
+		list_add(&s->update, &tp->update);
+		if (--line < 0)
+			break;
+	}
+	tp->update_flags |= TTY_UPDATE_LIST;
+}
+
+/*
+ * Alloc string for size bytes. If there is not enough room in
+ * freemem, free strings until there is room.
+ */
+static struct string *
+tty3270_alloc_string(struct tty3270 *tp, size_t size)
+{
+	struct string *s, *n;
+
+	s = alloc_string(&tp->freemem, size);
+	if (s)
+		return s;
+	list_for_each_entry_safe(s, n, &tp->lines, list) {
+		BUG_ON(tp->nr_lines <= tp->view.rows - 2);
+		list_del(&s->list);
+		if (!list_empty(&s->update))
+			list_del(&s->update);
+		tp->nr_lines--;
+		if (free_string(&tp->freemem, s) >= size)
+			break;
+	}
+	s = alloc_string(&tp->freemem, size);
+	BUG_ON(!s);
+	if (tp->nr_up != 0 &&
+	    tp->nr_up + tp->view.rows - 2 >= tp->nr_lines) {
+		tp->nr_up = tp->nr_lines - tp->view.rows + 2;
+		tty3270_rebuild_update(tp);
+		tty3270_update_status(tp);
+	}
+	return s;
+}
+
+/*
+ * Add an empty line to the list.
+ */
+static void
+tty3270_blank_line(struct tty3270 *tp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, TO_SA, TAT_EXTHI, TAX_RESET,
+		  TO_SA, TAT_COLOR, TAC_RESET, TO_RA, 0, 0, 0 };
+	struct string *s;
+
+	s = tty3270_alloc_string(tp, sizeof(blueprint));
+	memcpy(s->string, blueprint, sizeof(blueprint));
+	s->len = sizeof(blueprint);
+	list_add_tail(&s->list, &tp->lines);
+	tp->nr_lines++;
+	if (tp->nr_up != 0)
+		tp->nr_up++;
+}
+
+/*
+ * Write request completion callback.
+ */
+static void
+tty3270_write_callback(struct raw3270_request *rq, void *data)
+{
+	struct tty3270 *tp;
+
+	tp = (struct tty3270 *) rq->view;
+	if (rq->rc != 0) {
+		/* Write wasn't successful. Refresh all. */
+		tp->update_flags = TTY_UPDATE_ALL;
+		tty3270_set_timer(tp, 1);
+	}
+	raw3270_request_reset(rq);
+	xchg(&tp->write, rq);
+}
+
+/*
+ * Update 3270 display.
+ */
+static void
+tty3270_update(struct tty3270 *tp)
+{
+	static char invalid_sba[2] = { 0xff, 0xff };
+	struct raw3270_request *wrq;
+	unsigned long updated;
+	struct string *s, *n;
+	char *sba, *str;
+	int rc, len;
+
+	wrq = xchg(&tp->write, 0);
+	if (!wrq) {
+		tty3270_set_timer(tp, 1);
+		return;
+	}
+
+	spin_lock(&tp->view.lock);
+	updated = 0;
+	if (tp->update_flags & TTY_UPDATE_ALL) {
+		tty3270_rebuild_update(tp);
+		tty3270_update_status(tp);
+		tp->update_flags = TTY_UPDATE_ERASE | TTY_UPDATE_LIST |
+			TTY_UPDATE_INPUT | TTY_UPDATE_STATUS;
+	}
+	if (tp->update_flags & TTY_UPDATE_ERASE) {
+		/* Use erase write alternate to erase display. */
+		raw3270_request_set_cmd(wrq, TC_EWRITEA);
+		updated |= TTY_UPDATE_ERASE;
+	} else
+		raw3270_request_set_cmd(wrq, TC_WRITE);
+
+	raw3270_request_add_data(wrq, &tp->wcc, 1);
+	tp->wcc = TW_NONE;
+
+	/*
+	 * Update status line.
+	 */
+	if (tp->update_flags & TTY_UPDATE_STATUS)
+		if (raw3270_request_add_data(wrq, tp->status->string,
+					     tp->status->len) == 0)
+			updated |= TTY_UPDATE_STATUS;
+
+	/*
+	 * Write input line.
+	 */
+	if (tp->update_flags & TTY_UPDATE_INPUT)
+		if (raw3270_request_add_data(wrq, tp->prompt->string,
+					     tp->prompt->len) == 0)
+			updated |= TTY_UPDATE_INPUT;
+
+	sba = invalid_sba;
+	
+	if (tp->update_flags & TTY_UPDATE_LIST) {
+		/* Write strings in the update list to the screen. */
+		list_for_each_entry_safe(s, n, &tp->update, update) {
+			str = s->string;
+			len = s->len;
+			/*
+			 * Skip TO_SBA at the start of the string if the
+			 * last output position matches the start address
+			 * of this line.
+			 */
+			if (s->string[1] == sba[0] && s->string[2] == sba[1])
+				str += 3, len -= 3;
+			if (raw3270_request_add_data(wrq, str, len) != 0)
+				break;
+			list_del_init(&s->update);
+			sba = s->string + s->len - 3;
+		}
+		if (list_empty(&tp->update))
+			updated |= TTY_UPDATE_LIST;
+	}
+	wrq->callback = tty3270_write_callback;
+	rc = raw3270_start(&tp->view, wrq);
+	if (rc == 0) {
+		tp->update_flags &= ~updated;
+		if (tp->update_flags)
+			tty3270_set_timer(tp, 1);
+	} else {
+		raw3270_request_reset(wrq);
+		xchg(&tp->write, wrq);
+	}
+	spin_unlock(&tp->view.lock);
+}
+
+/*
+ * Command recalling.
+ */
+static void
+tty3270_rcl_add(struct tty3270 *tp, char *input, int len)
+{
+	struct string *s;
+
+	tp->rcl_walk = NULL;
+	if (len <= 0)
+		return;
+	if (tp->rcl_nr >= tp->rcl_max) {
+		s = list_entry(tp->rcl_lines.next, struct string, list);
+		list_del(&s->list);
+		free_string(&tp->freemem, s);
+		tp->rcl_nr--;
+	}
+	s = tty3270_alloc_string(tp, len);
+	memcpy(s->string, input, len);
+	list_add_tail(&s->list, &tp->rcl_lines);
+	tp->rcl_nr++;
+}
+
+static void
+tty3270_rcl_backward(struct kbd_data *kbd)
+{
+	struct tty3270 *tp;
+	struct string *s;
+
+	tp = kbd->tty->driver_data;
+	spin_lock_bh(&tp->view.lock);
+	if (tp->inattr == TF_INPUT) {
+		if (tp->rcl_walk && tp->rcl_walk->prev != &tp->rcl_lines)
+			tp->rcl_walk = tp->rcl_walk->prev;
+		else if (!list_empty(&tp->rcl_lines))
+			tp->rcl_walk = tp->rcl_lines.prev;
+		if (tp->rcl_walk) {
+			s = list_entry(tp->rcl_walk, struct string, list);
+			tty3270_update_prompt(tp, s->string, s->len);
+		} else
+			tty3270_update_prompt(tp, NULL, 0);
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Deactivate tty view.
+ */
+static void
+tty3270_exit_tty(struct kbd_data *kbd)
+{
+	struct tty3270 *tp;
+
+	tp = kbd->tty->driver_data;
+	raw3270_deactivate_view(&tp->view);
+}
+
+/*
+ * Scroll forward in history.
+ */
+static void
+tty3270_scroll_forward(struct kbd_data *kbd)
+{
+	struct tty3270 *tp;
+	int nr_up;
+
+	tp = kbd->tty->driver_data;
+	spin_lock_bh(&tp->view.lock);
+	nr_up = tp->nr_up - tp->view.rows + 2;
+	if (nr_up < 0)
+		nr_up = 0;
+	if (nr_up != tp->nr_up) {
+		tp->nr_up = nr_up;
+		tty3270_rebuild_update(tp);
+		tty3270_update_status(tp);
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Scroll backward in history.
+ */
+static void
+tty3270_scroll_backward(struct kbd_data *kbd)
+{
+	struct tty3270 *tp;
+	int nr_up;
+
+	tp = kbd->tty->driver_data;
+	spin_lock_bh(&tp->view.lock);
+	nr_up = tp->nr_up + tp->view.rows - 2;
+	if (nr_up + tp->view.rows - 2 > tp->nr_lines)
+		nr_up = tp->nr_lines - tp->view.rows + 2;
+	if (nr_up != tp->nr_up) {
+		tp->nr_up = nr_up;
+		tty3270_rebuild_update(tp);
+		tty3270_update_status(tp);
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Pass input line to tty.
+ */
+static void
+tty3270_read_tasklet(struct raw3270_request *rrq)
+{
+	static char kreset_data = TW_KR;
+	struct tty3270 *tp;
+	char *input;
+	int len;
+
+	tp = (struct tty3270 *) rrq->view;
+	spin_lock_bh(&tp->view.lock);
+	/*
+	 * Two AID keys are special: For 0x7d (enter) the input line
+	 * has to be emitted to the tty and for 0x6d the screen
+	 * needs to be redrawn.
+	 */
+	input = NULL;
+	len = 0;
+	if (tp->input->string[0] == 0x7d) {
+		/* Enter: write input to tty. */
+		input = tp->input->string + 6;
+		len = tp->input->len - 6 - rrq->rescnt;
+		if (tp->inattr != TF_INPUTN)
+			tty3270_rcl_add(tp, input, len);
+		if (tp->nr_up > 0) {
+			tp->nr_up = 0;
+			tty3270_rebuild_update(tp);
+			tty3270_update_status(tp);
+		}
+		/* Clear input area. */
+		tty3270_update_prompt(tp, NULL, 0);
+		tty3270_set_timer(tp, 1);
+	} else if (tp->input->string[0] == 0x6d) {
+		/* Display has been cleared. Redraw. */
+		tp->update_flags = TTY_UPDATE_ALL;
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+
+	/* Start keyboard reset command. */
+	raw3270_request_reset(tp->kreset);
+	raw3270_request_set_cmd(tp->kreset, TC_WRITE);
+	raw3270_request_add_data(tp->kreset, &kreset_data, 1);
+	raw3270_start(&tp->view, tp->kreset);
+
+	/* Emit input string. */
+	if (tp->tty) {
+		while (len-- > 0)
+			kbd_keycode(tp->kbd, *input++);
+		/* Emit keycode for AID byte. */
+		kbd_keycode(tp->kbd, 256 + tp->input->string[0]);
+	}
+
+	raw3270_request_reset(rrq);
+	xchg(&tp->read, rrq);
+	raw3270_put_view(&tp->view);
+}
+
+/*
+ * Read request completion callback.
+ */
+static void
+tty3270_read_callback(struct raw3270_request *rq, void *data)
+{
+	raw3270_get_view(rq->view);
+	/* Schedule tasklet to pass input to tty. */
+	tasklet_schedule(&((struct tty3270 *) rq->view)->readlet);
+}
+
+/*
+ * Issue a read request. Call with device lock.
+ */
+static void
+tty3270_issue_read(struct tty3270 *tp, int lock)
+{
+	struct raw3270_request *rrq;
+	int rc;
+
+	rrq = xchg(&tp->read, 0);
+	if (!rrq)
+		/* Read already scheduled. */
+		return;
+	rrq->callback = tty3270_read_callback;
+	rrq->callback_data = tp;
+	raw3270_request_set_cmd(rrq, TC_READMOD);
+	raw3270_request_set_data(rrq, tp->input->string, tp->input->len);
+	/* Issue the read modified request. */
+	if (lock) {
+		rc = raw3270_start(&tp->view, rrq);
+	} else
+		rc = raw3270_start_irq(&tp->view, rrq);
+	if (rc) {
+		raw3270_request_reset(rrq);
+		xchg(&tp->read, rrq);
+	}
+}
+
+/*
+ * Switch to the tty view.
+ */
+static int
+tty3270_activate(struct raw3270_view *view)
+{
+	struct tty3270 *tp;
+
+	tp = (struct tty3270 *) view;
+	tp->update_flags = TTY_UPDATE_ALL;
+	tty3270_set_timer(tp, 1);
+	return 0;
+}
+
+static void
+tty3270_deactivate(struct raw3270_view *view)
+{
+	struct tty3270 *tp;
+
+	tp = (struct tty3270 *) view;
+	del_timer(&tp->timer);
+}
+
+static int
+tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
+{
+	/* Handle ATTN. Schedule tasklet to read aid. */
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		if (!tp->throttle)
+			tty3270_issue_read(tp, 0);
+		else
+			tp->attn = 1;
+	}
+
+	if (rq) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+			rq->rc = -EIO;
+		else
+			/* Normal end. Copy residual count. */
+			rq->rescnt = irb->scsw.cmd.count;
+	}
+	return RAW3270_IO_DONE;
+}
+
+/*
+ * Allocate tty3270 structure.
+ */
+static struct tty3270 *
+tty3270_alloc_view(void)
+{
+	struct tty3270 *tp;
+	int pages;
+
+	tp = kzalloc(sizeof(struct tty3270), GFP_KERNEL);
+	if (!tp)
+		goto out_err;
+	tp->freemem_pages =
+		kmalloc(sizeof(void *) * TTY3270_STRING_PAGES, GFP_KERNEL);
+	if (!tp->freemem_pages)
+		goto out_tp;
+	INIT_LIST_HEAD(&tp->freemem);
+	for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) {
+		tp->freemem_pages[pages] = (void *)
+			__get_free_pages(GFP_KERNEL|GFP_DMA, 0);
+		if (!tp->freemem_pages[pages])
+			goto out_pages;
+		add_string_memory(&tp->freemem,
+				  tp->freemem_pages[pages], PAGE_SIZE);
+	}
+	tp->write = raw3270_request_alloc(TTY3270_OUTPUT_BUFFER_SIZE);
+	if (IS_ERR(tp->write))
+		goto out_pages;
+	tp->read = raw3270_request_alloc(0);
+	if (IS_ERR(tp->read))
+		goto out_write;
+	tp->kreset = raw3270_request_alloc(1);
+	if (IS_ERR(tp->kreset))
+		goto out_read;
+	tp->kbd = kbd_alloc();
+	if (!tp->kbd)
+		goto out_reset;
+	return tp;
+
+out_reset:
+	raw3270_request_free(tp->kreset);
+out_read:
+	raw3270_request_free(tp->read);
+out_write:
+	raw3270_request_free(tp->write);
+out_pages:
+	while (pages--)
+		free_pages((unsigned long) tp->freemem_pages[pages], 0);
+	kfree(tp->freemem_pages);
+out_tp:
+	kfree(tp);
+out_err:
+	return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Free tty3270 structure.
+ */
+static void
+tty3270_free_view(struct tty3270 *tp)
+{
+	int pages;
+
+	del_timer_sync(&tp->timer);
+	kbd_free(tp->kbd);
+	raw3270_request_free(tp->kreset);
+	raw3270_request_free(tp->read);
+	raw3270_request_free(tp->write);
+	for (pages = 0; pages < TTY3270_STRING_PAGES; pages++)
+		free_pages((unsigned long) tp->freemem_pages[pages], 0);
+	kfree(tp->freemem_pages);
+	kfree(tp);
+}
+
+/*
+ * Allocate tty3270 screen.
+ */
+static int
+tty3270_alloc_screen(struct tty3270 *tp)
+{
+	unsigned long size;
+	int lines;
+
+	size = sizeof(struct tty3270_line) * (tp->view.rows - 2);
+	tp->screen = kzalloc(size, GFP_KERNEL);
+	if (!tp->screen)
+		goto out_err;
+	for (lines = 0; lines < tp->view.rows - 2; lines++) {
+		size = sizeof(struct tty3270_cell) * tp->view.cols;
+		tp->screen[lines].cells = kzalloc(size, GFP_KERNEL);
+		if (!tp->screen[lines].cells)
+			goto out_screen;
+	}
+	return 0;
+out_screen:
+	while (lines--)
+		kfree(tp->screen[lines].cells);
+	kfree(tp->screen);
+out_err:
+	return -ENOMEM;
+}
+
+/*
+ * Free tty3270 screen.
+ */
+static void
+tty3270_free_screen(struct tty3270 *tp)
+{
+	int lines;
+
+	for (lines = 0; lines < tp->view.rows - 2; lines++)
+		kfree(tp->screen[lines].cells);
+	kfree(tp->screen);
+}
+
+/*
+ * Unlink tty3270 data structure from tty.
+ */
+static void
+tty3270_release(struct raw3270_view *view)
+{
+	struct tty3270 *tp;
+	struct tty_struct *tty;
+
+	tp = (struct tty3270 *) view;
+	tty = tp->tty;
+	if (tty) {
+		tty->driver_data = NULL;
+		tp->tty = tp->kbd->tty = NULL;
+		tty_hangup(tty);
+		raw3270_put_view(&tp->view);
+	}
+}
+
+/*
+ * Free tty3270 data structure
+ */
+static void
+tty3270_free(struct raw3270_view *view)
+{
+	tty3270_free_screen((struct tty3270 *) view);
+	tty3270_free_view((struct tty3270 *) view);
+}
+
+/*
+ * Delayed freeing of tty3270 views.
+ */
+static void
+tty3270_del_views(void)
+{
+	struct tty3270 *tp;
+	int i;
+
+	for (i = 0; i < tty3270_max_index; i++) {
+		tp = (struct tty3270 *)
+			raw3270_find_view(&tty3270_fn, i + RAW3270_FIRSTMINOR);
+		if (!IS_ERR(tp))
+			raw3270_del_view(&tp->view);
+	}
+}
+
+static struct raw3270_fn tty3270_fn = {
+	.activate = tty3270_activate,
+	.deactivate = tty3270_deactivate,
+	.intv = (void *) tty3270_irq,
+	.release = tty3270_release,
+	.free = tty3270_free
+};
+
+/*
+ * This routine is called whenever a 3270 tty is opened.
+ */
+static int
+tty3270_open(struct tty_struct *tty, struct file * filp)
+{
+	struct tty3270 *tp;
+	int i, rc;
+
+	if (tty->count > 1)
+		return 0;
+	/* Check if the tty3270 is already there. */
+	tp = (struct tty3270 *)
+		raw3270_find_view(&tty3270_fn,
+				  tty->index + RAW3270_FIRSTMINOR);
+	if (!IS_ERR(tp)) {
+		tty->driver_data = tp;
+		tty->winsize.ws_row = tp->view.rows - 2;
+		tty->winsize.ws_col = tp->view.cols;
+		tty->low_latency = 0;
+		tp->tty = tty;
+		tp->kbd->tty = tty;
+		tp->inattr = TF_INPUT;
+		return 0;
+	}
+	if (tty3270_max_index < tty->index + 1)
+		tty3270_max_index = tty->index + 1;
+
+	/* Quick exit if there is no device for tty->index. */
+	if (PTR_ERR(tp) == -ENODEV)
+		return -ENODEV;
+
+	/* Allocate tty3270 structure on first open. */
+	tp = tty3270_alloc_view();
+	if (IS_ERR(tp))
+		return PTR_ERR(tp);
+
+	INIT_LIST_HEAD(&tp->lines);
+	INIT_LIST_HEAD(&tp->update);
+	INIT_LIST_HEAD(&tp->rcl_lines);
+	tp->rcl_max = 20;
+	setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
+		    (unsigned long) tp);
+	tasklet_init(&tp->readlet, 
+		     (void (*)(unsigned long)) tty3270_read_tasklet,
+		     (unsigned long) tp->read);
+
+	rc = raw3270_add_view(&tp->view, &tty3270_fn,
+			      tty->index + RAW3270_FIRSTMINOR);
+	if (rc) {
+		tty3270_free_view(tp);
+		return rc;
+	}
+
+	rc = tty3270_alloc_screen(tp);
+	if (rc) {
+		raw3270_put_view(&tp->view);
+		raw3270_del_view(&tp->view);
+		return rc;
+	}
+
+	tp->tty = tty;
+	tty->low_latency = 0;
+	tty->driver_data = tp;
+	tty->winsize.ws_row = tp->view.rows - 2;
+	tty->winsize.ws_col = tp->view.cols;
+
+	tty3270_create_prompt(tp);
+	tty3270_create_status(tp);
+	tty3270_update_status(tp);
+
+	/* Create blank line for every line in the tty output area. */
+	for (i = 0; i < tp->view.rows - 2; i++)
+		tty3270_blank_line(tp);
+
+	tp->kbd->tty = tty;
+	tp->kbd->fn_handler[KVAL(K_INCRCONSOLE)] = tty3270_exit_tty;
+	tp->kbd->fn_handler[KVAL(K_SCROLLBACK)] = tty3270_scroll_backward;
+	tp->kbd->fn_handler[KVAL(K_SCROLLFORW)] = tty3270_scroll_forward;
+	tp->kbd->fn_handler[KVAL(K_CONS)] = tty3270_rcl_backward;
+	kbd_ascebc(tp->kbd, tp->view.ascebc);
+
+	raw3270_activate_view(&tp->view);
+	return 0;
+}
+
+/*
+ * This routine is called when the 3270 tty is closed. We wait
+ * for the remaining request to be completed. Then we clean up.
+ */
+static void
+tty3270_close(struct tty_struct *tty, struct file * filp)
+{
+	struct tty3270 *tp;
+
+	if (tty->count > 1)
+		return;
+	tp = (struct tty3270 *) tty->driver_data;
+	if (tp) {
+		tty->driver_data = NULL;
+		tp->tty = tp->kbd->tty = NULL;
+		raw3270_put_view(&tp->view);
+	}
+}
+
+/*
+ * We always have room.
+ */
+static int
+tty3270_write_room(struct tty_struct *tty)
+{
+	return INT_MAX;
+}
+
+/*
+ * Insert character into the screen at the current position with the
+ * current color and highlight. This function does NOT do cursor movement.
+ */
+static void tty3270_put_character(struct tty3270 *tp, char ch)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+
+	line = tp->screen + tp->cy;
+	if (line->len <= tp->cx) {
+		while (line->len < tp->cx) {
+			cell = line->cells + line->len;
+			cell->character = tp->view.ascebc[' '];
+			cell->highlight = tp->highlight;
+			cell->f_color = tp->f_color;
+			line->len++;
+		}
+		line->len++;
+	}
+	cell = line->cells + tp->cx;
+	cell->character = tp->view.ascebc[(unsigned int) ch];
+	cell->highlight = tp->highlight;
+	cell->f_color = tp->f_color;
+}
+
+/*
+ * Convert a tty3270_line to a 3270 data fragment usable for output.
+ */
+static void
+tty3270_convert_line(struct tty3270 *tp, int line_nr)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+	struct string *s, *n;
+	unsigned char highlight;
+	unsigned char f_color;
+	char *cp;
+	int flen, i;
+
+	/* Determine how long the fragment will be. */
+	flen = 3;		/* Prefix (TO_SBA). */
+	line = tp->screen + line_nr;
+	flen += line->len;
+	highlight = TAX_RESET;
+	f_color = TAC_RESET;
+	for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
+		if (cell->highlight != highlight) {
+			flen += 3;	/* TO_SA to switch highlight. */
+			highlight = cell->highlight;
+		}
+		if (cell->f_color != f_color) {
+			flen += 3;	/* TO_SA to switch color. */
+			f_color = cell->f_color;
+		}
+	}
+	if (highlight != TAX_RESET)
+		flen += 3;	/* TO_SA to reset highlight. */
+	if (f_color != TAC_RESET)
+		flen += 3;	/* TO_SA to reset color. */
+	if (line->len < tp->view.cols)
+		flen += 4;	/* Postfix (TO_RA). */
+
+	/* Find the line in the list. */
+	i = tp->view.rows - 2 - line_nr;
+	list_for_each_entry_reverse(s, &tp->lines, list)
+		if (--i <= 0)
+			break;
+	/*
+	 * Check if the line needs to get reallocated.
+	 */
+	if (s->len != flen) {
+		/* Reallocate string. */
+		n = tty3270_alloc_string(tp, flen);
+		list_add(&n->list, &s->list);
+		list_del_init(&s->list);
+		if (!list_empty(&s->update))
+			list_del_init(&s->update);
+		free_string(&tp->freemem, s);
+		s = n;
+	}
+
+	/* Write 3270 data fragment. */
+	cp = s->string;
+	*cp++ = TO_SBA;
+	*cp++ = 0;
+	*cp++ = 0;
+
+	highlight = TAX_RESET;
+	f_color = TAC_RESET;
+	for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
+		if (cell->highlight != highlight) {
+			*cp++ = TO_SA;
+			*cp++ = TAT_EXTHI;
+			*cp++ = cell->highlight;
+			highlight = cell->highlight;
+		}
+		if (cell->f_color != f_color) {
+			*cp++ = TO_SA;
+			*cp++ = TAT_COLOR;
+			*cp++ = cell->f_color;
+			f_color = cell->f_color;
+		}
+		*cp++ = cell->character;
+	}
+	if (highlight != TAX_RESET) {
+		*cp++ = TO_SA;
+		*cp++ = TAT_EXTHI;
+		*cp++ = TAX_RESET;
+	}
+	if (f_color != TAC_RESET) {
+		*cp++ = TO_SA;
+		*cp++ = TAT_COLOR;
+		*cp++ = TAC_RESET;
+	}
+	if (line->len < tp->view.cols) {
+		*cp++ = TO_RA;
+		*cp++ = 0;
+		*cp++ = 0;
+		*cp++ = 0;
+	}
+
+	if (tp->nr_up + line_nr < tp->view.rows - 2) {
+		/* Line is currently visible on screen. */
+		tty3270_update_string(tp, s, line_nr);
+		/* Add line to update list. */
+		if (list_empty(&s->update)) {
+			list_add_tail(&s->update, &tp->update);
+			tp->update_flags |= TTY_UPDATE_LIST;
+		}
+	}
+}
+
+/*
+ * Do carriage return.
+ */
+static void
+tty3270_cr(struct tty3270 *tp)
+{
+	tp->cx = 0;
+}
+
+/*
+ * Do line feed.
+ */
+static void
+tty3270_lf(struct tty3270 *tp)
+{
+	struct tty3270_line temp;
+	int i;
+
+	tty3270_convert_line(tp, tp->cy);
+	if (tp->cy < tp->view.rows - 3) {
+		tp->cy++;
+		return;
+	}
+	/* Last line just filled up. Add new, blank line. */
+	tty3270_blank_line(tp);
+	temp = tp->screen[0];
+	temp.len = 0;
+	for (i = 0; i < tp->view.rows - 3; i++)
+		tp->screen[i] = tp->screen[i+1];
+	tp->screen[tp->view.rows - 3] = temp;
+	tty3270_rebuild_update(tp);
+}
+
+static void
+tty3270_ri(struct tty3270 *tp)
+{
+	if (tp->cy > 0) {
+	    tty3270_convert_line(tp, tp->cy);
+	    tp->cy--;
+	}
+}
+
+/*
+ * Insert characters at current position.
+ */
+static void
+tty3270_insert_characters(struct tty3270 *tp, int n)
+{
+	struct tty3270_line *line;
+	int k;
+
+	line = tp->screen + tp->cy;
+	while (line->len < tp->cx) {
+		line->cells[line->len].character = tp->view.ascebc[' '];
+		line->cells[line->len].highlight = TAX_RESET;
+		line->cells[line->len].f_color = TAC_RESET;
+		line->len++;
+	}
+	if (n > tp->view.cols - tp->cx)
+		n = tp->view.cols - tp->cx;
+	k = min_t(int, line->len - tp->cx, tp->view.cols - tp->cx - n);
+	while (k--)
+		line->cells[tp->cx + n + k] = line->cells[tp->cx + k];
+	line->len += n;
+	if (line->len > tp->view.cols)
+		line->len = tp->view.cols;
+	while (n-- > 0) {
+		line->cells[tp->cx + n].character = tp->view.ascebc[' '];
+		line->cells[tp->cx + n].highlight = tp->highlight;
+		line->cells[tp->cx + n].f_color = tp->f_color;
+	}
+}
+
+/*
+ * Delete characters at current position.
+ */
+static void
+tty3270_delete_characters(struct tty3270 *tp, int n)
+{
+	struct tty3270_line *line;
+	int i;
+
+	line = tp->screen + tp->cy;
+	if (line->len <= tp->cx)
+		return;
+	if (line->len - tp->cx <= n) {
+		line->len = tp->cx;
+		return;
+	}
+	for (i = tp->cx; i + n < line->len; i++)
+		line->cells[i] = line->cells[i + n];
+	line->len -= n;
+}
+
+/*
+ * Erase characters at current position.
+ */
+static void
+tty3270_erase_characters(struct tty3270 *tp, int n)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+
+	line = tp->screen + tp->cy;
+	while (line->len > tp->cx && n-- > 0) {
+		cell = line->cells + tp->cx++;
+		cell->character = ' ';
+		cell->highlight = TAX_RESET;
+		cell->f_color = TAC_RESET;
+	}
+	tp->cx += n;
+	tp->cx = min_t(int, tp->cx, tp->view.cols - 1);
+}
+
+/*
+ * Erase line, 3 different cases:
+ *  Esc [ 0 K	Erase from current position to end of line inclusive
+ *  Esc [ 1 K	Erase from beginning of line to current position inclusive
+ *  Esc [ 2 K	Erase entire line (without moving cursor)
+ */
+static void
+tty3270_erase_line(struct tty3270 *tp, int mode)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+	int i;
+
+	line = tp->screen + tp->cy;
+	if (mode == 0)
+		line->len = tp->cx;
+	else if (mode == 1) {
+		for (i = 0; i < tp->cx; i++) {
+			cell = line->cells + i;
+			cell->character = ' ';
+			cell->highlight = TAX_RESET;
+			cell->f_color = TAC_RESET;
+		}
+		if (line->len <= tp->cx)
+			line->len = tp->cx + 1;
+	} else if (mode == 2)
+		line->len = 0;
+	tty3270_convert_line(tp, tp->cy);
+}
+
+/*
+ * Erase display, 3 different cases:
+ *  Esc [ 0 J	Erase from current position to bottom of screen inclusive
+ *  Esc [ 1 J	Erase from top of screen to current position inclusive
+ *  Esc [ 2 J	Erase entire screen (without moving the cursor)
+ */
+static void
+tty3270_erase_display(struct tty3270 *tp, int mode)
+{
+	int i;
+
+	if (mode == 0) {
+		tty3270_erase_line(tp, 0);
+		for (i = tp->cy + 1; i < tp->view.rows - 2; i++) {
+			tp->screen[i].len = 0;
+			tty3270_convert_line(tp, i);
+		}
+	} else if (mode == 1) {
+		for (i = 0; i < tp->cy; i++) {
+			tp->screen[i].len = 0;
+			tty3270_convert_line(tp, i);
+		}
+		tty3270_erase_line(tp, 1);
+	} else if (mode == 2) {
+		for (i = 0; i < tp->view.rows - 2; i++) {
+			tp->screen[i].len = 0;
+			tty3270_convert_line(tp, i);
+		}
+	}
+	tty3270_rebuild_update(tp);
+}
+
+/*
+ * Set attributes found in an escape sequence.
+ *  Esc [ <attr> ; <attr> ; ... m
+ */
+static void
+tty3270_set_attributes(struct tty3270 *tp)
+{
+	static unsigned char f_colors[] = {
+		TAC_DEFAULT, TAC_RED, TAC_GREEN, TAC_YELLOW, TAC_BLUE,
+		TAC_PINK, TAC_TURQ, TAC_WHITE, 0, TAC_DEFAULT
+	};
+	int i, attr;
+
+	for (i = 0; i <= tp->esc_npar; i++) {
+		attr = tp->esc_par[i];
+		switch (attr) {
+		case 0:		/* Reset */
+			tp->highlight = TAX_RESET;
+			tp->f_color = TAC_RESET;
+			break;
+		/* Highlight. */
+		case 4:		/* Start underlining. */
+			tp->highlight = TAX_UNDER;
+			break;
+		case 5:		/* Start blink. */
+			tp->highlight = TAX_BLINK;
+			break;
+		case 7:		/* Start reverse. */
+			tp->highlight = TAX_REVER;
+			break;
+		case 24:	/* End underlining */
+			if (tp->highlight == TAX_UNDER)
+				tp->highlight = TAX_RESET;
+			break;
+		case 25:	/* End blink. */
+			if (tp->highlight == TAX_BLINK)
+				tp->highlight = TAX_RESET;
+			break;
+		case 27:	/* End reverse. */
+			if (tp->highlight == TAX_REVER)
+				tp->highlight = TAX_RESET;
+			break;
+		/* Foreground color. */
+		case 30:	/* Black */
+		case 31:	/* Red */
+		case 32:	/* Green */
+		case 33:	/* Yellow */
+		case 34:	/* Blue */
+		case 35:	/* Magenta */
+		case 36:	/* Cyan */
+		case 37:	/* White */
+		case 39:	/* Default */
+			tp->f_color = f_colors[attr - 30];
+			break;
+		}
+	}
+}
+
+static inline int
+tty3270_getpar(struct tty3270 *tp, int ix)
+{
+	return (tp->esc_par[ix] > 0) ? tp->esc_par[ix] : 1;
+}
+
+static void
+tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
+{
+	int max_cx = max(0, cx);
+	int max_cy = max(0, cy);
+
+	tp->cx = min_t(int, tp->view.cols - 1, max_cx);
+	cy = min_t(int, tp->view.rows - 3, max_cy);
+	if (cy != tp->cy) {
+		tty3270_convert_line(tp, tp->cy);
+		tp->cy = cy;
+	}
+}
+
+/*
+ * Process escape sequences. Known sequences:
+ *  Esc 7			Save Cursor Position
+ *  Esc 8			Restore Cursor Position
+ *  Esc [ Pn ; Pn ; .. m	Set attributes
+ *  Esc [ Pn ; Pn H		Cursor Position
+ *  Esc [ Pn ; Pn f		Cursor Position
+ *  Esc [ Pn A			Cursor Up
+ *  Esc [ Pn B			Cursor Down
+ *  Esc [ Pn C			Cursor Forward
+ *  Esc [ Pn D			Cursor Backward
+ *  Esc [ Pn G			Cursor Horizontal Absolute
+ *  Esc [ Pn X			Erase Characters
+ *  Esc [ Ps J			Erase in Display
+ *  Esc [ Ps K			Erase in Line
+ * // FIXME: add all the new ones.
+ *
+ *  Pn is a numeric parameter, a string of zero or more decimal digits.
+ *  Ps is a selective parameter.
+ */
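+/*
+ * A minimal user-space sketch (illustrative only, not part of the driver)
+ * of how a program could emit some of the sequences above when writing to
+ * a 3270 tty; the device node name is an assumption:
+ *
+ *	#include <stdio.h>
+ *
+ *	int main(void)
+ *	{
+ *		FILE *f = fopen("/dev/ttyTUB0", "w");	// hypothetical node
+ *
+ *		if (!f)
+ *			return 1;
+ *		fputs("\033[2J", f);			// erase entire display
+ *		fputs("\033[3;1H", f);			// cursor to row 3, column 1
+ *		fputs("\033[31mhello\033[0m\n", f);	// red text, then reset
+ *		fclose(f);
+ *		return 0;
+ *	}
+ */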
+static void
+tty3270_escape_sequence(struct tty3270 *tp, char ch)
+{
+	enum { ESnormal, ESesc, ESsquare, ESgetpars };
+
+	if (tp->esc_state == ESnormal) {
+		if (ch == 0x1b)
+			/* Starting new escape sequence. */
+			tp->esc_state = ESesc;
+		return;
+	}
+	if (tp->esc_state == ESesc) {
+		tp->esc_state = ESnormal;
+		switch (ch) {
+		case '[':
+			tp->esc_state = ESsquare;
+			break;
+		case 'E':
+			tty3270_cr(tp);
+			tty3270_lf(tp);
+			break;
+		case 'M':
+			tty3270_ri(tp);
+			break;
+		case 'D':
+			tty3270_lf(tp);
+			break;
+		case 'Z':		/* Respond ID. */
+			kbd_puts_queue(tp->tty, "\033[?6c");
+			break;
+		case '7':		/* Save cursor position. */
+			tp->saved_cx = tp->cx;
+			tp->saved_cy = tp->cy;
+			tp->saved_highlight = tp->highlight;
+			tp->saved_f_color = tp->f_color;
+			break;
+		case '8':		/* Restore cursor position. */
+			tty3270_convert_line(tp, tp->cy);
+			tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
+			tp->highlight = tp->saved_highlight;
+			tp->f_color = tp->saved_f_color;
+			break;
+		case 'c':		/* Reset terminal. */
+			tp->cx = tp->saved_cx = 0;
+			tp->cy = tp->saved_cy = 0;
+			tp->highlight = tp->saved_highlight = TAX_RESET;
+			tp->f_color = tp->saved_f_color = TAC_RESET;
+			tty3270_erase_display(tp, 2);
+			break;
+		}
+		return;
+	}
+	if (tp->esc_state == ESsquare) {
+		tp->esc_state = ESgetpars;
+		memset(tp->esc_par, 0, sizeof(tp->esc_par));
+		tp->esc_npar = 0;
+		tp->esc_ques = (ch == '?');
+		if (tp->esc_ques)
+			return;
+	}
+	if (tp->esc_state == ESgetpars) {
+		if (ch == ';' && tp->esc_npar < ESCAPE_NPAR - 1) {
+			tp->esc_npar++;
+			return;
+		}
+		if (ch >= '0' && ch <= '9') {
+			tp->esc_par[tp->esc_npar] *= 10;
+			tp->esc_par[tp->esc_npar] += ch - '0';
+			return;
+		}
+	}
+	tp->esc_state = ESnormal;
+	if (ch == 'n' && !tp->esc_ques) {
+		if (tp->esc_par[0] == 5)		/* Status report. */
+			kbd_puts_queue(tp->tty, "\033[0n");
+		else if (tp->esc_par[0] == 6) {	/* Cursor report. */
+			char buf[40];
+			sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1);
+			kbd_puts_queue(tp->tty, buf);
+		}
+		return;
+	}
+	if (tp->esc_ques)
+		return;
+	switch (ch) {
+	case 'm':
+		tty3270_set_attributes(tp);
+		break;
+	case 'H':	/* Set cursor position. */
+	case 'f':
+		tty3270_goto_xy(tp, tty3270_getpar(tp, 1) - 1,
+				tty3270_getpar(tp, 0) - 1);
+		break;
+	case 'd':	/* Set y position. */
+		tty3270_goto_xy(tp, tp->cx, tty3270_getpar(tp, 0) - 1);
+		break;
+	case 'A':	/* Cursor up. */
+	case 'F':
+		tty3270_goto_xy(tp, tp->cx, tp->cy - tty3270_getpar(tp, 0));
+		break;
+	case 'B':	/* Cursor down. */
+	case 'e':
+	case 'E':
+		tty3270_goto_xy(tp, tp->cx, tp->cy + tty3270_getpar(tp, 0));
+		break;
+	case 'C':	/* Cursor forward. */
+	case 'a':
+		tty3270_goto_xy(tp, tp->cx + tty3270_getpar(tp, 0), tp->cy);
+		break;
+	case 'D':	/* Cursor backward. */
+		tty3270_goto_xy(tp, tp->cx - tty3270_getpar(tp, 0), tp->cy);
+		break;
+	case 'G':	/* Set x position. */
+	case '`':
+		tty3270_goto_xy(tp, tty3270_getpar(tp, 0), tp->cy);
+		break;
+	case 'X':	/* Erase Characters. */
+		tty3270_erase_characters(tp, tty3270_getpar(tp, 0));
+		break;
+	case 'J':	/* Erase display. */
+		tty3270_erase_display(tp, tp->esc_par[0]);
+		break;
+	case 'K':	/* Erase line. */
+		tty3270_erase_line(tp, tp->esc_par[0]);
+		break;
+	case 'P':	/* Delete characters. */
+		tty3270_delete_characters(tp, tty3270_getpar(tp, 0));
+		break;
+	case '@':	/* Insert characters. */
+		tty3270_insert_characters(tp, tty3270_getpar(tp, 0));
+		break;
+	case 's':	/* Save cursor position. */
+		tp->saved_cx = tp->cx;
+		tp->saved_cy = tp->cy;
+		tp->saved_highlight = tp->highlight;
+		tp->saved_f_color = tp->f_color;
+		break;
+	case 'u':	/* Restore cursor position. */
+		tty3270_convert_line(tp, tp->cy);
+		tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
+		tp->highlight = tp->saved_highlight;
+		tp->f_color = tp->saved_f_color;
+		break;
+	}
+}
+
+/*
+ * String write routine for 3270 ttys
+ */
+static void
+tty3270_do_write(struct tty3270 *tp, const unsigned char *buf, int count)
+{
+	int i_msg, i;
+
+	spin_lock_bh(&tp->view.lock);
+	for (i_msg = 0; !tp->tty->stopped && i_msg < count; i_msg++) {
+		if (tp->esc_state != 0) {
+			/* Continue escape sequence. */
+			tty3270_escape_sequence(tp, buf[i_msg]);
+			continue;
+		}
+
+		switch (buf[i_msg]) {
+		case 0x07:		/* '\a' -- Alarm */
+			tp->wcc |= TW_PLUSALARM;
+			break;
+		case 0x08:		/* Backspace. */
+			if (tp->cx > 0) {
+				tp->cx--;
+				tty3270_put_character(tp, ' ');
+			}
+			break;
+		case 0x09:		/* '\t' -- Tabulate */
+			for (i = tp->cx % 8; i < 8; i++) {
+				if (tp->cx >= tp->view.cols) {
+					tty3270_cr(tp);
+					tty3270_lf(tp);
+					break;
+				}
+				tty3270_put_character(tp, ' ');
+				tp->cx++;
+			}
+			break;
+		case 0x0a:		/* '\n' -- New Line */
+			tty3270_cr(tp);
+			tty3270_lf(tp);
+			break;
+		case 0x0c:		/* '\f' -- Form Feed */
+			tty3270_erase_display(tp, 2);
+			tp->cx = tp->cy = 0;
+			break;
+		case 0x0d:		/* '\r' -- Carriage Return */
+			tp->cx = 0;
+			break;
+		case 0x0f:		/* SuSE "exit alternate mode" */
+			break;
+		case 0x1b:		/* Start escape sequence. */
+			tty3270_escape_sequence(tp, buf[i_msg]);
+			break;
+		default:		/* Insert normal character. */
+			if (tp->cx >= tp->view.cols) {
+				tty3270_cr(tp);
+				tty3270_lf(tp);
+			}
+			tty3270_put_character(tp, buf[i_msg]);
+			tp->cx++;
+			break;
+		}
+	}
+	/* Convert current line to 3270 data fragment. */
+	tty3270_convert_line(tp, tp->cy);
+
+	/* Setup timer to update display after 1/10 second */
+	if (!timer_pending(&tp->timer))
+		tty3270_set_timer(tp, HZ/10);
+
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * String write routine for 3270 ttys
+ */
+static int
+tty3270_write(struct tty_struct * tty,
+	      const unsigned char *buf, int count)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return 0;
+	if (tp->char_count > 0) {
+		tty3270_do_write(tp, tp->char_buf, tp->char_count);
+		tp->char_count = 0;
+	}
+	tty3270_do_write(tp, buf, count);
+	return count;
+}
+
+/*
+ * Put single characters into the tty's character buffer
+ */
+static int tty3270_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp || tp->char_count >= TTY3270_CHAR_BUF_SIZE)
+		return 0;
+	tp->char_buf[tp->char_count++] = ch;
+	return 1;
+}
+
+/*
+ * Flush all characters from the tty's character buffer put there
+ * by tty3270_put_char.
+ */
+static void
+tty3270_flush_chars(struct tty_struct *tty)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	if (tp->char_count > 0) {
+		tty3270_do_write(tp, tp->char_buf, tp->char_count);
+		tp->char_count = 0;
+	}
+}
+
+/*
+ * Returns the number of characters in the output buffer. This is
+ * used in tty_wait_until_sent to wait until all characters have
+ * appeared on the screen.
+ */
+static int
+tty3270_chars_in_buffer(struct tty_struct *tty)
+{
+	return 0;
+}
+
+static void
+tty3270_flush_buffer(struct tty_struct *tty)
+{
+}
+
+/*
+ * Check for visible/invisible input switches
+ */
+static void
+tty3270_set_termios(struct tty_struct *tty, struct ktermios *old)
+{
+	struct tty3270 *tp;
+	int new;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	spin_lock_bh(&tp->view.lock);
+	if (L_ICANON(tty)) {
+		new = L_ECHO(tty) ? TF_INPUT: TF_INPUTN;
+		if (new != tp->inattr) {
+			tp->inattr = new;
+			tty3270_update_prompt(tp, NULL, 0);
+			tty3270_set_timer(tp, 1);
+		}
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Disable reading from a 3270 tty
+ */
+static void
+tty3270_throttle(struct tty_struct * tty)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	tp->throttle = 1;
+}
+
+/*
+ * Enable reading from a 3270 tty
+ */
+static void
+tty3270_unthrottle(struct tty_struct * tty)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	tp->throttle = 0;
+	if (tp->attn)
+		tty3270_issue_read(tp, 1);
+}
+
+/*
+ * Hang up the tty device.
+ */
+static void
+tty3270_hangup(struct tty_struct *tty)
+{
+	// FIXME: implement
+}
+
+static void
+tty3270_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+}
+
+static int tty3270_ioctl(struct tty_struct *tty, unsigned int cmd,
+			 unsigned long arg)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return -ENODEV;
+	if (tty->flags & (1 << TTY_IO_ERROR))
+		return -EIO;
+	return kbd_ioctl(tp->kbd, cmd, arg);
+}
+
+#ifdef CONFIG_COMPAT
+static long tty3270_compat_ioctl(struct tty_struct *tty,
+				 unsigned int cmd, unsigned long arg)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return -ENODEV;
+	if (tty->flags & (1 << TTY_IO_ERROR))
+		return -EIO;
+	return kbd_ioctl(tp->kbd, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct tty_operations tty3270_ops = {
+	.open = tty3270_open,
+	.close = tty3270_close,
+	.write = tty3270_write,
+	.put_char = tty3270_put_char,
+	.flush_chars = tty3270_flush_chars,
+	.write_room = tty3270_write_room,
+	.chars_in_buffer = tty3270_chars_in_buffer,
+	.flush_buffer = tty3270_flush_buffer,
+	.throttle = tty3270_throttle,
+	.unthrottle = tty3270_unthrottle,
+	.hangup = tty3270_hangup,
+	.wait_until_sent = tty3270_wait_until_sent,
+	.ioctl = tty3270_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = tty3270_compat_ioctl,
+#endif
+	.set_termios = tty3270_set_termios
+};
+
+/*
+ * 3270 tty registration code called from tty_init().
+ * Most kernel services (incl. kmalloc) are available at this point.
+ */
+static int __init tty3270_init(void)
+{
+	struct tty_driver *driver;
+	int ret;
+
+	driver = alloc_tty_driver(RAW3270_MAXDEVS);
+	if (!driver)
+		return -ENOMEM;
+
+	/*
+	 * Initialize the tty_driver structure
+	 * Entries in tty3270_driver that are NOT initialized:
+	 * proc_entry, set_ldisc, write_proc
+	 */
+	driver->driver_name = "ttyTUB";
+	driver->name = "ttyTUB";
+	driver->major = IBM_TTY3270_MAJOR;
+	driver->minor_start = RAW3270_FIRSTMINOR;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_DYNAMIC_DEV;
+	tty_set_operations(driver, &tty3270_ops);
+	ret = tty_register_driver(driver);
+	if (ret) {
+		put_tty_driver(driver);
+		return ret;
+	}
+	tty3270_driver = driver;
+	return 0;
+}
+
+static void __exit
+tty3270_exit(void)
+{
+	struct tty_driver *driver;
+
+	driver = tty3270_driver;
+	tty3270_driver = NULL;
+	tty_unregister_driver(driver);
+	tty3270_del_views();
+}
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(IBM_TTY3270_MAJOR);
+
+module_init(tty3270_init);
+module_exit(tty3270_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/tty3270.h b/ap/os/linux/linux-3.4.x/drivers/s390/char/tty3270.h
new file mode 100644
index 0000000..799da57
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/tty3270.h
@@ -0,0 +1,16 @@
+/*
+ *  drivers/s390/char/tty3270.h
+ *
+ *    Copyright IBM Corp. 2007
+ *
+ */
+
+#ifndef __DRIVERS_S390_CHAR_TTY3270_H
+#define __DRIVERS_S390_CHAR_TTY3270_H
+
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+
+extern struct tty_driver *tty3270_driver;
+
+#endif /* __DRIVERS_S390_CHAR_TTY3270_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/vmcp.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/vmcp.c
new file mode 100644
index 0000000..89c03e6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/vmcp.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright IBM Corp. 2004,2010
+ * Interface implementation for communication with the z/VM control program
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ *
+ * z/VM's CP offers the possibility to issue commands via the diagnose code 8;
+ * this driver implements a character device that issues these commands and
+ * returns the answer of CP.
+ *
+ * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <asm/compat.h>
+#include <asm/cpcmd.h>
+#include <asm/debug.h>
+#include <asm/uaccess.h>
+#include "vmcp.h"
+
+static debug_info_t *vmcp_debug;
+
+static int vmcp_open(struct inode *inode, struct file *file)
+{
+	struct vmcp_session *session;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	session = kmalloc(sizeof(*session), GFP_KERNEL);
+	if (!session)
+		return -ENOMEM;
+
+	session->bufsize = PAGE_SIZE;
+	session->response = NULL;
+	session->resp_size = 0;
+	mutex_init(&session->mutex);
+	file->private_data = session;
+	return nonseekable_open(inode, file);
+}
+
+static int vmcp_release(struct inode *inode, struct file *file)
+{
+	struct vmcp_session *session;
+
+	session = file->private_data;
+	file->private_data = NULL;
+	free_pages((unsigned long)session->response, get_order(session->bufsize));
+	kfree(session);
+	return 0;
+}
+
+static ssize_t
+vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos)
+{
+	ssize_t ret;
+	size_t size;
+	struct vmcp_session *session;
+
+	session = file->private_data;
+	if (mutex_lock_interruptible(&session->mutex))
+		return -ERESTARTSYS;
+	if (!session->response) {
+		mutex_unlock(&session->mutex);
+		return 0;
+	}
+	size = min_t(size_t, session->resp_size, session->bufsize);
+	ret = simple_read_from_buffer(buff, count, ppos,
+					session->response, size);
+
+	mutex_unlock(&session->mutex);
+
+	return ret;
+}
+
+static ssize_t
+vmcp_write(struct file *file, const char __user *buff, size_t count,
+	   loff_t *ppos)
+{
+	char *cmd;
+	struct vmcp_session *session;
+
+	if (count > 240)
+		return -EINVAL;
+	cmd = kmalloc(count + 1, GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+	if (copy_from_user(cmd, buff, count)) {
+		kfree(cmd);
+		return -EFAULT;
+	}
+	cmd[count] = '\0';
+	session = file->private_data;
+	if (mutex_lock_interruptible(&session->mutex)) {
+		kfree(cmd);
+		return -ERESTARTSYS;
+	}
+	if (!session->response)
+		session->response = (char *)__get_free_pages(GFP_KERNEL
+						| __GFP_REPEAT | GFP_DMA,
+						get_order(session->bufsize));
+	if (!session->response) {
+		mutex_unlock(&session->mutex);
+		kfree(cmd);
+		return -ENOMEM;
+	}
+	debug_text_event(vmcp_debug, 1, cmd);
+	session->resp_size = cpcmd(cmd, session->response, session->bufsize,
+				   &session->resp_code);
+	mutex_unlock(&session->mutex);
+	kfree(cmd);
+	*ppos = 0;		/* reset the file pointer after a command */
+	return count;
+}
+
+
+/*
+ * These ioctls are available, as the semantics of the diagnose 8 call
+ * do not fit very well into a Linux call. Diagnose X'08' is described in
+ * CP Programming Services SC24-6084-00
+ *
+ * VMCP_GETCODE: gives the CP return code back to user space
+ * VMCP_SETBUF: sets the response buffer size for the next write call. Diagnose 8
+ * expects adjacent pages in real storage and, to make matters worse, we
+ * don't know the size of the response. Therefore we default to PAGE_SIZE and
+ * let userspace change the response size if it expects a bigger
+ * response
+ */
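+/*
+ * A minimal user-space sketch (illustrative only, not part of the driver),
+ * assuming the device node /dev/vmcp created for the misc device below;
+ * error checking is omitted for brevity:
+ *
+ *	#include <fcntl.h>
+ *	#include <stdio.h>
+ *	#include <sys/ioctl.h>
+ *	#include <unistd.h>
+ *
+ *	#define VMCP_GETCODE _IOR(0x10, 1, int)	// mirrors vmcp.h
+ *	#define VMCP_GETSIZE _IOR(0x10, 3, int)	// mirrors vmcp.h
+ *
+ *	int main(void)
+ *	{
+ *		char buf[4096];
+ *		int fd, code = 0, size = 0;
+ *		ssize_t n;
+ *
+ *		fd = open("/dev/vmcp", O_RDWR);
+ *		write(fd, "QUERY USERID", 12);		// runs the CP command
+ *		ioctl(fd, VMCP_GETCODE, &code);		// CP return code
+ *		ioctl(fd, VMCP_GETSIZE, &size);		// full response size
+ *		n = read(fd, buf, sizeof(buf));		// response text
+ *		if (n > 0)
+ *			fwrite(buf, 1, n, stdout);
+ *		close(fd);
+ *		return code ? 1 : 0;
+ *	}
+ */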
+static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct vmcp_session *session;
+	int __user *argp;
+	int temp;
+
+	session = file->private_data;
+	if (is_compat_task())
+		argp = compat_ptr(arg);
+	else
+		argp = (int __user *)arg;
+	if (mutex_lock_interruptible(&session->mutex))
+		return -ERESTARTSYS;
+	switch (cmd) {
+	case VMCP_GETCODE:
+		temp = session->resp_code;
+		mutex_unlock(&session->mutex);
+		return put_user(temp, argp);
+	case VMCP_SETBUF:
+		free_pages((unsigned long)session->response,
+				get_order(session->bufsize));
+		session->response = NULL;
+		temp = get_user(session->bufsize, argp);
+		if (get_order(session->bufsize) > 8) {
+			session->bufsize = PAGE_SIZE;
+			temp = -EINVAL;
+		}
+		mutex_unlock(&session->mutex);
+		return temp;
+	case VMCP_GETSIZE:
+		temp = session->resp_size;
+		mutex_unlock(&session->mutex);
+		return put_user(temp, argp);
+	default:
+		mutex_unlock(&session->mutex);
+		return -ENOIOCTLCMD;
+	}
+}
+
+static const struct file_operations vmcp_fops = {
+	.owner		= THIS_MODULE,
+	.open		= vmcp_open,
+	.release	= vmcp_release,
+	.read		= vmcp_read,
+	.write		= vmcp_write,
+	.unlocked_ioctl	= vmcp_ioctl,
+	.compat_ioctl	= vmcp_ioctl,
+	.llseek		= no_llseek,
+};
+
+static struct miscdevice vmcp_dev = {
+	.name	= "vmcp",
+	.minor	= MISC_DYNAMIC_MINOR,
+	.fops	= &vmcp_fops,
+};
+
+static int __init vmcp_init(void)
+{
+	int ret;
+
+	if (!MACHINE_IS_VM)
+		return 0;
+
+	vmcp_debug = debug_register("vmcp", 1, 1, 240);
+	if (!vmcp_debug)
+		return -ENOMEM;
+
+	ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view);
+	if (ret) {
+		debug_unregister(vmcp_debug);
+		return ret;
+	}
+
+	ret = misc_register(&vmcp_dev);
+	if (ret)
+		debug_unregister(vmcp_debug);
+	return ret;
+}
+device_initcall(vmcp_init);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/vmcp.h b/ap/os/linux/linux-3.4.x/drivers/s390/char/vmcp.h
new file mode 100644
index 0000000..6a99394
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/vmcp.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2004, 2005 IBM Corporation
+ * Interface implementation for communication with the z/VM control program
+ * Version 1.0
+ * Author(s): Christian Borntraeger <cborntra@de.ibm.com>
+ *
+ *
+ * z/VM's CP offers the possibility to issue commands via the diagnose code 8;
+ * this driver implements a character device that issues these commands and
+ * returns the answer of CP.
+ *
+ * The idea of this driver is based on cpint from Neale Ferguson
+ */
+
+#include <linux/ioctl.h>
+#include <linux/mutex.h>
+
+#define VMCP_GETCODE _IOR(0x10, 1, int)
+#define VMCP_SETBUF _IOW(0x10, 2, int)
+#define VMCP_GETSIZE _IOR(0x10, 3, int)
+
+struct vmcp_session {
+	unsigned int bufsize;
+	char *response;
+	int resp_size;
+	int resp_code;
+	/* As we use copy_from/to_user, which might sleep, *
+	 * we cannot use a spinlock                        */
+	struct mutex mutex;
+};
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/vmlogrdr.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/vmlogrdr.c
new file mode 100644
index 0000000..524d988
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/vmlogrdr.c
@@ -0,0 +1,911 @@
+/*
+ * drivers/s390/char/vmlogrdr.c
+ *	character device driver for reading z/VM system service records
+ *
+ *
+ *	Copyright IBM Corp. 2004, 2009
+ *	character device driver for reading z/VM system service records,
+ *	Version 1.0
+ *	Author(s): Xenia Tkatschow <xenia@us.ibm.com>
+ *		   Stefan Weinhuber <wein@de.ibm.com>
+ *
+ */
+
+#define KMSG_COMPONENT "vmlogrdr"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <asm/uaccess.h>
+#include <asm/cpcmd.h>
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include <net/iucv/iucv.h>
+#include <linux/kmod.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/string.h>
+
+MODULE_AUTHOR
+	("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
+	 "                            Stefan Weinhuber (wein@de.ibm.com)");
+MODULE_DESCRIPTION ("Character device driver for reading z/VM "
+		    "system service records.");
+MODULE_LICENSE("GPL");
+
+
+/*
+ * The size of the buffer for iucv data transfer is one page,
+ * but in addition to the data we read from iucv we also
+ * place an integer and some characters into that buffer,
+ * so the maximum size for record data is a little less than
+ * one page.
+ */
+#define NET_BUFFER_SIZE	(PAGE_SIZE - sizeof(int) - sizeof(FENCE))
+
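+/*
+ * A minimal user-space reading sketch (illustrative only, not part of the
+ * driver); the device node name and the record size limit are assumptions.
+ * Each record starts with an int holding the length of the record data
+ * plus the trailing "EOR" fence:
+ *
+ *	int fd, len, got;
+ *	ssize_t n;
+ *	char rec[65536];
+ *
+ *	fd = open("/dev/logrec", O_RDONLY);	// hypothetical node name
+ *	while (read(fd, &len, sizeof(len)) == sizeof(len)) {
+ *		if (len < 0 || (size_t) len > sizeof(rec))
+ *			break;		// oversized record, not handled here
+ *		for (got = 0; got < len; got += n)
+ *			if ((n = read(fd, rec + got, len - got)) <= 0)
+ *				break;
+ *		// "got" bytes now hold the record data followed by "EOR\0"
+ *	}
+ *	close(fd);
+ */
+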
+/*
+ * The elements that are concurrently accessed by bottom halves are
+ * connection_established, iucv_path_severed, local_interrupt_buffer
+ * and receive_ready. The first three can be protected by
+ * priv_lock.  receive_ready is atomic, so it can be incremented and
+ * decremented without holding a lock.
+ * The variable dev_in_use needs to be protected by the lock, since
+ * it's a flag used by open to make sure that the device is opened only
+ * by one user at the same time.
+ */
+struct vmlogrdr_priv_t {
+	char system_service[8];
+	char internal_name[8];
+	char recording_name[8];
+	struct iucv_path *path;
+	int connection_established;
+	int iucv_path_severed;
+	struct iucv_message local_interrupt_buffer;
+	atomic_t receive_ready;
+	int minor_num;
+	char * buffer;
+	char * current_position;
+	int remaining;
+	ulong residual_length;
+	int buffer_free;
+	int dev_in_use; /* 1: already opened, 0: not opened*/
+	spinlock_t priv_lock;
+	struct device  *device;
+	struct device  *class_device;
+	int autorecording;
+	int autopurge;
+};
+
+
+/*
+ * File operation structure for vmlogrdr devices
+ */
+static int vmlogrdr_open(struct inode *, struct file *);
+static int vmlogrdr_release(struct inode *, struct file *);
+static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
+			      size_t count, loff_t * ppos);
+
+static const struct file_operations vmlogrdr_fops = {
+	.owner   = THIS_MODULE,
+	.open    = vmlogrdr_open,
+	.release = vmlogrdr_release,
+	.read    = vmlogrdr_read,
+	.llseek  = no_llseek,
+};
+
+
+static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
+static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
+static void vmlogrdr_iucv_message_pending(struct iucv_path *,
+					  struct iucv_message *);
+
+
+static struct iucv_handler vmlogrdr_iucv_handler = {
+	.path_complete	 = vmlogrdr_iucv_path_complete,
+	.path_severed	 = vmlogrdr_iucv_path_severed,
+	.message_pending = vmlogrdr_iucv_message_pending,
+};
+
+
+static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
+static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
+
+/*
+ * pointer to system service private structure
+ * minor number 0 --> logrec
+ * minor number 1 --> account
+ * minor number 2 --> symptom
+ */
+
+static struct vmlogrdr_priv_t sys_ser[] = {
+	{ .system_service = "*LOGREC ",
+	  .internal_name  = "logrec",
+	  .recording_name = "EREP",
+	  .minor_num      = 0,
+	  .buffer_free    = 1,
+	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
+	  .autorecording  = 1,
+	  .autopurge      = 1,
+	},
+	{ .system_service = "*ACCOUNT",
+	  .internal_name  = "account",
+	  .recording_name = "ACCOUNT",
+	  .minor_num      = 1,
+	  .buffer_free    = 1,
+	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
+	  .autorecording  = 1,
+	  .autopurge      = 1,
+	},
+	{ .system_service = "*SYMPTOM",
+	  .internal_name  = "symptom",
+	  .recording_name = "SYMPTOM",
+	  .minor_num      = 2,
+	  .buffer_free    = 1,
+	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
+	  .autorecording  = 1,
+	  .autopurge      = 1,
+	}
+};
+
+#define MAXMINOR  (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))
+
+static char FENCE[] = {"EOR"};
+static int vmlogrdr_major = 0;
+static struct cdev  *vmlogrdr_cdev = NULL;
+static int recording_class_AB;
+
+
+static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
+{
+	struct vmlogrdr_priv_t * logptr = path->private;
+
+	spin_lock(&logptr->priv_lock);
+	logptr->connection_established = 1;
+	spin_unlock(&logptr->priv_lock);
+	wake_up(&conn_wait_queue);
+}
+
+
+static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
+{
+	struct vmlogrdr_priv_t * logptr = path->private;
+	u8 reason = (u8) ipuser[8];
+
+	pr_err("vmlogrdr: connection severed with reason %i\n", reason);
+
+	iucv_path_sever(path, NULL);
+	kfree(path);
+	logptr->path = NULL;
+
+	spin_lock(&logptr->priv_lock);
+	logptr->connection_established = 0;
+	logptr->iucv_path_severed = 1;
+	spin_unlock(&logptr->priv_lock);
+
+	wake_up(&conn_wait_queue);
+	/* just in case we're sleeping waiting for a record */
+	wake_up_interruptible(&read_wait_queue);
+}
+
+
+static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
+					  struct iucv_message *msg)
+{
+	struct vmlogrdr_priv_t * logptr = path->private;
+
+	/*
+	 * This function is the bottom half so it should be quick.
+	 * Copy the external interrupt data into our local eib and increment
+	 * the usage count
+	 */
+	spin_lock(&logptr->priv_lock);
+	memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
+	atomic_inc(&logptr->receive_ready);
+	spin_unlock(&logptr->priv_lock);
+	wake_up_interruptible(&read_wait_queue);
+}
+
+
+static int vmlogrdr_get_recording_class_AB(void)
+{
+	static const char cp_command[] = "QUERY COMMAND RECORDING ";
+	char cp_response[80];
+	char *tail;
+	int len,i;
+
+	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+	len = strnlen(cp_response,sizeof(cp_response));
+	// now the parsing
+	tail=strnchr(cp_response,len,'=');
+	if (!tail)
+		return 0;
+	tail++;
+	if (!strncmp("ANY",tail,3))
+		return 1;
+	if (!strncmp("NONE",tail,4))
+		return 0;
+	/*
+	 * expect comma separated list of classes here, if one of them
+	 * is A or B return 1 otherwise 0
+	 */
+	for (i = tail - cp_response; i < len; i++)
+		if (cp_response[i] == 'A' || cp_response[i] == 'B')
+			return 1;
+	return 0;
+}
+
+
+static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
+			      int action, int purge)
+{
+
+	char cp_command[80];
+	char cp_response[160];
+	char *onoff, *qid_string;
+	int rc;
+
+	onoff = ((action == 1) ? "ON" : "OFF");
+	qid_string = ((recording_class_AB == 1) ? " QID * " : "");
+
+	/*
+	 * The recording commands need to be called with option QID
+	 * for guests that have privilege classes A or B.
+	 * Purging has to be done as a separate step, because recording
+	 * can't be switched on as long as records are on the queue.
+	 * Doing both at the same time doesn't work.
+	 */
+	if (purge && (action == 1)) {
+		memset(cp_command, 0x00, sizeof(cp_command));
+		memset(cp_response, 0x00, sizeof(cp_response));
+		snprintf(cp_command, sizeof(cp_command),
+			 "RECORDING %s PURGE %s",
+			 logptr->recording_name,
+			 qid_string);
+		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+	}
+
+	memset(cp_command, 0x00, sizeof(cp_command));
+	memset(cp_response, 0x00, sizeof(cp_response));
+	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
+		logptr->recording_name,
+		onoff,
+		qid_string);
+	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+	/* The recording command will usually answer with 'Command complete'
+	 * on success, but when the specific service was never connected
+	 * before, there might be an additional informational message
+	 * 'HCPCRC8072I Recording entry not found' before the
+	 * 'Command complete'. So I use strstr rather than strncmp.
+	 */
+	if (strstr(cp_response,"Command complete"))
+		rc = 0;
+	else
+		rc = -EIO;
+	/*
+	 * If we turn recording off, we have to purge any remaining records
+	 * afterwards, as a large number of queued records may impact z/VM
+	 * performance.
+	 */
+	if (purge && (action == 0)) {
+		memset(cp_command, 0x00, sizeof(cp_command));
+		memset(cp_response, 0x00, sizeof(cp_response));
+		snprintf(cp_command, sizeof(cp_command),
+			 "RECORDING %s PURGE %s",
+			 logptr->recording_name,
+			 qid_string);
+		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+	}
+
+	return rc;
+}
+
+
+static int vmlogrdr_open (struct inode *inode, struct file *filp)
+{
+	int dev_num = 0;
+	struct vmlogrdr_priv_t * logptr = NULL;
+	int connect_rc = 0;
+	int ret;
+
+	dev_num = iminor(inode);
+	if (dev_num >= MAXMINOR)
+		return -ENODEV;
+	logptr = &sys_ser[dev_num];
+
+	/*
+	 * only allow the device to be opened for blocking reads
+	 */
+	if (filp->f_flags & O_NONBLOCK)
+		return -ENOSYS;
+
+	/* Be sure this device hasn't already been opened */
+	spin_lock_bh(&logptr->priv_lock);
+	if (logptr->dev_in_use)	{
+		spin_unlock_bh(&logptr->priv_lock);
+		return -EBUSY;
+	}
+	logptr->dev_in_use = 1;
+	logptr->connection_established = 0;
+	logptr->iucv_path_severed = 0;
+	atomic_set(&logptr->receive_ready, 0);
+	logptr->buffer_free = 1;
+	spin_unlock_bh(&logptr->priv_lock);
+
+	/* set the file options */
+	filp->private_data = logptr;
+	filp->f_op = &vmlogrdr_fops;
+
+	/* start recording for this service*/
+	if (logptr->autorecording) {
+		ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
+		if (ret)
+			pr_warning("vmlogrdr: failed to start "
+				   "recording automatically\n");
+	}
+
+	/* create connection to the system service */
+	logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
+	if (!logptr->path)
+		goto out_dev;
+	connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
+				       logptr->system_service, NULL, NULL,
+				       logptr);
+	if (connect_rc) {
+		pr_err("vmlogrdr: iucv connection to %s "
+		       "failed with rc %i \n",
+		       logptr->system_service, connect_rc);
+		goto out_path;
+	}
+
+	/* We've issued the connect and now we must wait for a
+	 * ConnectionComplete or ConnectionSevered interrupt
+	 * before we can continue to process.
+	 */
+	wait_event(conn_wait_queue, (logptr->connection_established)
+		   || (logptr->iucv_path_severed));
+	if (logptr->iucv_path_severed)
+		goto out_record;
+	nonseekable_open(inode, filp);
+	return 0;
+
+out_record:
+	if (logptr->autorecording)
+		vmlogrdr_recording(logptr,0,logptr->autopurge);
+out_path:
+	kfree(logptr->path);	/* kfree(NULL) is ok. */
+	logptr->path = NULL;
+out_dev:
+	logptr->dev_in_use = 0;
+	return -EIO;
+}
+
+
+static int vmlogrdr_release (struct inode *inode, struct file *filp)
+{
+	int ret;
+
+	struct vmlogrdr_priv_t * logptr = filp->private_data;
+
+	iucv_path_sever(logptr->path, NULL);
+	kfree(logptr->path);
+	logptr->path = NULL;
+	if (logptr->autorecording) {
+		ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
+		if (ret)
+			pr_warning("vmlogrdr: failed to stop "
+				   "recording automatically\n");
+	}
+	logptr->dev_in_use = 0;
+
+	return 0;
+}
+
+
+static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
+{
+	int rc, *temp;
+	/* we need to keep track of two data sizes here:
+	 * The number of bytes we need to receive from iucv and
+	 * the total number of bytes we actually write into the buffer.
+	 */
+	int user_data_count, iucv_data_count;
+	char * buffer;
+
+	if (atomic_read(&priv->receive_ready)) {
+		spin_lock_bh(&priv->priv_lock);
+		if (priv->residual_length){
+			/* receive second half of a record */
+			iucv_data_count = priv->residual_length;
+			user_data_count = 0;
+			buffer = priv->buffer;
+		} else {
+			/* receive a new record:
+			 * We need to return the total length of the record
+			 * + size of FENCE in the first 4 bytes of the buffer.
+			 */
+			iucv_data_count = priv->local_interrupt_buffer.length;
+			user_data_count = sizeof(int);
+			temp = (int*)priv->buffer;
+			*temp= iucv_data_count + sizeof(FENCE);
+			buffer = priv->buffer + sizeof(int);
+		}
+		/*
+		 * If the record is bigger than our buffer, we receive only
+		 * a part of it. We can get the rest later.
+		 */
+		if (iucv_data_count > NET_BUFFER_SIZE)
+			iucv_data_count = NET_BUFFER_SIZE;
+		rc = iucv_message_receive(priv->path,
+					  &priv->local_interrupt_buffer,
+					  0, buffer, iucv_data_count,
+					  &priv->residual_length);
+		spin_unlock_bh(&priv->priv_lock);
+		/* An rc of 5 indicates that the record was bigger than
+		 * the buffer, which is OK for us. A 9 indicates that the
+		 * record was purged before we could receive it.
+		 */
+		if (rc == 5)
+			rc = 0;
+		if (rc == 9)
+			atomic_set(&priv->receive_ready, 0);
+	} else {
+		rc = 1;
+	}
+	if (!rc) {
+		priv->buffer_free = 0;
+ 		user_data_count += iucv_data_count;
+		priv->current_position = priv->buffer;
+		if (priv->residual_length == 0){
+			/* the whole record has been captured,
+			 * now add the fence */
+			atomic_dec(&priv->receive_ready);
+			buffer = priv->buffer + user_data_count;
+			memcpy(buffer, FENCE, sizeof(FENCE));
+			user_data_count += sizeof(FENCE);
+		}
+		priv->remaining = user_data_count;
+	}
+
+	return rc;
+}
+
+
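+/*
+ * Buffer layout produced by vmlogrdr_receive_data and consumed by
+ * vmlogrdr_read (illustration only):
+ *   [4-byte total length][record data][FENCE]
+ * The data is handed out to user space in as many read() calls as the
+ * caller needs.
+ */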
+static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
+			     size_t count, loff_t * ppos)
+{
+	int rc;
+	struct vmlogrdr_priv_t * priv = filp->private_data;
+
+	while (priv->buffer_free) {
+		rc = vmlogrdr_receive_data(priv);
+		if (rc) {
+			rc = wait_event_interruptible(read_wait_queue,
+					atomic_read(&priv->receive_ready));
+			if (rc)
+				return rc;
+		}
+	}
+	/* copy only up to end of record */
+	if (count > priv->remaining)
+		count = priv->remaining;
+
+	if (copy_to_user(data, priv->current_position, count))
+		return -EFAULT;
+
+	*ppos += count;
+	priv->current_position += count;
+	priv->remaining -= count;
+
+	/* if all data has been transferred, set buffer free */
+	if (priv->remaining == 0)
+		priv->buffer_free = 1;
+
+	return count;
+}
+
+static ssize_t vmlogrdr_autopurge_store(struct device * dev,
+					struct device_attribute *attr,
+					const char * buf, size_t count)
+{
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+	ssize_t ret = count;
+
+	switch (buf[0]) {
+	case '0':
+		priv->autopurge=0;
+		break;
+	case '1':
+		priv->autopurge=1;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+
+static ssize_t vmlogrdr_autopurge_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+	return sprintf(buf, "%u\n", priv->autopurge);
+}
+
+
+static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
+		   vmlogrdr_autopurge_store);
+
+
+static ssize_t vmlogrdr_purge_store(struct device * dev,
+				    struct device_attribute *attr,
+				    const char * buf, size_t count)
+{
+
+	char cp_command[80];
+	char cp_response[80];
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+
+	if (buf[0] != '1')
+		return -EINVAL;
+
+	memset(cp_command, 0x00, sizeof(cp_command));
+	memset(cp_response, 0x00, sizeof(cp_response));
+
+	/*
+	 * The recording command needs to be called with option QID
+	 * for guests that have privilege classes A or B.
+	 * Other guests will not recognize the command and we have to
+	 * issue the same command without the QID parameter.
+	 */
+
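+	/*
+	 * For illustration (assuming this device serves the EREP recording
+	 * service), the resulting CP command is either
+	 *   "RECORDING EREP PURGE QID * "   (privilege class A or B guests)
+	 * or
+	 *   "RECORDING EREP PURGE "         (all other guests).
+	 */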
+	if (recording_class_AB)
+		snprintf(cp_command, sizeof(cp_command),
+			 "RECORDING %s PURGE QID * ",
+			 priv->recording_name);
+	else
+		snprintf(cp_command, sizeof(cp_command),
+			 "RECORDING %s PURGE ",
+			 priv->recording_name);
+
+	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+
+	return count;
+}
+
+
+static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
+
+
+static ssize_t vmlogrdr_autorecording_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+	ssize_t ret = count;
+
+	switch (buf[0]) {
+	case '0':
+		priv->autorecording=0;
+		break;
+	case '1':
+		priv->autorecording=1;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+
+static ssize_t vmlogrdr_autorecording_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+	return sprintf(buf, "%u\n", priv->autorecording);
+}
+
+
+static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
+		   vmlogrdr_autorecording_store);
+
+
+static ssize_t vmlogrdr_recording_store(struct device * dev,
+					struct device_attribute *attr,
+					const char * buf, size_t count)
+{
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+	ssize_t ret;
+
+	switch (buf[0]) {
+	case '0':
+		ret = vmlogrdr_recording(priv,0,0);
+		break;
+	case '1':
+		ret = vmlogrdr_recording(priv,1,0);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	if (ret)
+		return ret;
+	else
+		return count;
+
+}
+
+
+static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
+
+
+static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
+					      char *buf)
+{
+
+	static const char cp_command[] = "QUERY RECORDING ";
+	int len;
+
+	cpcmd(cp_command, buf, 4096, NULL);
+	len = strlen(buf);
+	return len;
+}
+
+
+static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
+		   NULL);
+
+static struct attribute *vmlogrdr_attrs[] = {
+	&dev_attr_autopurge.attr,
+	&dev_attr_purge.attr,
+	&dev_attr_autorecording.attr,
+	&dev_attr_recording.attr,
+	NULL,
+};
+
+static int vmlogrdr_pm_prepare(struct device *dev)
+{
+	int rc;
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+
+	rc = 0;
+	if (priv) {
+		spin_lock_bh(&priv->priv_lock);
+		if (priv->dev_in_use)
+			rc = -EBUSY;
+		spin_unlock_bh(&priv->priv_lock);
+	}
+	if (rc)
+		pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
+		       dev_name(dev));
+	return rc;
+}
+
+
+static const struct dev_pm_ops vmlogrdr_pm_ops = {
+	.prepare = vmlogrdr_pm_prepare,
+};
+
+static struct attribute_group vmlogrdr_attr_group = {
+	.attrs = vmlogrdr_attrs,
+};
+
+static struct class *vmlogrdr_class;
+static struct device_driver vmlogrdr_driver = {
+	.name = "vmlogrdr",
+	.bus  = &iucv_bus,
+	.pm = &vmlogrdr_pm_ops,
+};
+
+
+static int vmlogrdr_register_driver(void)
+{
+	int ret;
+
+	/* Register with iucv driver */
+	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
+	if (ret)
+		goto out;
+
+	ret = driver_register(&vmlogrdr_driver);
+	if (ret)
+		goto out_iucv;
+
+	ret = driver_create_file(&vmlogrdr_driver,
+				 &driver_attr_recording_status);
+	if (ret)
+		goto out_driver;
+
+	vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
+	if (IS_ERR(vmlogrdr_class)) {
+		ret = PTR_ERR(vmlogrdr_class);
+		vmlogrdr_class = NULL;
+		goto out_attr;
+	}
+	return 0;
+
+out_attr:
+	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
+out_driver:
+	driver_unregister(&vmlogrdr_driver);
+out_iucv:
+	iucv_unregister(&vmlogrdr_iucv_handler, 1);
+out:
+	return ret;
+}
+
+
+static void vmlogrdr_unregister_driver(void)
+{
+	class_destroy(vmlogrdr_class);
+	vmlogrdr_class = NULL;
+	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
+	driver_unregister(&vmlogrdr_driver);
+	iucv_unregister(&vmlogrdr_iucv_handler, 1);
+}
+
+
+static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
+{
+	struct device *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (dev) {
+		dev_set_name(dev, "%s", priv->internal_name);
+		dev->bus = &iucv_bus;
+		dev->parent = iucv_root;
+		dev->driver = &vmlogrdr_driver;
+		dev_set_drvdata(dev, priv);
+		/*
+		 * The release function could be called after the
+		 * module has been unloaded. Its _only_ task is to
+		 * free the struct. Therefore, we specify kfree()
+		 * directly here. (Probably a little bit obfuscating
+		 * but legitimate ...).
+		 */
+		dev->release = (void (*)(struct device *))kfree;
+	} else
+		return -ENOMEM;
+	ret = device_register(dev);
+	if (ret) {
+		put_device(dev);
+		return ret;
+	}
+
+	ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
+	if (ret) {
+		device_unregister(dev);
+		return ret;
+	}
+	priv->class_device = device_create(vmlogrdr_class, dev,
+					   MKDEV(vmlogrdr_major,
+						 priv->minor_num),
+					   priv, "%s", dev_name(dev));
+	if (IS_ERR(priv->class_device)) {
+		ret = PTR_ERR(priv->class_device);
+		priv->class_device=NULL;
+		sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group);
+		device_unregister(dev);
+		return ret;
+	}
+	priv->device = dev;
+	return 0;
+}
+
+
+static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
+{
+	device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
+	if (priv->device != NULL) {
+		sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
+		device_unregister(priv->device);
+		priv->device=NULL;
+	}
+	return 0;
+}
+
+
+static int vmlogrdr_register_cdev(dev_t dev)
+{
+	int rc = 0;
+	vmlogrdr_cdev = cdev_alloc();
+	if (!vmlogrdr_cdev) {
+		return -ENOMEM;
+	}
+	vmlogrdr_cdev->owner = THIS_MODULE;
+	vmlogrdr_cdev->ops = &vmlogrdr_fops;
+	vmlogrdr_cdev->dev = dev;
+	rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
+	if (!rc)
+		return 0;
+
+	/* cleanup: cdev is not fully registered, no cdev_del here! */
+	kobject_put(&vmlogrdr_cdev->kobj);
+	vmlogrdr_cdev=NULL;
+	return rc;
+}
+
+
+static void vmlogrdr_cleanup(void)
+{
+	int i;
+
+	if (vmlogrdr_cdev) {
+		cdev_del(vmlogrdr_cdev);
+		vmlogrdr_cdev=NULL;
+	}
+	for (i=0; i < MAXMINOR; ++i ) {
+		vmlogrdr_unregister_device(&sys_ser[i]);
+		free_page((unsigned long)sys_ser[i].buffer);
+	}
+	vmlogrdr_unregister_driver();
+	if (vmlogrdr_major) {
+		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
+		vmlogrdr_major=0;
+	}
+}
+
+
+static int __init vmlogrdr_init(void)
+{
+	int rc;
+	int i;
+	dev_t dev;
+
+	if (! MACHINE_IS_VM) {
+		pr_err("not running under VM, driver not loaded.\n");
+		return -ENODEV;
+	}
+
+	recording_class_AB = vmlogrdr_get_recording_class_AB();
+
+	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
+	if (rc)
+		return rc;
+	vmlogrdr_major = MAJOR(dev);
+
+	rc=vmlogrdr_register_driver();
+	if (rc)
+		goto cleanup;
+
+	for (i=0; i < MAXMINOR; ++i ) {
+		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
+		if (!sys_ser[i].buffer) {
+			rc = -ENOMEM;
+			break;
+		}
+		sys_ser[i].current_position = sys_ser[i].buffer;
+		rc=vmlogrdr_register_device(&sys_ser[i]);
+		if (rc)
+			break;
+	}
+	if (rc)
+		goto cleanup;
+
+	rc = vmlogrdr_register_cdev(dev);
+	if (rc)
+		goto cleanup;
+	return 0;
+
+cleanup:
+	vmlogrdr_cleanup();
+	return rc;
+}
+
+
+static void __exit vmlogrdr_exit(void)
+{
+	vmlogrdr_cleanup();
+	return;
+}
+
+
+module_init(vmlogrdr_init);
+module_exit(vmlogrdr_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/vmur.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/vmur.c
new file mode 100644
index 0000000..73bef0b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/vmur.c
@@ -0,0 +1,1070 @@
+/*
+ * Linux driver for System z and s390 unit record devices
+ * (z/VM virtual punch, reader, printer)
+ *
+ * Copyright IBM Corp. 2001, 2009
+ * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
+ *	    Michael Holzheu <holzheu@de.ibm.com>
+ *	    Frank Munzert <munzert@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "vmur"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <asm/uaccess.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+#include <asm/debug.h>
+#include <asm/diag.h>
+
+#include "vmur.h"
+
+/*
+ * Driver overview
+ *
+ * Unit record device support is implemented as a character device driver.
+ * We can fit at least 16 bits into a device minor number and use the
+ * simple method of mapping a character device number with minor abcd
+ * to the unit record device with devno abcd.
+ * I/O to virtual unit record devices is handled as follows:
+ * Reads: Diagnose code 0x14 (input spool file manipulation)
+ * is used to read spool data page-wise.
+ * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
+ * is available by reading sysfs attr reclen. Each write() to the device
+ * must specify an integral multiple (at most 511 records) of reclen.
+ */
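+/*
+ * Usage sketch (illustrative only): reading a spool file from a virtual
+ * reader at devno 000c could look like
+ *   dd if=/dev/vmrdr-0.0.000c of=file.out bs=4096
+ * while writes to a punch or printer node must be a multiple of the
+ * record length reported in the reclen sysfs attribute.
+ */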
+
+static char ur_banner[] = "z/VM virtual unit record device driver";
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
+MODULE_LICENSE("GPL");
+
+static dev_t ur_first_dev_maj_min;
+static struct class *vmur_class;
+static struct debug_info *vmur_dbf;
+
+/* We put the device's record length (for writes) in the driver_info field */
+static struct ccw_device_id ur_ids[] = {
+	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
+	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
+	{ /* end of list */ }
+};
+
+MODULE_DEVICE_TABLE(ccw, ur_ids);
+
+static int ur_probe(struct ccw_device *cdev);
+static void ur_remove(struct ccw_device *cdev);
+static int ur_set_online(struct ccw_device *cdev);
+static int ur_set_offline(struct ccw_device *cdev);
+static int ur_pm_suspend(struct ccw_device *cdev);
+
+static struct ccw_driver ur_driver = {
+	.driver = {
+		.name	= "vmur",
+		.owner	= THIS_MODULE,
+	},
+	.ids		= ur_ids,
+	.probe		= ur_probe,
+	.remove		= ur_remove,
+	.set_online	= ur_set_online,
+	.set_offline	= ur_set_offline,
+	.freeze		= ur_pm_suspend,
+	.int_class	= IOINT_VMR,
+};
+
+static DEFINE_MUTEX(vmur_mutex);
+
+/*
+ * Allocation, freeing, getting and putting of urdev structures
+ *
+ * Each ur device (urd) contains a reference to its corresponding ccw device
+ * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
+ * ur device using dev_get_drvdata(&cdev->dev) pointer.
+ *
+ * urd references:
+ * - ur_probe gets a urd reference, ur_remove drops the reference
+ *   dev_get_drvdata(&cdev->dev)
+ * - ur_open gets a urd reference, ur_release drops the reference
+ *   (urf->urd)
+ *
+ * cdev references:
+ * - urdev_alloc gets a cdev reference (urd->cdev)
+ * - urdev_free drops the cdev reference (urd->cdev)
+ *
+ * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the ccwdev lock
+ */
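+/*
+ * Rough reference-count lifecycle (illustration only):
+ *   ur_probe   -> urdev_alloc            ref_count = 1
+ *   ur_open    -> urdev_get_from_devno   ref_count = 2
+ *   ur_release -> urdev_put              ref_count = 1
+ *   ur_remove  -> urdev_put              ref_count = 0 -> urdev_free
+ * ur_set_online and ur_set_offline take an additional temporary reference
+ * via urdev_get_from_cdev and drop it again before returning.
+ */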
+static struct urdev *urdev_alloc(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+
+	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
+	if (!urd)
+		return NULL;
+	urd->reclen = cdev->id.driver_info;
+	ccw_device_get_id(cdev, &urd->dev_id);
+	mutex_init(&urd->io_mutex);
+	init_waitqueue_head(&urd->wait);
+	spin_lock_init(&urd->open_lock);
+	atomic_set(&urd->ref_count,  1);
+	urd->cdev = cdev;
+	get_device(&cdev->dev);
+	return urd;
+}
+
+static void urdev_free(struct urdev *urd)
+{
+	TRACE("urdev_free: %p\n", urd);
+	if (urd->cdev)
+		put_device(&urd->cdev->dev);
+	kfree(urd);
+}
+
+static void urdev_get(struct urdev *urd)
+{
+	atomic_inc(&urd->ref_count);
+}
+
+static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	urd = dev_get_drvdata(&cdev->dev);
+	if (urd)
+		urdev_get(urd);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	return urd;
+}
+
+static struct urdev *urdev_get_from_devno(u16 devno)
+{
+	char bus_id[16];
+	struct ccw_device *cdev;
+	struct urdev *urd;
+
+	sprintf(bus_id, "0.0.%04x", devno);
+	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
+	if (!cdev)
+		return NULL;
+	urd = urdev_get_from_cdev(cdev);
+	put_device(&cdev->dev);
+	return urd;
+}
+
+static void urdev_put(struct urdev *urd)
+{
+	if (atomic_dec_and_test(&urd->ref_count))
+		urdev_free(urd);
+}
+
+/*
+ * State and contents of ur devices can be changed by class D users issuing
+ * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended.
+ * Also the Linux guest might be logged off, which causes all active spool
+ * files to be closed.
+ * So we cannot guarantee that spool files are still the same when the Linux
+ * guest is resumed. In order to avoid unpredictable results at resume time
+ * we simply refuse to suspend if a ur device node is open.
+ */
+static int ur_pm_suspend(struct ccw_device *cdev)
+{
+	struct urdev *urd = dev_get_drvdata(&cdev->dev);
+
+	TRACE("ur_pm_suspend: cdev=%p\n", cdev);
+	if (urd->open_flag) {
+		pr_err("Unit record device %s is busy, %s refusing to "
+		       "suspend.\n", dev_name(&cdev->dev), ur_banner);
+		return -EBUSY;
+	}
+	return 0;
+}
+
+/*
+ * Low-level functions to do I/O to a ur device.
+ *     alloc_chan_prog
+ *     free_chan_prog
+ *     do_ur_io
+ *     ur_int_handler
+ *
+ * alloc_chan_prog allocates and builds the channel program
+ * free_chan_prog frees memory of the channel program
+ *
+ * do_ur_io issues the channel program to the device and blocks waiting
+ * on a completion event it publishes at urd->io_done. The function
+ * serialises itself on the device's mutex so that only one I/O
+ * is issued at a time (and that I/O is synchronous).
+ *
+ * ur_int_handler catches the "I/O done" interrupt, writes the
+ * subchannel status word into the scsw member of the urdev structure
+ * and complete()s the io_done to wake the waiting do_ur_io.
+ *
+ * The caller of do_ur_io is responsible for kfree()ing the channel program
+ * address pointer that alloc_chan_prog returned.
+ */
+
+static void free_chan_prog(struct ccw1 *cpa)
+{
+	struct ccw1 *ptr = cpa;
+
+	while (ptr->cda) {
+		kfree((void *)(addr_t) ptr->cda);
+		ptr++;
+	}
+	kfree(cpa);
+}
+
+/*
+ * alloc_chan_prog
+ * The channel program we use is write commands chained together
+ * with a final NOP CCW command-chained on (which ensures that CE and DE
+ * are presented together in a single interrupt instead of as separate
+ * interrupts unless an incorrect length indication kicks in first). The
+ * data length in each CCW is reclen.
+ */
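+/*
+ * Example (illustrative): writing three 80-byte records to a punch device
+ * yields a channel program of four CCWs:
+ *   WRITE (CC|SLI, count=80, cda=buf0)
+ *   WRITE (CC|SLI, count=80, cda=buf1)
+ *   WRITE (CC|SLI, count=80, cda=buf2)
+ *   NOP
+ */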
+static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
+				    int reclen)
+{
+	struct ccw1 *cpa;
+	void *kbuf;
+	int i;
+
+	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);
+
+	/*
+	 * We chain a NOP onto the writes to force CE+DE together.
+	 * That means we allocate room for CCWs to cover count/reclen
+	 * records plus a NOP.
+	 */
+	cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
+		      GFP_KERNEL | GFP_DMA);
+	if (!cpa)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < rec_count; i++) {
+		cpa[i].cmd_code = WRITE_CCW_CMD;
+		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
+		cpa[i].count = reclen;
+		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
+		if (!kbuf) {
+			free_chan_prog(cpa);
+			return ERR_PTR(-ENOMEM);
+		}
+		cpa[i].cda = (u32)(addr_t) kbuf;
+		if (copy_from_user(kbuf, ubuf, reclen)) {
+			free_chan_prog(cpa);
+			return ERR_PTR(-EFAULT);
+		}
+		ubuf += reclen;
+	}
+	/* The following NOP CCW forces CE+DE to be presented together */
+	cpa[i].cmd_code = CCW_CMD_NOOP;
+	return cpa;
+}
+
+static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
+{
+	int rc;
+	struct ccw_device *cdev = urd->cdev;
+	DECLARE_COMPLETION_ONSTACK(event);
+
+	TRACE("do_ur_io: cpa=%p\n", cpa);
+
+	rc = mutex_lock_interruptible(&urd->io_mutex);
+	if (rc)
+		return rc;
+
+	urd->io_done = &event;
+
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
+	if (rc)
+		goto out;
+
+	wait_for_completion(&event);
+	TRACE("do_ur_io: I/O complete\n");
+	rc = 0;
+
+out:
+	mutex_unlock(&urd->io_mutex);
+	return rc;
+}
+
+/*
+ * ur interrupt handler, called from the ccw_device layer
+ */
+static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
+			   struct irb *irb)
+{
+	struct urdev *urd;
+
+	TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
+	      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
+	      irb->scsw.cmd.count);
+
+	if (!intparm) {
+		TRACE("ur_int_handler: unsolicited interrupt\n");
+		return;
+	}
+	urd = dev_get_drvdata(&cdev->dev);
+	BUG_ON(!urd);
+	/* On special conditions irb is an error pointer */
+	if (IS_ERR(irb))
+		urd->io_request_rc = PTR_ERR(irb);
+	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+		urd->io_request_rc = 0;
+	else
+		urd->io_request_rc = -EIO;
+
+	complete(urd->io_done);
+}
+
+/*
+ * reclen sysfs attribute - The record length to be used for write CCWs
+ */
+static ssize_t ur_attr_reclen_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct urdev *urd;
+	int rc;
+
+	urd = urdev_get_from_cdev(to_ccwdev(dev));
+	if (!urd)
+		return -ENODEV;
+	rc = sprintf(buf, "%zu\n", urd->reclen);
+	urdev_put(urd);
+	return rc;
+}
+
+static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);
+
+static int ur_create_attributes(struct device *dev)
+{
+	return device_create_file(dev, &dev_attr_reclen);
+}
+
+static void ur_remove_attributes(struct device *dev)
+{
+	device_remove_file(dev, &dev_attr_reclen);
+}
+
+/*
+ * diagnose code 0x210 - retrieve device information
+ * cc=0  normal completion, we have a real device
+ * cc=1  CP paging error
+ * cc=2  The virtual device exists, but is not associated with a real device
+ * cc=3  Invalid device address, or the virtual device does not exist
+ */
+static int get_urd_class(struct urdev *urd)
+{
+	static struct diag210 ur_diag210;
+	int cc;
+
+	ur_diag210.vrdcdvno = urd->dev_id.devno;
+	ur_diag210.vrdclen = sizeof(struct diag210);
+
+	cc = diag210(&ur_diag210);
+	switch (cc) {
+	case 0:
+		return -EOPNOTSUPP;
+	case 2:
+		return ur_diag210.vrdcvcla; /* virtual device class */
+	case 3:
+		return -ENODEV;
+	default:
+		return -EIO;
+	}
+}
+
+/*
+ * Allocation and freeing of urfile structures
+ */
+static struct urfile *urfile_alloc(struct urdev *urd)
+{
+	struct urfile *urf;
+
+	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
+	if (!urf)
+		return NULL;
+	urf->urd = urd;
+
+	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
+	      urf->dev_reclen);
+
+	return urf;
+}
+
+static void urfile_free(struct urfile *urf)
+{
+	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
+	kfree(urf);
+}
+
+/*
+ * The fops implementation of the character device driver
+ */
+static ssize_t do_write(struct urdev *urd, const char __user *udata,
+			size_t count, size_t reclen, loff_t *ppos)
+{
+	struct ccw1 *cpa;
+	int rc;
+
+	cpa = alloc_chan_prog(udata, count / reclen, reclen);
+	if (IS_ERR(cpa))
+		return PTR_ERR(cpa);
+
+	rc = do_ur_io(urd, cpa);
+	if (rc)
+		goto fail_kfree_cpa;
+
+	if (urd->io_request_rc) {
+		rc = urd->io_request_rc;
+		goto fail_kfree_cpa;
+	}
+	*ppos += count;
+	rc = count;
+
+fail_kfree_cpa:
+	free_chan_prog(cpa);
+	return rc;
+}
+
+static ssize_t ur_write(struct file *file, const char __user *udata,
+			size_t count, loff_t *ppos)
+{
+	struct urfile *urf = file->private_data;
+
+	TRACE("ur_write: count=%zu\n", count);
+
+	if (count == 0)
+		return 0;
+
+	if (count % urf->dev_reclen)
+		return -EINVAL;	/* count must be a multiple of reclen */
+
+	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
+		count = urf->dev_reclen * MAX_RECS_PER_IO;
+
+	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
+}
+
+/*
+ * diagnose code 0x14 subcode 0x0028 - position spool file to designated
+ *				       record
+ * cc=0  normal completion
+ * cc=2  no file active on the virtual reader or device not ready
+ * cc=3  record specified is beyond EOF
+ */
+static int diag_position_to_record(int devno, int record)
+{
+	int cc;
+
+	cc = diag14(record, devno, 0x28);
+	switch (cc) {
+	case 0:
+		return 0;
+	case 2:
+		return -ENOMEDIUM;
+	case 3:
+		return -ENODATA; /* position beyond end of file */
+	default:
+		return -EIO;
+	}
+}
+
+/*
+ * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
+ * cc=0  normal completion
+ * cc=1  EOF reached
+ * cc=2  no file active on the virtual reader, and no file eligible
+ * cc=3  file already active on the virtual reader or specified virtual
+ *	 reader does not exist or is not a reader
+ */
+static int diag_read_file(int devno, char *buf)
+{
+	int cc;
+
+	cc = diag14((unsigned long) buf, devno, 0x00);
+	switch (cc) {
+	case 0:
+		return 0;
+	case 1:
+		return -ENODATA;
+	case 2:
+		return -ENOMEDIUM;
+	default:
+		return -EIO;
+	}
+}
+
+static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
+			   loff_t *offs)
+{
+	size_t len, copied, res;
+	char *buf;
+	int rc;
+	u16 reclen;
+	struct urdev *urd;
+
+	urd = ((struct urfile *) file->private_data)->urd;
+	reclen = ((struct urfile *) file->private_data)->file_reclen;
+
+	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
+	if (rc == -ENODATA)
+		return 0;
+	if (rc)
+		return rc;
+
+	len = min((size_t) PAGE_SIZE, count);
+	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
+	if (!buf)
+		return -ENOMEM;
+
+	copied = 0;
+	res = (size_t) (*offs % PAGE_SIZE);
+	do {
+		rc = diag_read_file(urd->dev_id.devno, buf);
+		if (rc == -ENODATA) {
+			break;
+		}
+		if (rc)
+			goto fail;
+		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
+			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
+		len = min(count - copied, PAGE_SIZE - res);
+		if (copy_to_user(ubuf + copied, buf + res, len)) {
+			rc = -EFAULT;
+			goto fail;
+		}
+		res = 0;
+		copied += len;
+	} while (copied != count);
+
+	*offs += copied;
+	rc = copied;
+fail:
+	free_page((unsigned long) buf);
+	return rc;
+}
+
+static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
+		       loff_t *offs)
+{
+	struct urdev *urd;
+	int rc;
+
+	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);
+
+	if (count == 0)
+		return 0;
+
+	urd = ((struct urfile *) file->private_data)->urd;
+	rc = mutex_lock_interruptible(&urd->io_mutex);
+	if (rc)
+		return rc;
+	rc = diag14_read(file, ubuf, count, offs);
+	mutex_unlock(&urd->io_mutex);
+	return rc;
+}
+
+/*
+ * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
+ * cc=0  normal completion
+ * cc=1  no files on reader queue or no subsequent file
+ * cc=2  spid specified is invalid
+ */
+static int diag_read_next_file_info(struct file_control_block *buf, int spid)
+{
+	int cc;
+
+	cc = diag14((unsigned long) buf, spid, 0xfff);
+	switch (cc) {
+	case 0:
+		return 0;
+	default:
+		return -ENODATA;
+	}
+}
+
+static int verify_uri_device(struct urdev *urd)
+{
+	struct file_control_block *fcb;
+	char *buf;
+	int rc;
+
+	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
+	if (!fcb)
+		return -ENOMEM;
+
+	/* check for empty reader device (beginning of chain) */
+	rc = diag_read_next_file_info(fcb, 0);
+	if (rc)
+		goto fail_free_fcb;
+
+	/* if file is in hold status, we do not read it */
+	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
+		rc = -EPERM;
+		goto fail_free_fcb;
+	}
+
+	/* open file on virtual reader	*/
+	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
+	if (!buf) {
+		rc = -ENOMEM;
+		goto fail_free_fcb;
+	}
+	rc = diag_read_file(urd->dev_id.devno, buf);
+	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
+		goto fail_free_buf;
+
+	/* check if the file on top of the queue is open now */
+	rc = diag_read_next_file_info(fcb, 0);
+	if (rc)
+		goto fail_free_buf;
+	if (!(fcb->file_stat & FLG_IN_USE)) {
+		rc = -EMFILE;
+		goto fail_free_buf;
+	}
+	rc = 0;
+
+fail_free_buf:
+	free_page((unsigned long) buf);
+fail_free_fcb:
+	kfree(fcb);
+	return rc;
+}
+
+static int verify_device(struct urdev *urd)
+{
+	switch (urd->class) {
+	case DEV_CLASS_UR_O:
+		return 0; /* no check needed here */
+	case DEV_CLASS_UR_I:
+		return verify_uri_device(urd);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int get_uri_file_reclen(struct urdev *urd)
+{
+	struct file_control_block *fcb;
+	int rc;
+
+	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
+	if (!fcb)
+		return -ENOMEM;
+	rc = diag_read_next_file_info(fcb, 0);
+	if (rc)
+		goto fail_free;
+	if (fcb->file_stat & FLG_CP_DUMP)
+		rc = 0;
+	else
+		rc = fcb->rec_len;
+
+fail_free:
+	kfree(fcb);
+	return rc;
+}
+
+static int get_file_reclen(struct urdev *urd)
+{
+	switch (urd->class) {
+	case DEV_CLASS_UR_O:
+		return 0;
+	case DEV_CLASS_UR_I:
+		return get_uri_file_reclen(urd);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int ur_open(struct inode *inode, struct file *file)
+{
+	u16 devno;
+	struct urdev *urd;
+	struct urfile *urf;
+	unsigned short accmode;
+	int rc;
+
+	accmode = file->f_flags & O_ACCMODE;
+
+	if (accmode == O_RDWR)
+		return -EACCES;
+	/*
+	 * We treat the minor number as the devno of the ur device
+	 * to find in the driver tree.
+	 */
+	devno = MINOR(file->f_dentry->d_inode->i_rdev);
+
+	urd = urdev_get_from_devno(devno);
+	if (!urd) {
+		rc = -ENXIO;
+		goto out;
+	}
+
+	spin_lock(&urd->open_lock);
+	while (urd->open_flag) {
+		spin_unlock(&urd->open_lock);
+		if (file->f_flags & O_NONBLOCK) {
+			rc = -EBUSY;
+			goto fail_put;
+		}
+		if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
+			rc = -ERESTARTSYS;
+			goto fail_put;
+		}
+		spin_lock(&urd->open_lock);
+	}
+	urd->open_flag++;
+	spin_unlock(&urd->open_lock);
+
+	TRACE("ur_open\n");
+
+	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
+	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
+		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
+		rc = -EACCES;
+		goto fail_unlock;
+	}
+
+	rc = verify_device(urd);
+	if (rc)
+		goto fail_unlock;
+
+	urf = urfile_alloc(urd);
+	if (!urf) {
+		rc = -ENOMEM;
+		goto fail_unlock;
+	}
+
+	urf->dev_reclen = urd->reclen;
+	rc = get_file_reclen(urd);
+	if (rc < 0)
+		goto fail_urfile_free;
+	urf->file_reclen = rc;
+	file->private_data = urf;
+	return 0;
+
+fail_urfile_free:
+	urfile_free(urf);
+fail_unlock:
+	spin_lock(&urd->open_lock);
+	urd->open_flag--;
+	spin_unlock(&urd->open_lock);
+fail_put:
+	urdev_put(urd);
+out:
+	return rc;
+}
+
+static int ur_release(struct inode *inode, struct file *file)
+{
+	struct urfile *urf = file->private_data;
+
+	TRACE("ur_release\n");
+	spin_lock(&urf->urd->open_lock);
+	urf->urd->open_flag--;
+	spin_unlock(&urf->urd->open_lock);
+	wake_up_interruptible(&urf->urd->wait);
+	urdev_put(urf->urd);
+	urfile_free(urf);
+	return 0;
+}
+
+static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
+{
+	loff_t newpos;
+
+	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
+		return -ESPIPE; /* seek allowed only for reader */
+	if (offset % PAGE_SIZE)
+		return -ESPIPE; /* only multiples of 4K allowed */
+	switch (whence) {
+	case 0: /* SEEK_SET */
+		newpos = offset;
+		break;
+	case 1: /* SEEK_CUR */
+		newpos = file->f_pos + offset;
+		break;
+	default:
+		return -EINVAL;
+	}
+	file->f_pos = newpos;
+	return newpos;
+}
+
+static const struct file_operations ur_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = ur_open,
+	.release = ur_release,
+	.read	 = ur_read,
+	.write	 = ur_write,
+	.llseek  = ur_llseek,
+};
+
+/*
+ * ccw_device infrastructure:
+ *     ur_probe creates the struct urdev (with refcount = 1), the device
+ *     attributes, sets up the interrupt handler and validates the virtual
+ *     unit record device.
+ *     ur_remove removes the device attributes and drops the reference to
+ *     struct urdev.
+ *
+ *     ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
+ *     by the vmur_mutex lock.
+ *
+ *     urd->char_device is used as indication that the online function has
+ *     been completed successfully.
+ */
+static int ur_probe(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+	int rc;
+
+	TRACE("ur_probe: cdev=%p\n", cdev);
+
+	mutex_lock(&vmur_mutex);
+	urd = urdev_alloc(cdev);
+	if (!urd) {
+		rc = -ENOMEM;
+		goto fail_unlock;
+	}
+
+	rc = ur_create_attributes(&cdev->dev);
+	if (rc) {
+		rc = -ENOMEM;
+		goto fail_urdev_put;
+	}
+	cdev->handler = ur_int_handler;
+
+	/* validate virtual unit record device */
+	urd->class = get_urd_class(urd);
+	if (urd->class < 0) {
+		rc = urd->class;
+		goto fail_remove_attr;
+	}
+	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
+		rc = -EOPNOTSUPP;
+		goto fail_remove_attr;
+	}
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	dev_set_drvdata(&cdev->dev, urd);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	mutex_unlock(&vmur_mutex);
+	return 0;
+
+fail_remove_attr:
+	ur_remove_attributes(&cdev->dev);
+fail_urdev_put:
+	urdev_put(urd);
+fail_unlock:
+	mutex_unlock(&vmur_mutex);
+	return rc;
+}
+
+static int ur_set_online(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+	int minor, major, rc;
+	char node_id[16];
+
+	TRACE("ur_set_online: cdev=%p\n", cdev);
+
+	mutex_lock(&vmur_mutex);
+	urd = urdev_get_from_cdev(cdev);
+	if (!urd) {
+		/* ur_remove already deleted our urd */
+		rc = -ENODEV;
+		goto fail_unlock;
+	}
+
+	if (urd->char_device) {
+		/* Another ur_set_online was faster */
+		rc = -EBUSY;
+		goto fail_urdev_put;
+	}
+
+	minor = urd->dev_id.devno;
+	major = MAJOR(ur_first_dev_maj_min);
+
+	urd->char_device = cdev_alloc();
+	if (!urd->char_device) {
+		rc = -ENOMEM;
+		goto fail_urdev_put;
+	}
+
+	urd->char_device->ops = &ur_fops;
+	urd->char_device->dev = MKDEV(major, minor);
+	urd->char_device->owner = ur_fops.owner;
+
+	rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
+	if (rc)
+		goto fail_free_cdev;
+	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
+		if (urd->class == DEV_CLASS_UR_I)
+			sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
+		if (urd->class == DEV_CLASS_UR_O)
+			sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
+	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
+		sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
+	} else {
+		rc = -EOPNOTSUPP;
+		goto fail_free_cdev;
+	}
+
+	urd->device = device_create(vmur_class, NULL, urd->char_device->dev,
+				    NULL, "%s", node_id);
+	if (IS_ERR(urd->device)) {
+		rc = PTR_ERR(urd->device);
+		TRACE("ur_set_online: device_create rc=%d\n", rc);
+		goto fail_free_cdev;
+	}
+	urdev_put(urd);
+	mutex_unlock(&vmur_mutex);
+	return 0;
+
+fail_free_cdev:
+	cdev_del(urd->char_device);
+	urd->char_device = NULL;
+fail_urdev_put:
+	urdev_put(urd);
+fail_unlock:
+	mutex_unlock(&vmur_mutex);
+	return rc;
+}
+
+static int ur_set_offline_force(struct ccw_device *cdev, int force)
+{
+	struct urdev *urd;
+	int rc;
+
+	TRACE("ur_set_offline: cdev=%p\n", cdev);
+	urd = urdev_get_from_cdev(cdev);
+	if (!urd)
+		/* ur_remove already deleted our urd */
+		return -ENODEV;
+	if (!urd->char_device) {
+		/* Another ur_set_offline was faster */
+		rc = -EBUSY;
+		goto fail_urdev_put;
+	}
+	if (!force && (atomic_read(&urd->ref_count) > 2)) {
+		/* There is still a user of urd (e.g. ur_open) */
+		TRACE("ur_set_offline: BUSY\n");
+		rc = -EBUSY;
+		goto fail_urdev_put;
+	}
+	device_destroy(vmur_class, urd->char_device->dev);
+	cdev_del(urd->char_device);
+	urd->char_device = NULL;
+	rc = 0;
+
+fail_urdev_put:
+	urdev_put(urd);
+	return rc;
+}
+
+static int ur_set_offline(struct ccw_device *cdev)
+{
+	int rc;
+
+	mutex_lock(&vmur_mutex);
+	rc = ur_set_offline_force(cdev, 0);
+	mutex_unlock(&vmur_mutex);
+	return rc;
+}
+
+static void ur_remove(struct ccw_device *cdev)
+{
+	unsigned long flags;
+
+	TRACE("ur_remove\n");
+
+	mutex_lock(&vmur_mutex);
+
+	if (cdev->online)
+		ur_set_offline_force(cdev, 1);
+	ur_remove_attributes(&cdev->dev);
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	urdev_put(dev_get_drvdata(&cdev->dev));
+	dev_set_drvdata(&cdev->dev, NULL);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	mutex_unlock(&vmur_mutex);
+}
+
+/*
+ * Module initialisation and cleanup
+ */
+static int __init ur_init(void)
+{
+	int rc;
+	dev_t dev;
+
+	if (!MACHINE_IS_VM) {
+		pr_err("The %s cannot be loaded without z/VM\n",
+		       ur_banner);
+		return -ENODEV;
+	}
+
+	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
+	if (!vmur_dbf)
+		return -ENOMEM;
+	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
+	if (rc)
+		goto fail_free_dbf;
+
+	debug_set_level(vmur_dbf, 6);
+
+	vmur_class = class_create(THIS_MODULE, "vmur");
+	if (IS_ERR(vmur_class)) {
+		rc = PTR_ERR(vmur_class);
+		goto fail_free_dbf;
+	}
+
+	rc = ccw_driver_register(&ur_driver);
+	if (rc)
+		goto fail_class_destroy;
+
+	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
+	if (rc) {
+		pr_err("Kernel function alloc_chrdev_region failed with "
+		       "error code %d\n", rc);
+		goto fail_unregister_driver;
+	}
+	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);
+
+	pr_info("%s loaded.\n", ur_banner);
+	return 0;
+
+fail_unregister_driver:
+	ccw_driver_unregister(&ur_driver);
+fail_class_destroy:
+	class_destroy(vmur_class);
+fail_free_dbf:
+	debug_unregister(vmur_dbf);
+	return rc;
+}
+
+static void __exit ur_exit(void)
+{
+	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
+	ccw_driver_unregister(&ur_driver);
+	class_destroy(vmur_class);
+	debug_unregister(vmur_dbf);
+	pr_info("%s unloaded.\n", ur_banner);
+}
+
+module_init(ur_init);
+module_exit(ur_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/vmur.h b/ap/os/linux/linux-3.4.x/drivers/s390/char/vmur.h
new file mode 100644
index 0000000..fa320ad
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/vmur.h
@@ -0,0 +1,110 @@
+/*
+ * Linux driver for System z and s390 unit record devices
+ * (z/VM virtual punch, reader, printer)
+ *
+ * Copyright IBM Corp. 2001, 2007
+ * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
+ *	    Michael Holzheu <holzheu@de.ibm.com>
+ *	    Frank Munzert <munzert@de.ibm.com>
+ */
+
+#ifndef _VMUR_H_
+#define _VMUR_H_
+
+#define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */
+#define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */
+/*
+ * we only support z/VM's default unit record devices:
+ * both in SPOOL directory control statement and in CP DEFINE statement
+ *	RDR defaults to 2540 reader
+ *	PUN defaults to 2540 punch
+ *	PRT defaults to 1403 printer
+ */
+#define READER_PUNCH_DEVTYPE	0x2540
+#define PRINTER_DEVTYPE		0x1403
+
+/* z/VM spool file control block SFBLOK */
+struct file_control_block {
+	char reserved_1[8];
+	char user_owner[8];
+	char user_orig[8];
+	__s32 data_recs;
+	__s16 rec_len;
+	__s16 file_num;
+	__u8  file_stat;
+	__u8  dev_type;
+	char  reserved_2[6];
+	char  file_name[12];
+	char  file_type[12];
+	char  create_date[8];
+	char  create_time[8];
+	char  reserved_3[6];
+	__u8  file_class;
+	__u8  sfb_lok;
+	__u64 distr_code;
+	__u32 reserved_4;
+	__u8  current_starting_copy_number;
+	__u8  sfblock_cntrl_flags;
+	__u8  reserved_5;
+	__u8  more_status_flags;
+	char  rest[200];
+} __attribute__ ((packed));
+
+#define FLG_SYSTEM_HOLD	0x04
+#define FLG_CP_DUMP	0x10
+#define FLG_USER_HOLD	0x20
+#define FLG_IN_USE	0x80
+
+/*
+ * A struct urdev is created for each ur device that is made available
+ * via the ccw_device driver model.
+ */
+struct urdev {
+	struct ccw_device *cdev;	/* Backpointer to ccw device */
+	struct mutex io_mutex;		/* Serialises device IO */
+	struct completion *io_done;	/* do_ur_io waits; irq completes */
+	struct device *device;
+	struct cdev *char_device;
+	struct ccw_dev_id dev_id;	/* device id */
+	size_t reclen;			/* Record length for *write* CCWs */
+	int class;			/* VM device class */
+	int io_request_rc;		/* return code from I/O request */
+	atomic_t ref_count;		/* reference counter */
+	wait_queue_head_t wait;		/* wait queue to serialize open */
+	int open_flag;			/* "urdev is open" flag */
+	spinlock_t open_lock;		/* serialize critical sections */
+};
+
+/*
+ * A struct urfile is allocated at open() time for each device and
+ * freed on release().
+ */
+struct urfile {
+	struct urdev *urd;
+	unsigned int flags;
+	size_t dev_reclen;
+	__u16 file_reclen;
+};
+
+/*
+ * Device major/minor definitions.
+ */
+
+#define UR_MAJOR 0	/* get dynamic major */
+/*
+ * We map minor numbers directly to device numbers (0-FFFF) for simplicity.
+ * This avoids having to allocate (and manage) slot numbers.
+ */
+#define NUM_MINORS 65536
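+/*
+ * Example (illustrative): a virtual reader at devno 0x000c gets minor
+ * 0x000c, so ur_set_online creates the node vmrdr-0.0.000c with
+ * MKDEV(<dynamic major>, 0x000c), and ur_open maps the minor back to the
+ * devno to look up the ccw device.
+ */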
+
+/* Limiting each I/O to 511 records limits chan prog to 4KB (511 r/w + 1 NOP) */
+#define MAX_RECS_PER_IO		511
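+/* (511 write CCWs + 1 NOP) * sizeof(struct ccw1) = 512 * 8 = 4096 bytes */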
+#define WRITE_CCW_CMD		0x01
+
+#define TRACE(x...) debug_sprintf_event(vmur_dbf, 1, x)
+#define CCWDEV_CU_DI(cutype, di) \
+		CCW_DEVICE(cutype, 0x00), .driver_info = (di)
+
+#define FILE_RECLEN_OFFSET	4064 /* reclen offset in spool data block */
+
+#endif /* _VMUR_H_ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/vmwatchdog.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/vmwatchdog.c
new file mode 100644
index 0000000..2211277
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/vmwatchdog.c
@@ -0,0 +1,337 @@
+/*
+ * Watchdog implementation based on z/VM Watchdog Timer API
+ *
+ * Copyright IBM Corp. 2004,2009
+ *
+ * The user space watchdog daemon can use this driver as
+ * /dev/vmwatchdog to have z/VM execute the specified CP
+ * command when the timeout expires. The default command is
+ * "IPL", which will cause an immediate reboot.
+ */
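+/*
+ * Usage sketch (illustrative only): a daemon opens /dev/vmwatchdog, which
+ * arms the timer, and then writes any data within each timeout interval
+ * (60 seconds by default, changeable via WDIOC_SETTIMEOUT, minimum 15) to
+ * re-arm it.  Writing the magic character 'V' before closing disables the
+ * watchdog again, unless the nowayout module parameter is set.
+ */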
+#define KMSG_COMPONENT "vmwatchdog"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/watchdog.h>
+
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#define MAX_CMDLEN 240
+#define MIN_INTERVAL 15
+static char vmwdt_cmd[MAX_CMDLEN] = "IPL";
+static bool vmwdt_conceal;
+
+static bool vmwdt_nowayout = WATCHDOG_NOWAYOUT;
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
+MODULE_DESCRIPTION("z/VM Watchdog Timer");
+module_param_string(cmd, vmwdt_cmd, MAX_CMDLEN, 0644);
+MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers");
+module_param_named(conceal, vmwdt_conceal, bool, 0644);
+MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog "
+		"is active");
+module_param_named(nowayout, vmwdt_nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
+		" (default=CONFIG_WATCHDOG_NOWAYOUT)");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+
+static unsigned int vmwdt_interval = 60;
+static unsigned long vmwdt_is_open;
+static int vmwdt_expect_close;
+
+static DEFINE_MUTEX(vmwdt_mutex);
+
+#define VMWDT_OPEN	0	/* devnode is open or suspend in progress */
+#define VMWDT_RUNNING	1	/* The watchdog is armed */
+
+enum vmwdt_func {
+	/* function codes */
+	wdt_init   = 0,
+	wdt_change = 1,
+	wdt_cancel = 2,
+	/* flags */
+	wdt_conceal = 0x80000000,
+};
+
+static int __diag288(enum vmwdt_func func, unsigned int timeout,
+			    char *cmd, size_t len)
+{
+	register unsigned long __func asm("2") = func;
+	register unsigned long __timeout asm("3") = timeout;
+	register unsigned long __cmdp asm("4") = virt_to_phys(cmd);
+	register unsigned long __cmdl asm("5") = len;
+	int err;
+
+	err = -EINVAL;
+	asm volatile(
+		"	diag	%1,%3,0x288\n"
+		"0:	la	%0,0\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "+d" (err) : "d"(__func), "d"(__timeout),
+		  "d"(__cmdp), "d"(__cmdl) : "1", "cc");
+	return err;
+}
+
+static int vmwdt_keepalive(void)
+{
+	/* we allocate new memory every time to avoid having
+	 * to track the state. static allocation is not an
+	 * option since that might not be contiguous in real
+	 * storage in case of a modular build */
+	static char *ebc_cmd;
+	size_t len;
+	int ret;
+	unsigned int func;
+
+	ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
+	if (!ebc_cmd)
+		return -ENOMEM;
+
+	len = strlcpy(ebc_cmd, vmwdt_cmd, MAX_CMDLEN);
+	ASCEBC(ebc_cmd, MAX_CMDLEN);
+	EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
+
+	func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init;
+	set_bit(VMWDT_RUNNING, &vmwdt_is_open);
+	ret = __diag288(func, vmwdt_interval, ebc_cmd, len);
+	WARN_ON(ret != 0);
+	kfree(ebc_cmd);
+	return ret;
+}
+
+static int vmwdt_disable(void)
+{
+	int ret = __diag288(wdt_cancel, 0, "", 0);
+	WARN_ON(ret != 0);
+	clear_bit(VMWDT_RUNNING, &vmwdt_is_open);
+	return ret;
+}
+
+static int __init vmwdt_probe(void)
+{
+	/* there is no real way to see if the watchdog is supported,
+	 * so we try initializing it with a NOP command ("BEGIN")
+	 * that won't cause any harm even if the following disable
+	 * fails for some reason */
+	static char __initdata ebc_begin[] = {
+		194, 197, 199, 201, 213
+	};
+	if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0)
+		return -EINVAL;
+	return vmwdt_disable();
+}
+
+static int vmwdt_open(struct inode *i, struct file *f)
+{
+	int ret;
+	if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open))
+		return -EBUSY;
+	ret = vmwdt_keepalive();
+	if (ret)
+		clear_bit(VMWDT_OPEN, &vmwdt_is_open);
+	return ret ? ret : nonseekable_open(i, f);
+}
+
+static int vmwdt_close(struct inode *i, struct file *f)
+{
+	if (vmwdt_expect_close == 42)
+		vmwdt_disable();
+	vmwdt_expect_close = 0;
+	clear_bit(VMWDT_OPEN, &vmwdt_is_open);
+	return 0;
+}
+
+static struct watchdog_info vmwdt_info = {
+	.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+	.firmware_version = 0,
+	.identity = "z/VM Watchdog Timer",
+};
+
+static int __vmwdt_ioctl(unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		if (copy_to_user((void __user *)arg, &vmwdt_info,
+					sizeof(vmwdt_info)))
+			return -EFAULT;
+		return 0;
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(0, (int __user *)arg);
+	case WDIOC_GETTEMP:
+		return -EINVAL;
+	case WDIOC_SETOPTIONS:
+		{
+			int options, ret;
+			if (get_user(options, (int __user *)arg))
+				return -EFAULT;
+			ret = -EINVAL;
+			if (options & WDIOS_DISABLECARD) {
+				ret = vmwdt_disable();
+				if (ret)
+					return ret;
+			}
+			if (options & WDIOS_ENABLECARD) {
+				ret = vmwdt_keepalive();
+			}
+			return ret;
+		}
+	case WDIOC_GETTIMEOUT:
+		return put_user(vmwdt_interval, (int __user *)arg);
+	case WDIOC_SETTIMEOUT:
+		{
+			int interval;
+			if (get_user(interval, (int __user *)arg))
+				return -EFAULT;
+			if (interval < MIN_INTERVAL)
+				return -EINVAL;
+			vmwdt_interval = interval;
+		}
+		return vmwdt_keepalive();
+	case WDIOC_KEEPALIVE:
+		return vmwdt_keepalive();
+	}
+	return -EINVAL;
+}
+
+static long vmwdt_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+	int rc;
+
+	mutex_lock(&vmwdt_mutex);
+	rc = __vmwdt_ioctl(cmd, arg);
+	mutex_unlock(&vmwdt_mutex);
+	return (long) rc;
+}
+
+static ssize_t vmwdt_write(struct file *f, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	if(count) {
+		if (!vmwdt_nowayout) {
+			size_t i;
+
+			/* note: just in case someone wrote the magic character
+			 * five months ago... */
+			vmwdt_expect_close = 0;
+
+			for (i = 0; i != count; i++) {
+				char c;
+				if (get_user(c, buf+i))
+					return -EFAULT;
+				if (c == 'V')
+					vmwdt_expect_close = 42;
+			}
+		}
+		/* someone wrote to us, we should restart timer */
+		vmwdt_keepalive();
+	}
+	return count;
+}
+
+static int vmwdt_resume(void)
+{
+	clear_bit(VMWDT_OPEN, &vmwdt_is_open);
+	return NOTIFY_DONE;
+}
+
+/*
+ * It makes no sense to go into suspend while the watchdog is running.
+ * Depending on the memory size, the watchdog might trigger, while we
+ * are still saving the memory.
+ * We reuse the open flag to ensure that suspend and watchdog open are
+ * exclusive operations
+ */
+static int vmwdt_suspend(void)
+{
+	if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
+		pr_err("The system cannot be suspended while the watchdog"
+			" is in use\n");
+		return notifier_from_errno(-EBUSY);
+	}
+	if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) {
+		clear_bit(VMWDT_OPEN, &vmwdt_is_open);
+		pr_err("The system cannot be suspended while the watchdog"
+			" is running\n");
+		return notifier_from_errno(-EBUSY);
+	}
+	return NOTIFY_DONE;
+}
+
+/*
+ * This function is called for suspend and resume.
+ */
+static int vmwdt_power_event(struct notifier_block *this, unsigned long event,
+			     void *ptr)
+{
+	switch (event) {
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		return vmwdt_resume();
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		return vmwdt_suspend();
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block vmwdt_power_notifier = {
+	.notifier_call = vmwdt_power_event,
+};
+
+static const struct file_operations vmwdt_fops = {
+	.open    = &vmwdt_open,
+	.release = &vmwdt_close,
+	.unlocked_ioctl = &vmwdt_ioctl,
+	.write   = &vmwdt_write,
+	.owner   = THIS_MODULE,
+	.llseek  = noop_llseek,
+};
+
+static struct miscdevice vmwdt_dev = {
+	.minor      = WATCHDOG_MINOR,
+	.name       = "watchdog",
+	.fops       = &vmwdt_fops,
+};
+
+static int __init vmwdt_init(void)
+{
+	int ret;
+
+	ret = vmwdt_probe();
+	if (ret)
+		return ret;
+	ret = register_pm_notifier(&vmwdt_power_notifier);
+	if (ret)
+		return ret;
+	/*
+	 * misc_register() has to be the last action in module_init(), because
+	 * file operations will be available right after this.
+	 */
+	ret = misc_register(&vmwdt_dev);
+	if (ret) {
+		unregister_pm_notifier(&vmwdt_power_notifier);
+		return ret;
+	}
+	return 0;
+}
+module_init(vmwdt_init);
+
+static void __exit vmwdt_exit(void)
+{
+	unregister_pm_notifier(&vmwdt_power_notifier);
+	misc_deregister(&vmwdt_dev);
+}
+module_exit(vmwdt_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/char/zcore.c b/ap/os/linux/linux-3.4.x/drivers/s390/char/zcore.c
new file mode 100644
index 0000000..3303d66
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/char/zcore.c
@@ -0,0 +1,748 @@
+/*
+ * zcore module to export memory content and register sets for creating system
+ * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
+ * dump format as s390 standalone dumps.
+ *
+ * For more information please refer to Documentation/s390/zfcpdump.txt
+ *
+ * Copyright IBM Corp. 2003,2008
+ * Author(s): Michael Holzheu
+ */
+
+#define KMSG_COMPONENT "zdump"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <asm/asm-offsets.h>
+#include <asm/ipl.h>
+#include <asm/sclp.h>
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/debug.h>
+#include <asm/processor.h>
+#include <asm/irqflags.h>
+#include <asm/checksum.h>
+#include "sclp.h"
+
+#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
+
+#define TO_USER		0
+#define TO_KERNEL	1
+#define CHUNK_INFO_SIZE	34 /* 2 16-char hex numbers, each followed by a blank */
+
+enum arch_id {
+	ARCH_S390	= 0,
+	ARCH_S390X	= 1,
+};
+
+/* dump system info */
+
+struct sys_info {
+	enum arch_id	 arch;
+	unsigned long	 sa_base;
+	u32		 sa_size;
+	int		 cpu_map[NR_CPUS];
+	unsigned long	 mem_size;
+	struct save_area lc_mask;
+};
+
+struct ipib_info {
+	unsigned long	ipib;
+	u32		checksum;
+}  __attribute__((packed));
+
+static struct sys_info sys_info;
+static struct debug_info *zcore_dbf;
+static int hsa_available;
+static struct dentry *zcore_dir;
+static struct dentry *zcore_file;
+static struct dentry *zcore_memmap_file;
+static struct dentry *zcore_reipl_file;
+static struct ipl_parameter_block *ipl_block;
+
+/*
+ * Copy memory from HSA to kernel or user memory (not reentrant):
+ *
+ * @dest:  Kernel or user buffer where memory should be copied to
+ * @src:   Start address within HSA where data should be copied
+ * @count: Size of buffer, which should be copied
+ * @mode:  Either TO_KERNEL or TO_USER
+ */
+static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
+{
+	int offs, blk_num;
+	static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+
+	if (count == 0)
+		return 0;
+
+	/* copy first block */
+	offs = 0;
+	if ((src % PAGE_SIZE) != 0) {
+		blk_num = src / PAGE_SIZE + 2;
+		if (sclp_sdias_copy(buf, blk_num, 1)) {
+			TRACE("sclp_sdias_copy() failed\n");
+			return -EIO;
+		}
+		offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
+		if (mode == TO_USER) {
+			if (copy_to_user((__force __user void*) dest,
+					 buf + (src % PAGE_SIZE), offs))
+				return -EFAULT;
+		} else
+			memcpy(dest, buf + (src % PAGE_SIZE), offs);
+	}
+	if (offs == count)
+		goto out;
+
+	/* copy middle */
+	for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
+		blk_num = (src + offs) / PAGE_SIZE + 2;
+		if (sclp_sdias_copy(buf, blk_num, 1)) {
+			TRACE("sclp_sdias_copy() failed\n");
+			return -EIO;
+		}
+		if (mode == TO_USER) {
+			if (copy_to_user((__force __user void*) dest + offs,
+					 buf, PAGE_SIZE))
+				return -EFAULT;
+		} else
+			memcpy(dest + offs, buf, PAGE_SIZE);
+	}
+	if (offs == count)
+		goto out;
+
+	/* copy last block */
+	blk_num = (src + offs) / PAGE_SIZE + 2;
+	if (sclp_sdias_copy(buf, blk_num, 1)) {
+		TRACE("sclp_sdias_copy() failed\n");
+		return -EIO;
+	}
+	if (mode == TO_USER) {
+		if (copy_to_user((__force __user void*) dest + offs, buf,
+				 count - offs))
+			return -EFAULT;
+	} else
+		memcpy(dest + offs, buf, count - offs);
+out:
+	return 0;
+}
+
+static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
+{
+	return memcpy_hsa((void __force *) dest, src, count, TO_USER);
+}
+
+static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
+{
+	return memcpy_hsa(dest, src, count, TO_KERNEL);
+}
+
+static int __init init_cpu_info(enum arch_id arch)
+{
+	struct save_area *sa;
+
+	/* get info for boot cpu from lowcore, stored in the HSA */
+
+	sa = kmalloc(sizeof(*sa), GFP_KERNEL);
+	if (!sa)
+		return -ENOMEM;
+	if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
+		TRACE("could not copy from HSA\n");
+		kfree(sa);
+		return -EIO;
+	}
+	zfcpdump_save_areas[0] = sa;
+	return 0;
+}
+
+static DEFINE_MUTEX(zcore_mutex);
+
+#define DUMP_VERSION	0x5
+#define DUMP_MAGIC	0xa8190173618f23fdULL
+#define DUMP_ARCH_S390X	2
+#define DUMP_ARCH_S390	1
+#define HEADER_SIZE	4096
+
+/* dump header dumped according to s390 crash dump format */
+
+struct zcore_header {
+	u64 magic;
+	u32 version;
+	u32 header_size;
+	u32 dump_level;
+	u32 page_size;
+	u64 mem_size;
+	u64 mem_start;
+	u64 mem_end;
+	u32 num_pages;
+	u32 pad1;
+	u64 tod;
+	struct cpuid cpu_id;
+	u32 arch_id;
+	u32 volnr;
+	u32 build_arch;
+	u64 rmem_size;
+	u8 mvdump;
+	u16 cpu_cnt;
+	u16 real_cpu_cnt;
+	u8 end_pad1[0x200-0x061];
+	u64 mvdump_sign;
+	u64 mvdump_zipl_time;
+	u8 end_pad2[0x800-0x210];
+	u32 lc_vec[512];
+} __attribute__((packed,__aligned__(16)));
+
+static struct zcore_header zcore_header = {
+	.magic		= DUMP_MAGIC,
+	.version	= DUMP_VERSION,
+	.header_size	= 4096,
+	.dump_level	= 0,
+	.page_size	= PAGE_SIZE,
+	.mem_start	= 0,
+#ifdef CONFIG_64BIT
+	.build_arch	= DUMP_ARCH_S390X,
+#else
+	.build_arch	= DUMP_ARCH_S390,
+#endif
+};
+
+/*
+ * Copy lowcore info to buffer. Use map in order to copy only register parts.
+ *
+ * @buf:    User buffer
+ * @sa:     Pointer to save area
+ * @sa_off: Offset in save area to copy
+ * @len:    Number of bytes to copy
+ */
+static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
+{
+	int i;
+	char *lc_mask = (char*)&sys_info.lc_mask;
+
+	for (i = 0; i < len; i++) {
+		if (!lc_mask[i + sa_off])
+			continue;
+		if (copy_to_user(buf + i, sa + sa_off + i, 1))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+/*
+ * Copy lowcores info to memory, if necessary
+ *
+ * @buf:   User buffer
+ * @addr:  Start address of buffer in dump memory
+ * @count: Size of buffer
+ */
+static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
+{
+	unsigned long end;
+	int i = 0;
+
+	if (count == 0)
+		return 0;
+
+	end = start + count;
+	while (zfcpdump_save_areas[i]) {
+		unsigned long cp_start, cp_end; /* copy range */
+		unsigned long sa_start, sa_end; /* save area range */
+		unsigned long prefix;
+		unsigned long sa_off, len, buf_off;
+
+		prefix = zfcpdump_save_areas[i]->pref_reg;
+		sa_start = prefix + sys_info.sa_base;
+		sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
+
+		if ((end < sa_start) || (start > sa_end))
+			goto next;
+		cp_start = max(start, sa_start);
+		cp_end = min(end, sa_end);
+
+		buf_off = cp_start - start;
+		sa_off = cp_start - sa_start;
+		len = cp_end - cp_start;
+
+		TRACE("copy_lc for: %lx\n", start);
+		if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
+			return -EFAULT;
+next:
+		i++;
+	}
+	return 0;
+}
+
+/*
+ * Read routine for zcore character device
+ * First 4K are dump header
+ * Next 32MB are HSA Memory
+ * Rest is read from absolute Memory
+ */
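+/*
+ * Offset mapping sketch (illustration only): for a file offset of at least
+ * HEADER_SIZE the memory address read is *ppos - HEADER_SIZE; addresses
+ * below ZFCPDUMP_HSA_SIZE are fetched from the HSA via memcpy_hsa_user(),
+ * everything above comes from absolute storage via copy_to_user_real().
+ */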
+static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
+			  loff_t *ppos)
+{
+	unsigned long mem_start; /* Start address in memory */
+	size_t mem_offs;	 /* Offset in dump memory */
+	size_t hdr_count;	 /* Size of header part of output buffer */
+	size_t size;
+	int rc;
+
+	mutex_lock(&zcore_mutex);
+
+	if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));
+
+	/* Copy dump header */
+	if (*ppos < HEADER_SIZE) {
+		size = min(count, (size_t) (HEADER_SIZE - *ppos));
+		if (copy_to_user(buf, (char *) &zcore_header + *ppos, size)) {
+			rc = -EFAULT;
+			goto fail;
+		}
+		hdr_count = size;
+		mem_start = 0;
+	} else {
+		hdr_count = 0;
+		mem_start = *ppos - HEADER_SIZE;
+	}
+
+	mem_offs = 0;
+
+	/* Copy from HSA data */
+	if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) {
+		size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE
+			   - mem_start));
+		rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
+		if (rc)
+			goto fail;
+
+		mem_offs += size;
+	}
+
+	/* Copy from real mem */
+	size = count - mem_offs - hdr_count;
+	rc = copy_to_user_real(buf + hdr_count + mem_offs,
+			       (void *) mem_start + mem_offs, size);
+	if (rc)
+		goto fail;
+
+	/*
+	 * Since s390 dump analysis tools like lcrash or crash
+	 * expect register sets in the prefix pages of the cpus,
+	 * we copy them into the read buffer, if necessary.
+	 * buf + hdr_count: Start of memory part of output buffer
+	 * mem_start: Start memory address to copy from
+	 * count - hdr_count: Size of memory area to copy
+	 */
+	if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
+		rc = -EFAULT;
+		goto fail;
+	}
+	*ppos += count;
+fail:
+	mutex_unlock(&zcore_mutex);
+	return (rc < 0) ? rc : count;
+}
+
+static int zcore_open(struct inode *inode, struct file *filp)
+{
+	if (!hsa_available)
+		return -ENODATA;
+	else
+		return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+}
+
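+/*
+ * Releasing the device frees the HSA via diag308 and clears hsa_available,
+ * so the dump can be read during a single open only.
+ */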
+static int zcore_release(struct inode *inode, struct file *filep)
+{
+	diag308(DIAG308_REL_HSA, NULL);
+	hsa_available = 0;
+	return 0;
+}
+
+static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
+{
+	loff_t rc;
+
+	mutex_lock(&zcore_mutex);
+	switch (orig) {
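+	/* 0 = SEEK_SET, 1 = SEEK_CUR; SEEK_END is not supported. */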
+	case 0:
+		file->f_pos = offset;
+		rc = file->f_pos;
+		break;
+	case 1:
+		file->f_pos += offset;
+		rc = file->f_pos;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+	mutex_unlock(&zcore_mutex);
+	return rc;
+}
+
+static const struct file_operations zcore_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= zcore_lseek,
+	.read		= zcore_read,
+	.open		= zcore_open,
+	.release	= zcore_release,
+};
+
+static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	return simple_read_from_buffer(buf, count, ppos, filp->private_data,
+				       MEMORY_CHUNKS * CHUNK_INFO_SIZE);
+}
+
+static int zcore_memmap_open(struct inode *inode, struct file *filp)
+{
+	int i;
+	char *buf;
+	struct mem_chunk *chunk_array;
+
+	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
+			      GFP_KERNEL);
+	if (!chunk_array)
+		return -ENOMEM;
+	detect_memory_layout(chunk_array);
+	buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL);
+	if (!buf) {
+		kfree(chunk_array);
+		return -ENOMEM;
+	}
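+	/* Emit one fixed-size "<addr> <size>" record per detected memory chunk. */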
+	for (i = 0; i < MEMORY_CHUNKS; i++) {
+		sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ",
+			(unsigned long long) chunk_array[i].addr,
+			(unsigned long long) chunk_array[i].size);
+		if (chunk_array[i].size == 0)
+			break;
+	}
+	kfree(chunk_array);
+	filp->private_data = buf;
+	return nonseekable_open(inode, filp);
+}
+
+static int zcore_memmap_release(struct inode *inode, struct file *filp)
+{
+	kfree(filp->private_data);
+	return 0;
+}
+
+static const struct file_operations zcore_memmap_fops = {
+	.owner		= THIS_MODULE,
+	.read		= zcore_memmap_read,
+	.open		= zcore_memmap_open,
+	.release	= zcore_memmap_release,
+	.llseek		= no_llseek,
+};
+
+static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
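+	/*
+	 * Any write re-IPLs the system using the IPL information block
+	 * saved by zcore_reipl_init(), if one was found.
+	 */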
+	if (ipl_block) {
+		diag308(DIAG308_SET, ipl_block);
+		diag308(DIAG308_IPL, NULL);
+	}
+	return count;
+}
+
+static int zcore_reipl_open(struct inode *inode, struct file *filp)
+{
+	return nonseekable_open(inode, filp);
+}
+
+static int zcore_reipl_release(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+static const struct file_operations zcore_reipl_fops = {
+	.owner		= THIS_MODULE,
+	.write		= zcore_reipl_write,
+	.open		= zcore_reipl_open,
+	.release	= zcore_reipl_release,
+	.llseek		= no_llseek,
+};
+
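+/*
+ * Mark the bytes of the lowcore save area that hold register contents.
+ * copy_lc() copies only bytes for which this mask is non-zero.
+ */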
+#ifdef CONFIG_32BIT
+
+static void __init set_lc_mask(struct save_area *map)
+{
+	memset(&map->ext_save, 0xff, sizeof(map->ext_save));
+	memset(&map->timer, 0xff, sizeof(map->timer));
+	memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
+	memset(&map->psw, 0xff, sizeof(map->psw));
+	memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
+	memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
+	memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
+	memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
+	memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
+}
+
+#else /* CONFIG_32BIT */
+
+static void __init set_lc_mask(struct save_area *map)
+{
+	memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
+	memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
+	memset(&map->psw, 0xff, sizeof(map->psw));
+	memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
+	memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg));
+	memset(&map->tod_reg, 0xff, sizeof(map->tod_reg));
+	memset(&map->timer, 0xff, sizeof(map->timer));
+	memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
+	memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
+	memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
+}
+
+#endif /* CONFIG_32BIT */
+
+/*
+ * Initialize dump globals for a given architecture
+ */
+static int __init sys_info_init(enum arch_id arch)
+{
+	int rc;
+
+	switch (arch) {
+	case ARCH_S390X:
+		pr_alert("DETECTED 'S390X (64 bit) OS'\n");
+		break;
+	case ARCH_S390:
+		pr_alert("DETECTED 'S390 (32 bit) OS'\n");
+		break;
+	default:
+		pr_alert("0x%x is an unknown architecture.\n",arch);
+		return -EINVAL;
+	}
+	sys_info.sa_base = SAVE_AREA_BASE;
+	sys_info.sa_size = sizeof(struct save_area);
+	sys_info.arch = arch;
+	set_lc_mask(&sys_info.lc_mask);
+	rc = init_cpu_info(arch);
+	if (rc)
+		return rc;
+	sys_info.mem_size = real_memory_size;
+
+	return 0;
+}
+
+static int __init check_sdias(void)
+{
+	int rc, act_hsa_size;
+
+	rc = sclp_sdias_blk_count();
+	if (rc < 0) {
+		TRACE("Could not determine HSA size\n");
+		return rc;
+	}
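+	/* sclp_sdias_blk_count() reports the HSA size in 4K blocks. */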
+	act_hsa_size = (rc - 1) * PAGE_SIZE;
+	if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
+		TRACE("HSA size too small: %i\n", act_hsa_size);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int __init get_mem_size(unsigned long *mem)
+{
+	int i;
+	struct mem_chunk *chunk_array;
+
+	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
+			      GFP_KERNEL);
+	if (!chunk_array)
+		return -ENOMEM;
+	detect_memory_layout(chunk_array);
+	for (i = 0; i < MEMORY_CHUNKS; i++) {
+		if (chunk_array[i].size == 0)
+			break;
+		*mem += chunk_array[i].size;
+	}
+	kfree(chunk_array);
+	return 0;
+}
+
+static int __init zcore_header_init(int arch, struct zcore_header *hdr)
+{
+	int rc, i;
+	unsigned long memory = 0;
+	u32 prefix;
+
+	if (arch == ARCH_S390X)
+		hdr->arch_id = DUMP_ARCH_S390X;
+	else
+		hdr->arch_id = DUMP_ARCH_S390;
+	rc = get_mem_size(&memory);
+	if (rc)
+		return rc;
+	hdr->mem_size = memory;
+	hdr->rmem_size = memory;
+	hdr->mem_end = sys_info.mem_size;
+	hdr->num_pages = memory / PAGE_SIZE;
+	hdr->tod = get_clock();
+	get_cpu_id(&hdr->cpu_id);
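+	/*
+	 * Count all CPU save areas and record the prefix page of every CPU
+	 * whose prefix register is non-zero.
+	 */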
+	for (i = 0; zfcpdump_save_areas[i]; i++) {
+		prefix = zfcpdump_save_areas[i]->pref_reg;
+		hdr->real_cpu_cnt++;
+		if (!prefix)
+			continue;
+		hdr->lc_vec[hdr->cpu_cnt] = prefix;
+		hdr->cpu_cnt++;
+	}
+	return 0;
+}
+
+/*
+ * Provide IPL parameter information block from either HSA or memory
+ * for future reipl
+ */
+static int __init zcore_reipl_init(void)
+{
+	struct ipib_info ipib_info;
+	int rc;
+
+	rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
+	if (rc)
+		return rc;
+	if (ipib_info.ipib == 0)
+		return 0;
+	ipl_block = (void *) __get_free_page(GFP_KERNEL);
+	if (!ipl_block)
+		return -ENOMEM;
+	if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE)
+		rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
+	else
+		rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
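+	/* Verify the IPIB against the checksum read from the HSA. */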
+	if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
+	    ipib_info.checksum) {
+		TRACE("Checksum does not match\n");
+		free_page((unsigned long) ipl_block);
+		ipl_block = NULL;
+	}
+	return 0;
+}
+
+static int __init zcore_init(void)
+{
+	unsigned char arch;
+	int rc;
+
+	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+		return -ENODATA;
+	if (OLDMEM_BASE)
+		return -ENODATA;
+
+	zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
+	debug_register_view(zcore_dbf, &debug_sprintf_view);
+	debug_set_level(zcore_dbf, 6);
+
+	TRACE("devno:  %x\n", ipl_info.data.fcp.dev_id.devno);
+	TRACE("wwpn:   %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
+	TRACE("lun:    %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
+
+	rc = sclp_sdias_init();
+	if (rc)
+		goto fail;
+
+	rc = check_sdias();
+	if (rc)
+		goto fail;
+
+	rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
+	if (rc)
+		goto fail;
+
+#ifdef CONFIG_64BIT
+	if (arch == ARCH_S390) {
+		pr_alert("The 64-bit dump tool cannot be used for a "
+			 "32-bit system\n");
+		rc = -EINVAL;
+		goto fail;
+	}
+#else /* CONFIG_64BIT */
+	if (arch == ARCH_S390X) {
+		pr_alert("The 32-bit dump tool cannot be used for a "
+			 "64-bit system\n");
+		rc = -EINVAL;
+		goto fail;
+	}
+#endif /* CONFIG_64BIT */
+
+	rc = sys_info_init(arch);
+	if (rc)
+		goto fail;
+
+	rc = zcore_header_init(arch, &zcore_header);
+	if (rc)
+		goto fail;
+
+	rc = zcore_reipl_init();
+	if (rc)
+		goto fail;
+
+	zcore_dir = debugfs_create_dir("zcore" , NULL);
+	if (!zcore_dir) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+	zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
+					 &zcore_fops);
+	if (!zcore_file) {
+		rc = -ENOMEM;
+		goto fail_dir;
+	}
+	zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
+						NULL, &zcore_memmap_fops);
+	if (!zcore_memmap_file) {
+		rc = -ENOMEM;
+		goto fail_file;
+	}
+	zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
+						NULL, &zcore_reipl_fops);
+	if (!zcore_reipl_file) {
+		rc = -ENOMEM;
+		goto fail_memmap_file;
+	}
+	hsa_available = 1;
+	return 0;
+
+fail_memmap_file:
+	debugfs_remove(zcore_memmap_file);
+fail_file:
+	debugfs_remove(zcore_file);
+fail_dir:
+	debugfs_remove(zcore_dir);
+fail:
+	diag308(DIAG308_REL_HSA, NULL);
+	return rc;
+}
+
+static void __exit zcore_exit(void)
+{
+	debug_unregister(zcore_dbf);
+	sclp_sdias_exit();
+	free_page((unsigned long) ipl_block);
+	debugfs_remove(zcore_reipl_file);
+	debugfs_remove(zcore_memmap_file);
+	debugfs_remove(zcore_file);
+	debugfs_remove(zcore_dir);
+	diag308(DIAG308_REL_HSA, NULL);
+}
+
+MODULE_AUTHOR("Copyright IBM Corp. 2003,2008");
+MODULE_DESCRIPTION("zcore module for zfcpdump support");
+MODULE_LICENSE("GPL");
+
+subsys_initcall(zcore_init);
+module_exit(zcore_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/Makefile b/ap/os/linux/linux-3.4.x/drivers/s390/cio/Makefile
new file mode 100644
index 0000000..e1b700a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the S/390 common i/o drivers
+#
+
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
+	fcx.o itcw.o crw.o ccwreq.o
+ccw_device-objs += device.o device_fsm.o device_ops.o
+ccw_device-objs += device_id.o device_pgid.o device_status.o
+obj-y += ccw_device.o cmf.o
+obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
+obj-$(CONFIG_CCWGROUP) += ccwgroup.o
+
+qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
+obj-$(CONFIG_QDIO) += qdio.o
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/airq.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/airq.c
new file mode 100644
index 0000000..65d2e76
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/airq.c
@@ -0,0 +1,151 @@
+/*
+ *  drivers/s390/cio/airq.c
+ *    Support for adapter interruptions
+ *
+ *    Copyright IBM Corp. 1999,2007
+ *    Author(s): Ingo Adlung <adlung@de.ibm.com>
+ *		 Cornelia Huck <cornelia.huck@de.ibm.com>
+ *		 Arnd Bergmann <arndb@de.ibm.com>
+ *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+
+#include <asm/airq.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+
+#define NR_AIRQS		32
+#define NR_AIRQS_PER_WORD	sizeof(unsigned long)
+#define NR_AIRQ_WORDS		(NR_AIRQS / NR_AIRQS_PER_WORD)
+
+union indicator_t {
+	unsigned long word[NR_AIRQ_WORDS];
+	unsigned char byte[NR_AIRQS];
+} __attribute__((packed));
+
+struct airq_t {
+	adapter_int_handler_t handler;
+	void *drv_data;
+};
+
+static union indicator_t indicators[MAX_ISC+1];
+static struct airq_t *airqs[MAX_ISC+1][NR_AIRQS];
+
+static int register_airq(struct airq_t *airq, u8 isc)
+{
+	int i;
+
+	for (i = 0; i < NR_AIRQS; i++)
+		if (!cmpxchg(&airqs[isc][i], NULL, airq))
+			return i;
+	return -ENOMEM;
+}
+
+/**
+ * s390_register_adapter_interrupt() - register adapter interrupt handler
+ * @handler: adapter handler to be registered
+ * @drv_data: driver data passed with each call to the handler
+ * @isc: isc for which the handler should be called
+ *
+ * Returns:
+ *  Pointer to the indicator to be used on success
+ *  ERR_PTR() if registration failed
+ */
+void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
+				      void *drv_data, u8 isc)
+{
+	struct airq_t *airq;
+	char dbf_txt[16];
+	int ret;
+
+	if (isc > MAX_ISC)
+		return ERR_PTR(-EINVAL);
+	airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL);
+	if (!airq) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	airq->handler = handler;
+	airq->drv_data = drv_data;
+
+	ret = register_airq(airq, isc);
+out:
+	snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret);
+	CIO_TRACE_EVENT(4, dbf_txt);
+	if (ret < 0) {
+		kfree(airq);
+		return ERR_PTR(ret);
+	} else
+		return &indicators[isc].byte[ret];
+}
+EXPORT_SYMBOL(s390_register_adapter_interrupt);
+
+/**
+ * s390_unregister_adapter_interrupt - unregister adapter interrupt handler
+ * @ind: indicator for which the handler is to be unregistered
+ * @isc: interruption subclass
+ */
+void s390_unregister_adapter_interrupt(void *ind, u8 isc)
+{
+	struct airq_t *airq;
+	char dbf_txt[16];
+	int i;
+
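+	/* Recover the indicator index from the pointer handed out at registration. */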
+	i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]);
+	snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i);
+	CIO_TRACE_EVENT(4, dbf_txt);
+	indicators[isc].byte[i] = 0;
+	airq = xchg(&airqs[isc][i], NULL);
+	/*
+	 * Allow interrupts to complete. This will ensure that the airq handle
+	 * is no longer referenced by any interrupt handler.
+	 */
+	synchronize_sched();
+	kfree(airq);
+}
+EXPORT_SYMBOL(s390_unregister_adapter_interrupt);
+
+#define INDICATOR_MASK	(0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8))
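+/* INDICATOR_MASK selects the most significant byte of an indicator word. */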
+
+void do_adapter_IO(u8 isc)
+{
+	int w;
+	int i;
+	unsigned long word;
+	struct airq_t *airq;
+
+	/*
+	 * Access indicator array in word-sized chunks to minimize storage
+	 * fetch operations.
+	 */
+	for (w = 0; w < NR_AIRQ_WORDS; w++) {
+		word = indicators[isc].word[w];
+		i = w * NR_AIRQS_PER_WORD;
+		/*
+		 * Check bytes within word for active indicators.
+		 */
+		while (word) {
+			if (word & INDICATOR_MASK) {
+				airq = airqs[isc][i];
+				/* Make sure gcc reads from airqs only once. */
+				barrier();
+				if (likely(airq))
+					airq->handler(&indicators[isc].byte[i],
+						      airq->drv_data);
+				else
+					/*
+					 * Reset ill-behaved indicator.
+					 */
+					indicators[isc].byte[i] = 0;
+			}
+			word <<= 8;
+			i++;
+		}
+	}
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/blacklist.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/blacklist.c
new file mode 100644
index 0000000..08c6603
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/blacklist.c
@@ -0,0 +1,399 @@
+/*
+ *  drivers/s390/cio/blacklist.c
+ *   S/390 common I/O routines -- blacklisting of specific devices
+ *
+ *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
+ *			      IBM Corporation
+ *    Author(s): Ingo Adlung (adlung@de.ibm.com)
+ *		 Cornelia Huck (cornelia.huck@de.ibm.com)
+ *		 Arnd Bergmann (arndb@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+
+#include <asm/cio.h>
+#include <asm/uaccess.h>
+
+#include "blacklist.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+
+/*
+ * "Blacklisting" of certain devices:
+ * Device numbers given on the command line as cio_ignore=... won't be known
+ * to Linux.
+ *
+ * These can be single devices or ranges of devices
+ */
+
+/* 65536 bits for each set to indicate if a devno is blacklisted or not */
+#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
+			 (8*sizeof(long)))
+static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS];
+typedef enum {add, free} range_action;
+
+/*
+ * Function: blacklist_range
+ * (Un-)blacklist the devices from-to
+ */
+static int blacklist_range(range_action action, unsigned int from_ssid,
+			   unsigned int to_ssid, unsigned int from,
+			   unsigned int to, int msgtrigger)
+{
+	if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
+		if (msgtrigger)
+			pr_warning("0.%x.%04x to 0.%x.%04x is not a valid "
+				   "range for cio_ignore\n", from_ssid, from,
+				   to_ssid, to);
+
+		return 1;
+	}
+
+	while ((from_ssid < to_ssid) || ((from_ssid == to_ssid) &&
+	       (from <= to))) {
+		if (action == add)
+			set_bit(from, bl_dev[from_ssid]);
+		else
+			clear_bit(from, bl_dev[from_ssid]);
+		from++;
+		if (from > __MAX_SUBCHANNEL) {
+			from_ssid++;
+			from = 0;
+		}
+	}
+
+	return 0;
+}
+
+static int pure_hex(char **cp, unsigned int *val, int min_digit,
+		    int max_digit, int max_val)
+{
+	int diff;
+
+	diff = 0;
+	*val = 0;
+
+	while (diff <= max_digit) {
+		int value = hex_to_bin(**cp);
+
+		if (value < 0)
+			break;
+		*val = *val * 16 + value;
+		(*cp)++;
+		diff++;
+	}
+
+	if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
+		return 1;
+
+	return 0;
+}
+
+static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid,
+		       unsigned int *devno, int msgtrigger)
+{
+	char *str_work;
+	int val, rc, ret;
+
+	rc = 1;
+
+	if (*str == '\0')
+		goto out;
+
+	/* old style */
+	str_work = str;
+	val = simple_strtoul(str, &str_work, 16);
+
+	if (*str_work == '\0') {
+		if (val <= __MAX_SUBCHANNEL) {
+			*devno = val;
+			*ssid = 0;
+			*cssid = 0;
+			rc = 0;
+		}
+		goto out;
+	}
+
+	/* new style */
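+	/* Bus ID of the form <cssid>.<ssid>.<devno>, e.g. "0.0.4711" (example). */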
+	str_work = str;
+	ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
+	if (ret || (str_work[0] != '.'))
+		goto out;
+	str_work++;
+	ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
+	if (ret || (str_work[0] != '.'))
+		goto out;
+	str_work++;
+	ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
+	if (ret || (str_work[0] != '\0'))
+		goto out;
+
+	rc = 0;
+out:
+	if (rc && msgtrigger)
+		pr_warning("%s is not a valid device for the cio_ignore "
+			   "kernel parameter\n", str);
+
+	return rc;
+}
+
+static int blacklist_parse_parameters(char *str, range_action action,
+				      int msgtrigger)
+{
+	unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
+	int rc, totalrc;
+	char *parm;
+	range_action ra;
+
+	totalrc = 0;
+
+	while ((parm = strsep(&str, ","))) {
+		rc = 0;
+		ra = action;
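+		/* A leading '!' inverts the action for this entry. */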
+		if (*parm == '!') {
+			if (ra == add)
+				ra = free;
+			else
+				ra = add;
+			parm++;
+		}
+		if (strcmp(parm, "all") == 0) {
+			from_cssid = 0;
+			from_ssid = 0;
+			from = 0;
+			to_cssid = __MAX_CSSID;
+			to_ssid = __MAX_SSID;
+			to = __MAX_SUBCHANNEL;
+		} else {
+			rc = parse_busid(strsep(&parm, "-"), &from_cssid,
+					 &from_ssid, &from, msgtrigger);
+			if (!rc) {
+				if (parm != NULL)
+					rc = parse_busid(parm, &to_cssid,
+							 &to_ssid, &to,
+							 msgtrigger);
+				else {
+					to_cssid = from_cssid;
+					to_ssid = from_ssid;
+					to = from;
+				}
+			}
+		}
+		if (!rc) {
+			rc = blacklist_range(ra, from_ssid, to_ssid, from, to,
+					     msgtrigger);
+			if (rc)
+				totalrc = -EINVAL;
+		} else
+			totalrc = -EINVAL;
+	}
+
+	return totalrc;
+}
+
+static int __init
+blacklist_setup (char *str)
+{
+	CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
+	if (blacklist_parse_parameters(str, add, 1))
+		return 0;
+	return 1;
+}
+
+__setup ("cio_ignore=", blacklist_setup);
+
+/* Checking if devices are blacklisted */
+
+/*
+ * Function: is_blacklisted
+ * Returns 1 if the given device number can be found in the blacklist,
+ * otherwise 0.
+ * Used by validate_subchannel()
+ */
+int
+is_blacklisted (int ssid, int devno)
+{
+	return test_bit (devno, bl_dev[ssid]);
+}
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Function: blacklist_parse_proc_parameters
+ * parse the stuff which is piped to /proc/cio_ignore
+ */
+static int blacklist_parse_proc_parameters(char *buf)
+{
+	int rc;
+	char *parm;
+
+	parm = strsep(&buf, " ");
+
+	if (strcmp("free", parm) == 0)
+		rc = blacklist_parse_parameters(buf, free, 0);
+	else if (strcmp("add", parm) == 0)
+		rc = blacklist_parse_parameters(buf, add, 0);
+	else if (strcmp("purge", parm) == 0)
+		return ccw_purge_blacklisted();
+	else
+		return -EINVAL;
+
+	css_schedule_reprobe();
+
+	return rc;
+}
+
+/* Iterator struct for all devices. */
+struct ccwdev_iter {
+	int devno;
+	int ssid;
+	int in_range;
+};
+
+static void *
+cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
+{
+	struct ccwdev_iter *iter = s->private;
+
+	if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+		return NULL;
+	memset(iter, 0, sizeof(*iter));
+	iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
+	iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
+	return iter;
+}
+
+static void
+cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
+{
+}
+
+static void *
+cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
+{
+	struct ccwdev_iter *iter;
+
+	if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+		return NULL;
+	iter = it;
+	if (iter->devno == __MAX_SUBCHANNEL) {
+		iter->devno = 0;
+		iter->ssid++;
+		if (iter->ssid > __MAX_SSID)
+			return NULL;
+	} else
+		iter->devno++;
+	(*offset)++;
+	return iter;
+}
+
+static int
+cio_ignore_proc_seq_show(struct seq_file *s, void *it)
+{
+	struct ccwdev_iter *iter;
+
+	iter = it;
+	if (!is_blacklisted(iter->ssid, iter->devno))
+		/* Not blacklisted, nothing to output. */
+		return 0;
+	if (!iter->in_range) {
+		/* First device in range. */
+		if ((iter->devno == __MAX_SUBCHANNEL) ||
+		    !is_blacklisted(iter->ssid, iter->devno + 1))
+			/* Singular device. */
+			return seq_printf(s, "0.%x.%04x\n",
+					  iter->ssid, iter->devno);
+		iter->in_range = 1;
+		return seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
+	}
+	if ((iter->devno == __MAX_SUBCHANNEL) ||
+	    !is_blacklisted(iter->ssid, iter->devno + 1)) {
+		/* Last device in range. */
+		iter->in_range = 0;
+		return seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
+	}
+	return 0;
+}
+
+static ssize_t
+cio_ignore_write(struct file *file, const char __user *user_buf,
+		 size_t user_len, loff_t *offset)
+{
+	char *buf;
+	ssize_t rc, ret, i;
+
+	if (*offset)
+		return -EINVAL;
+	if (user_len > 65536)
+		user_len = 65536;
+	buf = vzalloc(user_len + 1); /* maybe better use the stack? */
+	if (buf == NULL)
+		return -ENOMEM;
+
+	if (strncpy_from_user (buf, user_buf, user_len) < 0) {
+		rc = -EFAULT;
+		goto out_free;
+	}
+
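+	/* Strip trailing whitespace and NUL padding before parsing. */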
+	i = user_len - 1;
+	while ((i >= 0) && (isspace(buf[i]) || (buf[i] == 0))) {
+		buf[i] = '\0';
+		i--;
+	}
+	ret = blacklist_parse_proc_parameters(buf);
+	if (ret)
+		rc = ret;
+	else
+		rc = user_len;
+
+out_free:
+	vfree (buf);
+	return rc;
+}
+
+static const struct seq_operations cio_ignore_proc_seq_ops = {
+	.start = cio_ignore_proc_seq_start,
+	.stop  = cio_ignore_proc_seq_stop,
+	.next  = cio_ignore_proc_seq_next,
+	.show  = cio_ignore_proc_seq_show,
+};
+
+static int
+cio_ignore_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open_private(file, &cio_ignore_proc_seq_ops,
+				sizeof(struct ccwdev_iter));
+}
+
+static const struct file_operations cio_ignore_proc_fops = {
+	.open    = cio_ignore_proc_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private,
+	.write   = cio_ignore_write,
+};
+
+static int
+cio_ignore_proc_init (void)
+{
+	struct proc_dir_entry *entry;
+
+	entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL,
+			    &cio_ignore_proc_fops);
+	if (!entry)
+		return -ENOENT;
+	return 0;
+}
+
+__initcall (cio_ignore_proc_init);
+
+#endif /* CONFIG_PROC_FS */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/blacklist.h b/ap/os/linux/linux-3.4.x/drivers/s390/cio/blacklist.h
new file mode 100644
index 0000000..95e25c1
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/blacklist.h
@@ -0,0 +1,6 @@
+#ifndef S390_BLACKLIST_H
+#define S390_BLACKLIST_H
+
+extern int is_blacklisted (int ssid, int devno);
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/ccwgroup.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/ccwgroup.c
new file mode 100644
index 0000000..5f1dc6f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/ccwgroup.c
@@ -0,0 +1,649 @@
+/*
+ *  bus driver for ccwgroup
+ *
+ *  Copyright IBM Corp. 2002, 2009
+ *
+ *  Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ *	       Cornelia Huck (cornelia.huck@de.ibm.com)
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/dcache.h>
+
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+
+#define CCW_BUS_ID_SIZE		20
+
+/* In Linux 2.4, we had a channel device layer called "chandev"
+ * that did all sorts of obscure stuff for networking devices.
+ * This is another driver that serves as a replacement for just
+ * one of its functions, namely the translation of single subchannels
+ * to devices that use multiple subchannels.
+ */
+
+/* a device matches a driver if all its slave devices match the same
+ * entry of the driver */
+static int ccwgroup_bus_match(struct device *dev, struct device_driver * drv)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(drv);
+
+	if (gdev->creator_id == gdrv->driver_id)
+		return 1;
+
+	return 0;
+}
+
+static struct bus_type ccwgroup_bus_type;
+
+static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
+{
+	int i;
+	char str[8];
+
+	for (i = 0; i < gdev->count; i++) {
+		sprintf(str, "cdev%d", i);
+		sysfs_remove_link(&gdev->dev.kobj, str);
+		sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
+	}
+}
+
+/*
+ * Remove references from ccw devices to ccw group device and from
+ * ccw group device to ccw devices.
+ */
+static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
+{
+	struct ccw_device *cdev;
+	int i;
+
+	for (i = 0; i < gdev->count; i++) {
+		cdev = gdev->cdev[i];
+		if (!cdev)
+			continue;
+		spin_lock_irq(cdev->ccwlock);
+		dev_set_drvdata(&cdev->dev, NULL);
+		spin_unlock_irq(cdev->ccwlock);
+		gdev->cdev[i] = NULL;
+		put_device(&cdev->dev);
+	}
+}
+
+static int ccwgroup_set_online(struct ccwgroup_device *gdev)
+{
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+	int ret = 0;
+
+	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+		return -EAGAIN;
+	if (gdev->state == CCWGROUP_ONLINE)
+		goto out;
+	if (gdrv->set_online)
+		ret = gdrv->set_online(gdev);
+	if (ret)
+		goto out;
+
+	gdev->state = CCWGROUP_ONLINE;
+out:
+	atomic_set(&gdev->onoff, 0);
+	return ret;
+}
+
+static int ccwgroup_set_offline(struct ccwgroup_device *gdev)
+{
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+	int ret = 0;
+
+	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+		return -EAGAIN;
+	if (gdev->state == CCWGROUP_OFFLINE)
+		goto out;
+	if (gdrv->set_offline)
+		ret = gdrv->set_offline(gdev);
+	if (ret)
+		goto out;
+
+	gdev->state = CCWGROUP_OFFLINE;
+out:
+	atomic_set(&gdev->onoff, 0);
+	return ret;
+}
+
+static ssize_t ccwgroup_online_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+	unsigned long value;
+	int ret;
+
+	if (!dev->driver)
+		return -EINVAL;
+	if (!try_module_get(gdrv->driver.owner))
+		return -EINVAL;
+
+	ret = strict_strtoul(buf, 0, &value);
+	if (ret)
+		goto out;
+
+	if (value == 1)
+		ret = ccwgroup_set_online(gdev);
+	else if (value == 0)
+		ret = ccwgroup_set_offline(gdev);
+	else
+		ret = -EINVAL;
+out:
+	module_put(gdrv->driver.owner);
+	return (ret == 0) ? count : ret;
+}
+
+static ssize_t ccwgroup_online_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	int online;
+
+	online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0;
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", online);
+}
+
+/*
+ * Provide an 'ungroup' attribute so the user can remove group devices no
+ * longer needed or accidentally created. Saves memory :)
+ */
+static void ccwgroup_ungroup_callback(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+
+	mutex_lock(&gdev->reg_mutex);
+	if (device_is_registered(&gdev->dev)) {
+		__ccwgroup_remove_symlinks(gdev);
+		device_unregister(dev);
+		__ccwgroup_remove_cdev_refs(gdev);
+	}
+	mutex_unlock(&gdev->reg_mutex);
+}
+
+static ssize_t ccwgroup_ungroup_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	int rc;
+
+	/* Prevent concurrent online/offline processing and ungrouping. */
+	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+		return -EAGAIN;
+	if (gdev->state != CCWGROUP_OFFLINE) {
+		rc = -EINVAL;
+		goto out;
+	}
+	/* Note that we cannot unregister the device from one of its
+	 * attribute methods, so we have to use this roundabout approach.
+	 */
+	rc = device_schedule_callback(dev, ccwgroup_ungroup_callback);
+out:
+	if (rc) {
+		if (rc != -EAGAIN)
+			/* Release onoff "lock" when ungrouping failed. */
+			atomic_set(&gdev->onoff, 0);
+		return rc;
+	}
+	return count;
+}
+static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
+static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
+
+static struct attribute *ccwgroup_attrs[] = {
+	&dev_attr_online.attr,
+	&dev_attr_ungroup.attr,
+	NULL,
+};
+static struct attribute_group ccwgroup_attr_group = {
+	.attrs = ccwgroup_attrs,
+};
+static const struct attribute_group *ccwgroup_attr_groups[] = {
+	&ccwgroup_attr_group,
+	NULL,
+};
+
+static void ccwgroup_release(struct device *dev)
+{
+	kfree(to_ccwgroupdev(dev));
+}
+
+static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
+{
+	char str[8];
+	int i, rc;
+
+	for (i = 0; i < gdev->count; i++) {
+		rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj,
+				       &gdev->dev.kobj, "group_device");
+		if (rc) {
+			for (--i; i >= 0; i--)
+				sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
+						  "group_device");
+			return rc;
+		}
+	}
+	for (i = 0; i < gdev->count; i++) {
+		sprintf(str, "cdev%d", i);
+		rc = sysfs_create_link(&gdev->dev.kobj,
+				       &gdev->cdev[i]->dev.kobj, str);
+		if (rc) {
+			for (--i; i >= 0; i--) {
+				sprintf(str, "cdev%d", i);
+				sysfs_remove_link(&gdev->dev.kobj, str);
+			}
+			for (i = 0; i < gdev->count; i++)
+				sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
+						  "group_device");
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static int __get_next_bus_id(const char **buf, char *bus_id)
+{
+	int rc, len;
+	char *start, *end;
+
+	start = (char *)*buf;
+	end = strchr(start, ',');
+	if (!end) {
+		/* Last entry. Strip trailing newline, if applicable. */
+		end = strchr(start, '\n');
+		if (end)
+			*end = '\0';
+		len = strlen(start) + 1;
+	} else {
+		len = end - start + 1;
+		end++;
+	}
+	if (len < CCW_BUS_ID_SIZE) {
+		strlcpy(bus_id, start, len);
+		rc = 0;
+	} else
+		rc = -EINVAL;
+	*buf = end;
+	return rc;
+}
+
+static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE])
+{
+	int cssid, ssid, devno;
+
+	/* Must be of form %x.%1x.%04x */
+	if (sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) != 3)
+		return 0;
+	return 1;
+}
+
+/**
+ * ccwgroup_create_from_string() - create and register a ccw group device
+ * @root: parent device for the new device
+ * @creator_id: identifier of creating driver
+ * @cdrv: ccw driver of slave devices
+ * @num_devices: number of slave devices
+ * @buf: buffer containing comma separated bus ids of slave devices
+ *
+ * Create and register a new ccw group device as a child of @root. Slave
+ * devices are obtained from the list of bus ids given in @buf and must all
+ * belong to @cdrv.
+ * Returns:
+ *  %0 on success and an error code on failure.
+ * Context:
+ *  non-atomic
+ */
+int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
+				struct ccw_driver *cdrv, int num_devices,
+				const char *buf)
+{
+	struct ccwgroup_device *gdev;
+	int rc, i;
+	char tmp_bus_id[CCW_BUS_ID_SIZE];
+	const char *curr_buf;
+
+	gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
+		       GFP_KERNEL);
+	if (!gdev)
+		return -ENOMEM;
+
+	atomic_set(&gdev->onoff, 0);
+	mutex_init(&gdev->reg_mutex);
+	mutex_lock(&gdev->reg_mutex);
+	gdev->creator_id = creator_id;
+	gdev->count = num_devices;
+	gdev->dev.bus = &ccwgroup_bus_type;
+	gdev->dev.parent = root;
+	gdev->dev.release = ccwgroup_release;
+	device_initialize(&gdev->dev);
+
+	curr_buf = buf;
+	for (i = 0; i < num_devices && curr_buf; i++) {
+		rc = __get_next_bus_id(&curr_buf, tmp_bus_id);
+		if (rc != 0)
+			goto error;
+		if (!__is_valid_bus_id(tmp_bus_id)) {
+			rc = -EINVAL;
+			goto error;
+		}
+		gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id);
+		/*
+		 * All devices have to be of the same type in
+		 * order to be grouped.
+		 */
+		if (!gdev->cdev[i]
+		    || gdev->cdev[i]->id.driver_info !=
+		    gdev->cdev[0]->id.driver_info) {
+			rc = -EINVAL;
+			goto error;
+		}
+		/* Don't allow a device to belong to more than one group. */
+		spin_lock_irq(gdev->cdev[i]->ccwlock);
+		if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
+			spin_unlock_irq(gdev->cdev[i]->ccwlock);
+			rc = -EINVAL;
+			goto error;
+		}
+		dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
+		spin_unlock_irq(gdev->cdev[i]->ccwlock);
+	}
+	/* Check for sufficient number of bus ids. */
+	if (i < num_devices && !curr_buf) {
+		rc = -EINVAL;
+		goto error;
+	}
+	/* Check for trailing stuff. */
+	if (i == num_devices && strlen(curr_buf) > 0) {
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
+	gdev->dev.groups = ccwgroup_attr_groups;
+	rc = device_add(&gdev->dev);
+	if (rc)
+		goto error;
+	rc = __ccwgroup_create_symlinks(gdev);
+	if (rc) {
+		device_del(&gdev->dev);
+		goto error;
+	}
+	mutex_unlock(&gdev->reg_mutex);
+	return 0;
+error:
+	for (i = 0; i < num_devices; i++)
+		if (gdev->cdev[i]) {
+			spin_lock_irq(gdev->cdev[i]->ccwlock);
+			if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
+				dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
+			spin_unlock_irq(gdev->cdev[i]->ccwlock);
+			put_device(&gdev->cdev[i]->dev);
+			gdev->cdev[i] = NULL;
+		}
+	mutex_unlock(&gdev->reg_mutex);
+	put_device(&gdev->dev);
+	return rc;
+}
+EXPORT_SYMBOL(ccwgroup_create_from_string);
+
+static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
+			     void *data)
+{
+	struct device *dev = data;
+
+	if (action == BUS_NOTIFY_UNBIND_DRIVER)
+		device_schedule_callback(dev, ccwgroup_ungroup_callback);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block ccwgroup_nb = {
+	.notifier_call = ccwgroup_notifier
+};
+
+static int __init init_ccwgroup(void)
+{
+	int ret;
+
+	ret = bus_register(&ccwgroup_bus_type);
+	if (ret)
+		return ret;
+
+	ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
+	if (ret)
+		bus_unregister(&ccwgroup_bus_type);
+
+	return ret;
+}
+
+static void __exit cleanup_ccwgroup(void)
+{
+	bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
+	bus_unregister(&ccwgroup_bus_type);
+}
+
+module_init(init_ccwgroup);
+module_exit(cleanup_ccwgroup);
+
+/************************** driver stuff ******************************/
+
+static int ccwgroup_probe(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+
+	return gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
+}
+
+static int ccwgroup_remove(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+
+	if (!dev->driver)
+		return 0;
+	if (gdrv->remove)
+		gdrv->remove(gdev);
+
+	return 0;
+}
+
+static void ccwgroup_shutdown(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+
+	if (!dev->driver)
+		return;
+	if (gdrv->shutdown)
+		gdrv->shutdown(gdev);
+}
+
+static int ccwgroup_pm_prepare(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+	/* Fail while device is being set online/offline. */
+	if (atomic_read(&gdev->onoff))
+		return -EAGAIN;
+
+	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+		return 0;
+
+	return gdrv->prepare ? gdrv->prepare(gdev) : 0;
+}
+
+static void ccwgroup_pm_complete(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+
+	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+		return;
+
+	if (gdrv->complete)
+		gdrv->complete(gdev);
+}
+
+static int ccwgroup_pm_freeze(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+		return 0;
+
+	return gdrv->freeze ? gdrv->freeze(gdev) : 0;
+}
+
+static int ccwgroup_pm_thaw(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+		return 0;
+
+	return gdrv->thaw ? gdrv->thaw(gdev) : 0;
+}
+
+static int ccwgroup_pm_restore(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+		return 0;
+
+	return gdrv->restore ? gdrv->restore(gdev) : 0;
+}
+
+static const struct dev_pm_ops ccwgroup_pm_ops = {
+	.prepare = ccwgroup_pm_prepare,
+	.complete = ccwgroup_pm_complete,
+	.freeze = ccwgroup_pm_freeze,
+	.thaw = ccwgroup_pm_thaw,
+	.restore = ccwgroup_pm_restore,
+};
+
+static struct bus_type ccwgroup_bus_type = {
+	.name   = "ccwgroup",
+	.match  = ccwgroup_bus_match,
+	.probe  = ccwgroup_probe,
+	.remove = ccwgroup_remove,
+	.shutdown = ccwgroup_shutdown,
+	.pm = &ccwgroup_pm_ops,
+};
+
+/**
+ * ccwgroup_driver_register() - register a ccw group driver
+ * @cdriver: driver to be registered
+ *
+ * This function is mainly a wrapper around driver_register().
+ */
+int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
+{
+	/* register our new driver with the core */
+	cdriver->driver.bus = &ccwgroup_bus_type;
+
+	return driver_register(&cdriver->driver);
+}
+EXPORT_SYMBOL(ccwgroup_driver_register);
+
+static int __ccwgroup_match_all(struct device *dev, void *data)
+{
+	return 1;
+}
+
+/**
+ * ccwgroup_driver_unregister() - deregister a ccw group driver
+ * @cdriver: driver to be deregistered
+ *
+ * This function is mainly a wrapper around driver_unregister().
+ */
+void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
+{
+	struct device *dev;
+
+	/* We don't want ccwgroup devices to live longer than their driver. */
+	while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
+					 __ccwgroup_match_all))) {
+		struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+
+		mutex_lock(&gdev->reg_mutex);
+		__ccwgroup_remove_symlinks(gdev);
+		device_unregister(dev);
+		__ccwgroup_remove_cdev_refs(gdev);
+		mutex_unlock(&gdev->reg_mutex);
+		put_device(dev);
+	}
+	driver_unregister(&cdriver->driver);
+}
+EXPORT_SYMBOL(ccwgroup_driver_unregister);
+
+/**
+ * ccwgroup_probe_ccwdev() - probe function for slave devices
+ * @cdev: ccw device to be probed
+ *
+ * This is a dummy probe function for ccw devices that are slave devices in
+ * a ccw group device.
+ * Returns:
+ *  always %0
+ */
+int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
+{
+	return 0;
+}
+EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
+
+/**
+ * ccwgroup_remove_ccwdev() - remove function for slave devices
+ * @cdev: ccw device to be removed
+ *
+ * This is a remove function for ccw devices that are slave devices in a ccw
+ * group device. It sets the ccw device offline and also deregisters the
+ * embedding ccw group device.
+ */
+void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
+{
+	struct ccwgroup_device *gdev;
+
+	/* Ignore offlining errors, device is gone anyway. */
+	ccw_device_set_offline(cdev);
+	/* If one of its devices is gone, the whole group is done for. */
+	spin_lock_irq(cdev->ccwlock);
+	gdev = dev_get_drvdata(&cdev->dev);
+	if (!gdev) {
+		spin_unlock_irq(cdev->ccwlock);
+		return;
+	}
+	/* Get ccwgroup device reference for local processing. */
+	get_device(&gdev->dev);
+	spin_unlock_irq(cdev->ccwlock);
+	/* Unregister group device. */
+	mutex_lock(&gdev->reg_mutex);
+	if (device_is_registered(&gdev->dev)) {
+		__ccwgroup_remove_symlinks(gdev);
+		device_unregister(&gdev->dev);
+		__ccwgroup_remove_cdev_refs(gdev);
+	}
+	mutex_unlock(&gdev->reg_mutex);
+	/* Release ccwgroup device reference for local processing. */
+	put_device(&gdev->dev);
+}
+EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/ccwreq.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/ccwreq.c
new file mode 100644
index 0000000..5156264
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/ccwreq.c
@@ -0,0 +1,367 @@
+/*
+ *  Handling of internal CCW device requests.
+ *
+ *    Copyright IBM Corp. 2009, 2011
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "io_sch.h"
+#include "cio.h"
+#include "device.h"
+#include "cio_debug.h"
+
+/**
+ * lpm_adjust - adjust path mask
+ * @lpm: path mask to adjust
+ * @mask: mask of available paths
+ *
+ * Shift @lpm right until @lpm and @mask have at least one bit in common or
+ * until @lpm is zero. Return the resulting lpm.
+ */
+int lpm_adjust(int lpm, int mask)
+{
+	while (lpm && ((lpm & mask) == 0))
+		lpm >>= 1;
+	return lpm;
+}
+
+/*
+ * Adjust path mask to use next path and reset retry count. Return resulting
+ * path mask.
+ */
+static u16 ccwreq_next_path(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+
+	if (!req->singlepath) {
+		req->mask = 0;
+		goto out;
+	}
+	req->retries	= req->maxretries;
+	req->mask	= lpm_adjust(req->mask >> 1, req->lpm);
+out:
+	return req->mask;
+}
+
+/*
+ * Clean up device state and report to callback.
+ */
+static void ccwreq_stop(struct ccw_device *cdev, int rc)
+{
+	struct ccw_request *req = &cdev->private->req;
+
+	if (req->done)
+		return;
+	req->done = 1;
+	ccw_device_set_timeout(cdev, 0);
+	memset(&cdev->private->irb, 0, sizeof(struct irb));
+	if (rc && rc != -ENODEV && req->drc)
+		rc = req->drc;
+	req->callback(cdev, req->data, rc);
+}
+
+/*
+ * (Re-)Start the operation until retries and paths are exhausted.
+ */
+static void ccwreq_do(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw1 *cp = req->cp;
+	int rc = -EACCES;
+
+	while (req->mask) {
+		if (req->retries-- == 0) {
+			/* Retries exhausted, try next path. */
+			ccwreq_next_path(cdev);
+			continue;
+		}
+		/* Perform start function. */
+		memset(&cdev->private->irb, 0, sizeof(struct irb));
+		rc = cio_start(sch, cp, (u8) req->mask);
+		if (rc == 0) {
+			/* I/O started successfully. */
+			ccw_device_set_timeout(cdev, req->timeout);
+			return;
+		}
+		if (rc == -ENODEV) {
+			/* Permanent device error. */
+			break;
+		}
+		if (rc == -EACCES) {
+			/* Permanent path error. */
+			ccwreq_next_path(cdev);
+			continue;
+		}
+		/* Temporary improper status. */
+		rc = cio_clear(sch);
+		if (rc)
+			break;
+		return;
+	}
+	ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_start - perform I/O request
+ * @cdev: ccw device
+ *
+ * Perform the I/O request specified by cdev->req.
+ */
+void ccw_request_start(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+
+	if (req->singlepath) {
+		/* Try all paths twice to counter link flapping. */
+		req->mask = 0x8080;
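+		/*
+		 * Shifting the 16-bit mask right one bit at a time makes its
+		 * low byte select each of the eight path positions twice.
+		 */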
+	} else
+		req->mask = req->lpm;
+
+	req->retries	= req->maxretries;
+	req->mask	= lpm_adjust(req->mask, req->lpm);
+	req->drc	= 0;
+	req->done	= 0;
+	req->cancel	= 0;
+	if (!req->mask)
+		goto out_nopath;
+	ccwreq_do(cdev);
+	return;
+
+out_nopath:
+	ccwreq_stop(cdev, -EACCES);
+}
+
+/**
+ * ccw_request_cancel - cancel running I/O request
+ * @cdev: ccw device
+ *
+ * Cancel the I/O request specified by cdev->req. Return non-zero if request
+ * has already finished, zero otherwise.
+ */
+int ccw_request_cancel(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	int rc;
+
+	if (req->done)
+		return 1;
+	req->cancel = 1;
+	rc = cio_clear(sch);
+	if (rc)
+		ccwreq_stop(cdev, rc);
+	return 0;
+}
+
+/*
+ * Return the status of the internal I/O started on the specified ccw device.
+ * Perform BASIC SENSE if required.
+ */
+static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
+{
+	struct irb *irb = &cdev->private->irb;
+	struct cmd_scsw *scsw = &irb->scsw.cmd;
+	enum uc_todo todo;
+
+	/* Perform BASIC SENSE if needed. */
+	if (ccw_device_accumulate_and_sense(cdev, lcirb))
+		return IO_RUNNING;
+	/* Check for halt/clear interrupt. */
+	if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+		return IO_KILLED;
+	/* Check for path error. */
+	if (scsw->cc == 3 || scsw->pno)
+		return IO_PATH_ERROR;
+	/* Handle BASIC SENSE data. */
+	if (irb->esw.esw0.erw.cons) {
+		CIO_TRACE_EVENT(2, "sensedata");
+		CIO_HEX_EVENT(2, &cdev->private->dev_id,
+			      sizeof(struct ccw_dev_id));
+		CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
+		/* Check for command reject. */
+		if (irb->ecw[0] & SNS0_CMD_REJECT)
+			return IO_REJECTED;
+		/* Ask the driver what to do */
+		if (cdev->drv && cdev->drv->uc_handler) {
+			todo = cdev->drv->uc_handler(cdev, lcirb);
+			CIO_TRACE_EVENT(2, "uc_response");
+			CIO_HEX_EVENT(2, &todo, sizeof(todo));
+			switch (todo) {
+			case UC_TODO_RETRY:
+				return IO_STATUS_ERROR;
+			case UC_TODO_RETRY_ON_NEW_PATH:
+				return IO_PATH_ERROR;
+			case UC_TODO_STOP:
+				return IO_REJECTED;
+			default:
+				return IO_STATUS_ERROR;
+			}
+		}
+		/* Assume that unexpected SENSE data implies an error. */
+		return IO_STATUS_ERROR;
+	}
+	/* Check for channel errors. */
+	if (scsw->cstat != 0)
+		return IO_STATUS_ERROR;
+	/* Check for device errors. */
+	if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+		return IO_STATUS_ERROR;
+	/* Check for final state. */
+	if (!(scsw->dstat & DEV_STAT_DEV_END))
+		return IO_RUNNING;
+	/* Check for other improper status. */
+	if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
+		return IO_STATUS_ERROR;
+	return IO_DONE;
+}
+
+/*
+ * Log ccw request status.
+ */
+static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct {
+		struct ccw_dev_id dev_id;
+		u16 retries;
+		u8 lpm;
+		u8 status;
+	}  __attribute__ ((packed)) data;
+	data.dev_id	= cdev->private->dev_id;
+	data.retries	= req->retries;
+	data.lpm	= (u8) req->mask;
+	data.status	= (u8) status;
+	CIO_TRACE_EVENT(2, "reqstat");
+	CIO_HEX_EVENT(2, &data, sizeof(data));
+}
+
+/**
+ * ccw_request_handler - interrupt handler for I/O request procedure.
+ * @cdev: ccw device
+ *
+ * Handle interrupt during I/O request procedure.
+ */
+void ccw_request_handler(struct ccw_device *cdev)
+{
+	struct irb *irb = (struct irb *)&S390_lowcore.irb;
+	struct ccw_request *req = &cdev->private->req;
+	enum io_status status;
+	int rc = -EOPNOTSUPP;
+
+	/* Check status of I/O request. */
+	status = ccwreq_status(cdev, irb);
+	if (req->filter)
+		status = req->filter(cdev, req->data, irb, status);
+	if (status != IO_RUNNING)
+		ccw_device_set_timeout(cdev, 0);
+	if (status != IO_DONE && status != IO_RUNNING)
+		ccwreq_log_status(cdev, status);
+	switch (status) {
+	case IO_DONE:
+		break;
+	case IO_RUNNING:
+		return;
+	case IO_REJECTED:
+		goto err;
+	case IO_PATH_ERROR:
+		goto out_next_path;
+	case IO_STATUS_ERROR:
+		goto out_restart;
+	case IO_KILLED:
+		/* Check if request was cancelled on purpose. */
+		if (req->cancel) {
+			rc = -EIO;
+			goto err;
+		}
+		goto out_restart;
+	}
+	/* Check back with request initiator. */
+	if (!req->check)
+		goto out;
+	switch (req->check(cdev, req->data)) {
+	case 0:
+		break;
+	case -EAGAIN:
+		goto out_restart;
+	case -EACCES:
+		goto out_next_path;
+	default:
+		goto err;
+	}
+out:
+	ccwreq_stop(cdev, 0);
+	return;
+
+out_next_path:
+	/* Try next path and restart I/O. */
+	if (!ccwreq_next_path(cdev)) {
+		rc = -EACCES;
+		goto err;
+	}
+out_restart:
+	/* Restart. */
+	ccwreq_do(cdev);
+	return;
+err:
+	ccwreq_stop(cdev, rc);
+}
+
+
+/**
+ * ccw_request_timeout - timeout handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle timeout during I/O request procedure.
+ */
+void ccw_request_timeout(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	int rc = -ENODEV, chp;
+
+	if (cio_update_schib(sch))
+		goto err;
+
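+	/* Warn for each channel path set in the last-path-used mask (lpum). */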
+	for (chp = 0; chp < 8; chp++) {
+		if ((0x80 >> chp) & sch->schib.pmcw.lpum)
+			pr_warning("%s: No interrupt was received within %lus "
+				   "(CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
+				   dev_name(&cdev->dev), req->timeout / HZ,
+				   scsw_cstat(&sch->schib.scsw),
+				   scsw_dstat(&sch->schib.scsw),
+				   sch->schid.cssid,
+				   sch->schib.pmcw.chpid[chp]);
+	}
+
+	if (!ccwreq_next_path(cdev)) {
+		/* set the final return code for this request */
+		req->drc = -ETIME;
+	}
+	rc = cio_clear(sch);
+	if (rc)
+		goto err;
+	return;
+
+err:
+	ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_notoper - notoper handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle notoper during I/O request procedure.
+ */
+void ccw_request_notoper(struct ccw_device *cdev)
+{
+	ccwreq_stop(cdev, -ENODEV);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/chp.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/chp.c
new file mode 100644
index 0000000..e792436
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/chp.c
@@ -0,0 +1,740 @@
+/*
+ *  drivers/s390/cio/chp.c
+ *
+ *    Copyright IBM Corp. 1999,2010
+ *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
+ *		 Arnd Bergmann (arndb@de.ibm.com)
+ *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <asm/chpid.h>
+#include <asm/sclp.h>
+#include <asm/crw.h>
+
+#include "cio.h"
+#include "css.h"
+#include "ioasm.h"
+#include "cio_debug.h"
+#include "chp.h"
+
+#define to_channelpath(device) container_of(device, struct channel_path, dev)
+#define CHP_INFO_UPDATE_INTERVAL	1*HZ
+
+enum cfg_task_t {
+	cfg_none,
+	cfg_configure,
+	cfg_deconfigure
+};
+
+/* Map for pending configure tasks. */
+static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
+static DEFINE_MUTEX(cfg_lock);
+static int cfg_busy;
+
+/* Map for channel-path status. */
+static struct sclp_chp_info chp_info;
+static DEFINE_MUTEX(info_lock);
+
+/* Time after which channel-path status may be outdated. */
+static unsigned long chp_info_expires;
+
+/* Workqueue to perform pending configure tasks. */
+static struct workqueue_struct *chp_wq;
+static struct work_struct cfg_work;
+
+/* Wait queue for configure completion events. */
+static wait_queue_head_t cfg_wait_queue;
+
+/* Set vary state for given chpid. */
+static void set_chp_logically_online(struct chp_id chpid, int onoff)
+{
+	chpid_to_chp(chpid)->state = onoff;
+}
+
+/* On success return 0 if channel-path is varied offline, 1 if it is varied
+ * online. Return -ENODEV if channel-path is not registered. */
+int chp_get_status(struct chp_id chpid)
+{
+	return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
+}
+
+/**
+ * chp_get_sch_opm - return opm for subchannel
+ * @sch: subchannel
+ *
+ * Calculate and return the operational path mask (opm) based on the chpids
+ * used by the subchannel and the status of the associated channel-paths.
+ */
+u8 chp_get_sch_opm(struct subchannel *sch)
+{
+	struct chp_id chpid;
+	int opm;
+	int i;
+
+	opm = 0;
+	chp_id_init(&chpid);
+	for (i = 0; i < 8; i++) {
+		opm <<= 1;
+		chpid.id = sch->schib.pmcw.chpid[i];
+		if (chp_get_status(chpid) != 0)
+			opm |= 1;
+	}
+	return opm;
+}
+EXPORT_SYMBOL_GPL(chp_get_sch_opm);
+
+/**
+ * chp_is_registered - check if a channel-path is registered
+ * @chpid: channel-path ID
+ *
+ * Return non-zero if a channel-path with the given chpid is registered,
+ * zero otherwise.
+ */
+int chp_is_registered(struct chp_id chpid)
+{
+	return chpid_to_chp(chpid) != NULL;
+}
+
+/*
+ * Function: s390_vary_chpid
+ * Varies the specified chpid online or offline
+ */
+static int s390_vary_chpid(struct chp_id chpid, int on)
+{
+	char dbf_text[15];
+	int status;
+
+	sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
+		chpid.id);
+	CIO_TRACE_EVENT(2, dbf_text);
+
+	status = chp_get_status(chpid);
+	if (!on && !status)
+		return 0;
+
+	set_chp_logically_online(chpid, on);
+	chsc_chp_vary(chpid, on);
+	return 0;
+}
+
+/*
+ * Channel measurement related functions
+ */
+static ssize_t chp_measurement_chars_read(struct file *filp,
+					  struct kobject *kobj,
+					  struct bin_attribute *bin_attr,
+					  char *buf, loff_t off, size_t count)
+{
+	struct channel_path *chp;
+	struct device *device;
+
+	device = container_of(kobj, struct device, kobj);
+	chp = to_channelpath(device);
+	if (!chp->cmg_chars)
+		return 0;
+
+	return memory_read_from_buffer(buf, count, &off,
+				chp->cmg_chars, sizeof(struct cmg_chars));
+}
+
+static struct bin_attribute chp_measurement_chars_attr = {
+	.attr = {
+		.name = "measurement_chars",
+		.mode = S_IRUSR,
+	},
+	.size = sizeof(struct cmg_chars),
+	.read = chp_measurement_chars_read,
+};
+
+static void chp_measurement_copy_block(struct cmg_entry *buf,
+				       struct channel_subsystem *css,
+				       struct chp_id chpid)
+{
+	void *area;
+	struct cmg_entry *entry, reference_buf;
+	int idx;
+
+	if (chpid.id < 128) {
+		area = css->cub_addr1;
+		idx = chpid.id;
+	} else {
+		area = css->cub_addr2;
+		idx = chpid.id - 128;
+	}
+	entry = area + (idx * sizeof(struct cmg_entry));
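+	/*
+	 * Re-read until two consecutive copies agree, so a concurrent
+	 * hardware update of the entry cannot yield a torn block.
+	 */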
+	do {
+		memcpy(buf, entry, sizeof(*entry));
+		memcpy(&reference_buf, entry, sizeof(*entry));
+	} while (reference_buf.values[0] != buf->values[0]);
+}
+
+static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
+				    struct bin_attribute *bin_attr,
+				    char *buf, loff_t off, size_t count)
+{
+	struct channel_path *chp;
+	struct channel_subsystem *css;
+	struct device *device;
+	unsigned int size;
+
+	device = container_of(kobj, struct device, kobj);
+	chp = to_channelpath(device);
+	css = to_css(chp->dev.parent);
+
+	size = sizeof(struct cmg_entry);
+
+	/* Only allow single reads. */
+	if (off || count < size)
+		return 0;
+	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
+	count = size;
+	return count;
+}
+
+static struct bin_attribute chp_measurement_attr = {
+	.attr = {
+		.name = "measurement",
+		.mode = S_IRUSR,
+	},
+	.size = sizeof(struct cmg_entry),
+	.read = chp_measurement_read,
+};
+
+void chp_remove_cmg_attr(struct channel_path *chp)
+{
+	device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
+	device_remove_bin_file(&chp->dev, &chp_measurement_attr);
+}
+
+int chp_add_cmg_attr(struct channel_path *chp)
+{
+	int ret;
+
+	ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
+	if (ret)
+		return ret;
+	ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
+	if (ret)
+		device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
+	return ret;
+}
+
+/*
+ * Files for the channel path entries.
+ */
+static ssize_t chp_status_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct channel_path *chp = to_channelpath(dev);
+	int status;
+
+	mutex_lock(&chp->lock);
+	status = chp->state;
+	mutex_unlock(&chp->lock);
+
+	return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
+}
+
+static ssize_t chp_status_write(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct channel_path *cp = to_channelpath(dev);
+	char cmd[10];
+	int num_args;
+	int error;
+
+	num_args = sscanf(buf, "%5s", cmd);
+	if (!num_args)
+		return count;
+
+	if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
+		mutex_lock(&cp->lock);
+		error = s390_vary_chpid(cp->chpid, 1);
+		mutex_unlock(&cp->lock);
+	} else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
+		mutex_lock(&cp->lock);
+		error = s390_vary_chpid(cp->chpid, 0);
+		mutex_unlock(&cp->lock);
+	} else
+		error = -EINVAL;
+
+	return error < 0 ? error : count;
+}
+
+static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
+
+static ssize_t chp_configure_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct channel_path *cp;
+	int status;
+
+	cp = to_channelpath(dev);
+	status = chp_info_get_status(cp->chpid);
+	if (status < 0)
+		return status;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", status);
+}
+
+static int cfg_wait_idle(void);
+
+static ssize_t chp_configure_write(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct channel_path *cp;
+	int val;
+	char delim;
+
+	if (sscanf(buf, "%d %c", &val, &delim) != 1)
+		return -EINVAL;
+	if (val != 0 && val != 1)
+		return -EINVAL;
+	cp = to_channelpath(dev);
+	chp_cfg_schedule(cp->chpid, val);
+	cfg_wait_idle();
+
+	return count;
+}
+
+static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
+
+static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct channel_path *chp = to_channelpath(dev);
+	u8 type;
+
+	mutex_lock(&chp->lock);
+	type = chp->desc.desc;
+	mutex_unlock(&chp->lock);
+	return sprintf(buf, "%x\n", type);
+}
+
+static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
+
+static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct channel_path *chp = to_channelpath(dev);
+
+	if (!chp)
+		return 0;
+	if (chp->cmg == -1) /* channel measurements not available */
+		return sprintf(buf, "unknown\n");
+	return sprintf(buf, "%x\n", chp->cmg);
+}
+
+static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
+
+static ssize_t chp_shared_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct channel_path *chp = to_channelpath(dev);
+
+	if (!chp)
+		return 0;
+	if (chp->shared == -1) /* channel measurements not available */
+		return sprintf(buf, "unknown\n");
+	return sprintf(buf, "%x\n", chp->shared);
+}
+
+static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
+
+static struct attribute *chp_attrs[] = {
+	&dev_attr_status.attr,
+	&dev_attr_configure.attr,
+	&dev_attr_type.attr,
+	&dev_attr_cmg.attr,
+	&dev_attr_shared.attr,
+	NULL,
+};
+
+static struct attribute_group chp_attr_group = {
+	.attrs = chp_attrs,
+};
+
+static void chp_release(struct device *dev)
+{
+	struct channel_path *cp;
+
+	cp = to_channelpath(dev);
+	kfree(cp);
+}
+
+/**
+ * chp_new - register a new channel-path
+ * @chpid: channel-path ID
+ *
+ * Create and register a data structure representing the new channel-path.
+ * Return zero on success, non-zero otherwise.
+ */
+int chp_new(struct chp_id chpid)
+{
+	struct channel_path *chp;
+	int ret;
+
+	if (chp_is_registered(chpid))
+		return 0;
+	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
+	if (!chp)
+		return -ENOMEM;
+
+	/* fill in status, etc. */
+	chp->chpid = chpid;
+	chp->state = 1;
+	chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
+	chp->dev.release = chp_release;
+	mutex_init(&chp->lock);
+
+	/* Obtain channel path description and fill it in. */
+	ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+	if (ret)
+		goto out_free;
+	if ((chp->desc.flags & 0x80) == 0) {
+		ret = -ENODEV;
+		goto out_free;
+	}
+	/* Get channel-measurement characteristics. */
+	if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
+		ret = chsc_get_channel_measurement_chars(chp);
+		if (ret)
+			goto out_free;
+	} else {
+		chp->cmg = -1;
+	}
+	dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
+
+	/* make it known to the system */
+	ret = device_register(&chp->dev);
+	if (ret) {
+		CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
+			      chpid.cssid, chpid.id, ret);
+		put_device(&chp->dev);
+		goto out;
+	}
+	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
+	if (ret) {
+		device_unregister(&chp->dev);
+		goto out;
+	}
+	mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
+	if (channel_subsystems[chpid.cssid]->cm_enabled) {
+		ret = chp_add_cmg_attr(chp);
+		if (ret) {
+			sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
+			device_unregister(&chp->dev);
+			mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
+			goto out;
+		}
+	}
+	channel_subsystems[chpid.cssid]->chps[chpid.id] = chp;
+	mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
+	goto out;
+out_free:
+	kfree(chp);
+out:
+	return ret;
+}
+
+/**
+ * chp_get_chp_desc - return newly allocated channel-path description
+ * @chpid: channel-path ID
+ *
+ * On success return a newly allocated copy of the channel-path description
+ * data associated with the given channel-path ID. Return %NULL on error.
+ */
+void *chp_get_chp_desc(struct chp_id chpid)
+{
+	struct channel_path *chp;
+	struct channel_path_desc *desc;
+
+	chp = chpid_to_chp(chpid);
+	if (!chp)
+		return NULL;
+	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
+	if (!desc)
+		return NULL;
+
+	mutex_lock(&chp->lock);
+	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
+	mutex_unlock(&chp->lock);
+	return desc;
+}
+
+/**
+ * chp_process_crw - process channel-path status change
+ * @crw0: channel report-word to handle
+ * @crw1: second channel-report word (always NULL)
+ * @overflow: crw overflow indication
+ *
+ * Handle channel-report-words indicating that the status of a channel-path
+ * has changed.
+ */
+static void chp_process_crw(struct crw *crw0, struct crw *crw1,
+			    int overflow)
+{
+	struct chp_id chpid;
+
+	if (overflow) {
+		css_schedule_eval_all();
+		return;
+	}
+	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
+		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+		      crw0->erc, crw0->rsid);
+	/*
+	 * Check for solicited machine checks. These are
+	 * created by reset channel path and need not be
+	 * handled here.
+	 */
+	if (crw0->slct) {
+		CIO_CRW_EVENT(2, "solicited machine check for "
+			      "channel path %02X\n", crw0->rsid);
+		return;
+	}
+	chp_id_init(&chpid);
+	chpid.id = crw0->rsid;
+	switch (crw0->erc) {
+	case CRW_ERC_IPARM: /* Path has come. */
+		if (!chp_is_registered(chpid))
+			chp_new(chpid);
+		chsc_chp_online(chpid);
+		break;
+	case CRW_ERC_PERRI: /* Path has gone. */
+	case CRW_ERC_PERRN:
+		chsc_chp_offline(chpid);
+		break;
+	default:
+		CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
+			      crw0->erc);
+	}
+}
+
+int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
+{
+	int i;
+	int mask;
+
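+	/*
+	 * Return the path-mask bit of the first installed path that runs
+	 * over the given chpid and, where a full link address is valid,
+	 * also matches the link address. Return 0 if no such path exists.
+	 */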
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (!(ssd->path_mask & mask))
+			continue;
+		if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
+			continue;
+		if ((ssd->fla_valid_mask & mask) &&
+		    ((ssd->fla[i] & link->fla_mask) != link->fla))
+			continue;
+		return mask;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
+
+static inline int info_bit_num(struct chp_id id)
+{
+	return id.id + id.cssid * (__MAX_CHPID + 1);
+}
+
+/* Force chp_info refresh on next call to info_validate(). */
+static void info_expire(void)
+{
+	mutex_lock(&info_lock);
+	chp_info_expires = jiffies - 1;
+	mutex_unlock(&info_lock);
+}
+
+/* Ensure that chp_info is up-to-date. */
+static int info_update(void)
+{
+	int rc;
+
+	mutex_lock(&info_lock);
+	rc = 0;
+	if (time_after(jiffies, chp_info_expires)) {
+		/* Data is too old, update. */
+		rc = sclp_chp_read_info(&chp_info);
+		chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
+	}
+	mutex_unlock(&info_lock);
+
+	return rc;
+}
+
+/**
+ * chp_info_get_status - retrieve configure status of a channel-path
+ * @chpid: channel-path ID
+ *
+ * On success, return 0 for standby, 1 for configured, 2 for reserved,
+ * 3 for not recognized. Return negative error code on error.
+ */
+int chp_info_get_status(struct chp_id chpid)
+{
+	int rc;
+	int bit;
+
+	rc = info_update();
+	if (rc)
+		return rc;
+
+	bit = info_bit_num(chpid);
+	mutex_lock(&info_lock);
+	if (!chp_test_bit(chp_info.recognized, bit))
+		rc = CHP_STATUS_NOT_RECOGNIZED;
+	else if (chp_test_bit(chp_info.configured, bit))
+		rc = CHP_STATUS_CONFIGURED;
+	else if (chp_test_bit(chp_info.standby, bit))
+		rc = CHP_STATUS_STANDBY;
+	else
+		rc = CHP_STATUS_RESERVED;
+	mutex_unlock(&info_lock);
+
+	return rc;
+}
+
+/* Return configure task for chpid. */
+static enum cfg_task_t cfg_get_task(struct chp_id chpid)
+{
+	return chp_cfg_task[chpid.cssid][chpid.id];
+}
+
+/* Set configure task for chpid. */
+static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
+{
+	chp_cfg_task[chpid.cssid][chpid.id] = cfg;
+}
+
+/* Perform one configure/deconfigure request. Reschedule the work function
+ * until the last pending request has been processed. */
+static void cfg_func(struct work_struct *work)
+{
+	struct chp_id chpid;
+	enum cfg_task_t t;
+	int rc;
+
+	mutex_lock(&cfg_lock);
+	t = cfg_none;
+	chp_id_for_each(&chpid) {
+		t = cfg_get_task(chpid);
+		if (t != cfg_none) {
+			cfg_set_task(chpid, cfg_none);
+			break;
+		}
+	}
+	mutex_unlock(&cfg_lock);
+
+	switch (t) {
+	case cfg_configure:
+		rc = sclp_chp_configure(chpid);
+		if (rc)
+			CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
+				      "%d\n", chpid.cssid, chpid.id, rc);
+		else {
+			info_expire();
+			chsc_chp_online(chpid);
+		}
+		break;
+	case cfg_deconfigure:
+		rc = sclp_chp_deconfigure(chpid);
+		if (rc)
+			CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
+				      "%d\n", chpid.cssid, chpid.id, rc);
+		else {
+			info_expire();
+			chsc_chp_offline(chpid);
+		}
+		break;
+	case cfg_none:
+		/* Get updated information after last change. */
+		info_update();
+		mutex_lock(&cfg_lock);
+		cfg_busy = 0;
+		mutex_unlock(&cfg_lock);
+		wake_up_interruptible(&cfg_wait_queue);
+		return;
+	}
+	queue_work(chp_wq, &cfg_work);
+}
+
+/**
+ * chp_cfg_schedule - schedule chpid configuration request
+ * @chpid: channel-path ID
+ * @configure: Non-zero for configure, zero for deconfigure
+ *
+ * Schedule a channel-path configuration/deconfiguration request.
+ */
+void chp_cfg_schedule(struct chp_id chpid, int configure)
+{
+	CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
+		      configure);
+	mutex_lock(&cfg_lock);
+	cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
+	cfg_busy = 1;
+	mutex_unlock(&cfg_lock);
+	queue_work(chp_wq, &cfg_work);
+}
+
+/**
+ * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
+ * @chpid: channel-path ID
+ *
+ * Cancel an active channel-path deconfiguration request if it has not yet
+ * been performed.
+ */
+void chp_cfg_cancel_deconfigure(struct chp_id chpid)
+{
+	CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
+	mutex_lock(&cfg_lock);
+	if (cfg_get_task(chpid) == cfg_deconfigure)
+		cfg_set_task(chpid, cfg_none);
+	mutex_unlock(&cfg_lock);
+}
+
+static int cfg_wait_idle(void)
+{
+	if (wait_event_interruptible(cfg_wait_queue, !cfg_busy))
+		return -ERESTARTSYS;
+	return 0;
+}
+
+static int __init chp_init(void)
+{
+	struct chp_id chpid;
+	int ret;
+
+	ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
+	if (ret)
+		return ret;
+	chp_wq = create_singlethread_workqueue("cio_chp");
+	if (!chp_wq) {
+		crw_unregister_handler(CRW_RSC_CPATH);
+		return -ENOMEM;
+	}
+	INIT_WORK(&cfg_work, cfg_func);
+	init_waitqueue_head(&cfg_wait_queue);
+	if (info_update())
+		return 0;
+	/* Register available channel-paths. */
+	chp_id_for_each(&chpid) {
+		if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
+			chp_new(chpid);
+	}
+
+	return 0;
+}
+
+subsys_initcall(chp_init);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/chp.h b/ap/os/linux/linux-3.4.x/drivers/s390/cio/chp.h
new file mode 100644
index 0000000..12b4903
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/chp.h
@@ -0,0 +1,72 @@
+/*
+ *  drivers/s390/cio/chp.h
+ *
+ *    Copyright IBM Corp. 2007,2010
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef S390_CHP_H
+#define S390_CHP_H S390_CHP_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <asm/chpid.h>
+#include "chsc.h"
+#include "css.h"
+
+#define CHP_STATUS_STANDBY		0
+#define CHP_STATUS_CONFIGURED		1
+#define CHP_STATUS_RESERVED		2
+#define CHP_STATUS_NOT_RECOGNIZED	3
+
+#define CHP_ONLINE 0
+#define CHP_OFFLINE 1
+#define CHP_VARY_ON 2
+#define CHP_VARY_OFF 3
+
+struct chp_link {
+	struct chp_id chpid;
+	u32 fla_mask;
+	u16 fla;
+};
+
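+/* Bitmaps are stored MSB first: bit number 0 is the most significant bit
+ * of bitmap[0]. */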
+static inline int chp_test_bit(u8 *bitmap, int num)
+{
+	int byte = num >> 3;
+	int mask = 128 >> (num & 7);
+
+	return (bitmap[byte] & mask) ? 1 : 0;
+}
+
+
+struct channel_path {
+	struct device dev;
+	struct chp_id chpid;
+	struct mutex lock; /* Serialize access to below members. */
+	int state;
+	struct channel_path_desc desc;
+	/* Channel-measurement related stuff: */
+	int cmg;
+	int shared;
+	void *cmg_chars;
+};
+
+/* Return channel_path struct for given chpid. */
+static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
+{
+	return channel_subsystems[chpid.cssid]->chps[chpid.id];
+}
+
+int chp_get_status(struct chp_id chpid);
+u8 chp_get_sch_opm(struct subchannel *sch);
+int chp_is_registered(struct chp_id chpid);
+void *chp_get_chp_desc(struct chp_id chpid);
+void chp_remove_cmg_attr(struct channel_path *chp);
+int chp_add_cmg_attr(struct channel_path *chp);
+int chp_new(struct chp_id chpid);
+void chp_cfg_schedule(struct chp_id chpid, int configure);
+void chp_cfg_cancel_deconfigure(struct chp_id chpid);
+int chp_info_get_status(struct chp_id chpid);
+int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *);
+#endif /* S390_CHP_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/chsc.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/chsc.c
new file mode 100644
index 0000000..a84631a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/chsc.c
@@ -0,0 +1,1050 @@
+/*
+ *  drivers/s390/cio/chsc.c
+ *   S/390 common I/O routines -- channel subsystem call
+ *
+ *    Copyright IBM Corp. 1999,2010
+ *    Author(s): Ingo Adlung (adlung@de.ibm.com)
+ *		 Cornelia Huck (cornelia.huck@de.ibm.com)
+ *		 Arnd Bergmann (arndb@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+
+#include <asm/cio.h>
+#include <asm/chpid.h>
+#include <asm/chsc.h>
+#include <asm/crw.h>
+
+#include "css.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "ioasm.h"
+#include "chp.h"
+#include "chsc.h"
+
+static void *sei_page;
+static void *chsc_page;
+static DEFINE_SPINLOCK(chsc_page_lock);
+
+/**
+ * chsc_error_from_response() - convert a chsc response to an error
+ * @response: chsc response code
+ *
+ * Returns an appropriate Linux error code for @response.
+ */
+int chsc_error_from_response(int response)
+{
+	switch (response) {
+	case 0x0001:
+		return 0;
+	case 0x0002:
+	case 0x0003:
+	case 0x0006:
+	case 0x0007:
+	case 0x0008:
+	case 0x000a:
+	case 0x0104:
+		return -EINVAL;
+	case 0x0004:
+		return -EOPNOTSUPP;
+	default:
+		return -EIO;
+	}
+}
+EXPORT_SYMBOL_GPL(chsc_error_from_response);
+
+struct chsc_ssd_area {
+	struct chsc_header request;
+	u16 :10;
+	u16 ssid:2;
+	u16 :4;
+	u16 f_sch;	  /* first subchannel */
+	u16 :16;
+	u16 l_sch;	  /* last subchannel */
+	u32 :32;
+	struct chsc_header response;
+	u32 :32;
+	u8 sch_valid : 1;
+	u8 dev_valid : 1;
+	u8 st	     : 3; /* subchannel type */
+	u8 zeroes    : 3;
+	u8  unit_addr;	  /* unit address */
+	u16 devno;	  /* device number */
+	u8 path_mask;
+	u8 fla_valid_mask;
+	u16 sch;	  /* subchannel */
+	u8 chpid[8];	  /* chpids 0-7 */
+	u16 fla[8];	  /* full link addresses 0-7 */
+} __attribute__ ((packed));
+
+int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
+{
+	struct chsc_ssd_area *ssd_area;
+	int ccode;
+	int ret;
+	int i;
+	int mask;
+
+	spin_lock_irq(&chsc_page_lock);
+	memset(chsc_page, 0, PAGE_SIZE);
+	ssd_area = chsc_page;
+	ssd_area->request.length = 0x0010;
+	ssd_area->request.code = 0x0004;
+	ssd_area->ssid = schid.ssid;
+	ssd_area->f_sch = schid.sch_no;
+	ssd_area->l_sch = schid.sch_no;
+
+	ccode = chsc(ssd_area);
+	/* Check response. */
+	if (ccode > 0) {
+		ret = (ccode == 3) ? -ENODEV : -EBUSY;
+		goto out;
+	}
+	ret = chsc_error_from_response(ssd_area->response.code);
+	if (ret != 0) {
+		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
+			      schid.ssid, schid.sch_no,
+			      ssd_area->response.code);
+		goto out;
+	}
+	if (!ssd_area->sch_valid) {
+		ret = -ENODEV;
+		goto out;
+	}
+	/* Copy data */
+	ret = 0;
+	memset(ssd, 0, sizeof(struct chsc_ssd_info));
+	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
+	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
+		goto out;
+	ssd->path_mask = ssd_area->path_mask;
+	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (ssd_area->path_mask & mask) {
+			chp_id_init(&ssd->chpid[i]);
+			ssd->chpid[i].id = ssd_area->chpid[i];
+		}
+		if (ssd_area->fla_valid_mask & mask)
+			ssd->fla[i] = ssd_area->fla[i];
+	}
+out:
+	spin_unlock_irq(&chsc_page_lock);
+	return ret;
+}
+
+static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
+{
+	spin_lock_irq(sch->lock);
+	if (sch->driver && sch->driver->chp_event)
+		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
+			goto out_unreg;
+	spin_unlock_irq(sch->lock);
+	return 0;
+
+out_unreg:
+	sch->lpm = 0;
+	spin_unlock_irq(sch->lock);
+	css_schedule_eval(sch->schid);
+	return 0;
+}
+
+void chsc_chp_offline(struct chp_id chpid)
+{
+	char dbf_txt[15];
+	struct chp_link link;
+
+	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
+	CIO_TRACE_EVENT(2, dbf_txt);
+
+	if (chp_get_status(chpid) <= 0)
+		return;
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
+	/* Wait until previous actions have settled. */
+	css_wait_for_slow_path();
+	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
+}
+
+static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
+{
+	struct schib schib;
+	/*
+	 * We don't know the device yet, but since a path
+	 * may now be available to the device, we'll have
+	 * to do recognition again.
+	 * Since we have no idea which chpid the device
+	 * may be on, we'll have to do a stsch on all
+	 * devices.
+	 */
+	if (stsch_err(schid, &schib))
+		/* We're through */
+		return -ENXIO;
+
+	/* Put it on the slow path. */
+	css_schedule_eval(schid);
+	return 0;
+}
+
+static int __s390_process_res_acc(struct subchannel *sch, void *data)
+{
+	spin_lock_irq(sch->lock);
+	if (sch->driver && sch->driver->chp_event)
+		sch->driver->chp_event(sch, data, CHP_ONLINE);
+	spin_unlock_irq(sch->lock);
+
+	return 0;
+}
+
+static void s390_process_res_acc(struct chp_link *link)
+{
+	char dbf_txt[15];
+
+	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
+		link->chpid.id);
+	CIO_TRACE_EVENT(2, dbf_txt);
+	if (link->fla != 0) {
+		sprintf(dbf_txt, "fla%x", link->fla);
+		CIO_TRACE_EVENT(2, dbf_txt);
+	}
+	/* Wait until previous actions have settled. */
+	css_wait_for_slow_path();
+	/*
+	 * I/O resources may have become accessible.
+	 * Scan through all subchannels that may be concerned and
+	 * do a validation on those.
+	 * The more information we have (info), the less scanning
+	 * we will have to do.
+	 */
+	for_each_subchannel_staged(__s390_process_res_acc,
+				   s390_process_res_acc_new_sch, link);
+}
+
+static int
+__get_chpid_from_lir(void *data)
+{
+	struct lir {
+		u8  iq;
+		u8  ic;
+		u16 sci;
+		/* incident-node descriptor */
+		u32 indesc[28];
+		/* attached-node descriptor */
+		u32 andesc[28];
+		/* incident-specific information */
+		u32 isinfo[28];
+	} __attribute__ ((packed)) *lir;
+
+	lir = data;
+	if (!(lir->iq&0x80))
+		/* NULL link incident record */
+		return -EINVAL;
+	if (!(lir->indesc[0]&0xc0000000))
+		/* node descriptor not valid */
+		return -EINVAL;
+	if (!(lir->indesc[0]&0x10000000))
+		/* don't handle device-type nodes - FIXME */
+		return -EINVAL;
+	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
+
+	return (u16) (lir->indesc[0]&0x000000ff);
+}
+
+struct chsc_sei_area {
+	struct chsc_header request;
+	u32 reserved1;
+	u32 reserved2;
+	u32 reserved3;
+	struct chsc_header response;
+	u32 reserved4;
+	u8  flags;
+	u8  vf;		/* validity flags */
+	u8  rs;		/* reporting source */
+	u8  cc;		/* content code */
+	u16 fla;	/* full link address */
+	u16 rsid;	/* reporting source id */
+	u32 reserved5;
+	u32 reserved6;
+	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
+	/* ccdf has to be big enough for a link-incident record */
+} __attribute__ ((packed));
+
+static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
+{
+	struct chp_id chpid;
+	int id;
+
+	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
+		      sei_area->rs, sei_area->rsid);
+	if (sei_area->rs != 4)
+		return;
+	id = __get_chpid_from_lir(sei_area->ccdf);
+	if (id < 0)
+		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
+	else {
+		chp_id_init(&chpid);
+		chpid.id = id;
+		chsc_chp_offline(chpid);
+	}
+}
+
+static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
+{
+	struct chp_link link;
+	struct chp_id chpid;
+	int status;
+
+	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
+		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
+	if (sei_area->rs != 4)
+		return;
+	chp_id_init(&chpid);
+	chpid.id = sei_area->rsid;
+	/* allocate a new channel path structure, if needed */
+	status = chp_get_status(chpid);
+	if (status < 0)
+		chp_new(chpid);
+	else if (!status)
+		return;
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
+	if ((sei_area->vf & 0xc0) != 0) {
+		link.fla = sei_area->fla;
+		if ((sei_area->vf & 0xc0) == 0xc0)
+			/* full link address */
+			link.fla_mask = 0xffff;
+		else
+			/* link address */
+			link.fla_mask = 0xff00;
+	}
+	s390_process_res_acc(&link);
+}
+
+static void chsc_process_sei_chp_avail(struct chsc_sei_area *sei_area)
+{
+	struct channel_path *chp;
+	struct chp_id chpid;
+	u8 *data;
+	int num;
+
+	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
+	if (sei_area->rs != 0)
+		return;
+	data = sei_area->ccdf;
+	chp_id_init(&chpid);
+	for (num = 0; num <= __MAX_CHPID; num++) {
+		if (!chp_test_bit(data, num))
+			continue;
+		chpid.id = num;
+
+		CIO_CRW_EVENT(4, "Update information for channel path "
+			      "%x.%02x\n", chpid.cssid, chpid.id);
+		chp = chpid_to_chp(chpid);
+		if (!chp) {
+			chp_new(chpid);
+			continue;
+		}
+		mutex_lock(&chp->lock);
+		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+		mutex_unlock(&chp->lock);
+	}
+}
+
+struct chp_config_data {
+	u8 map[32];
+	u8 op;
+	u8 pc;
+};
+
+static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
+{
+	struct chp_config_data *data;
+	struct chp_id chpid;
+	int num;
+	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
+
+	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
+	if (sei_area->rs != 0)
+		return;
+	data = (struct chp_config_data *) &(sei_area->ccdf);
+	chp_id_init(&chpid);
+	for (num = 0; num <= __MAX_CHPID; num++) {
+		if (!chp_test_bit(data->map, num))
+			continue;
+		chpid.id = num;
+		pr_notice("Processing %s for channel path %x.%02x\n",
+			  events[data->op], chpid.cssid, chpid.id);
+		switch (data->op) {
+		case 0:
+			chp_cfg_schedule(chpid, 1);
+			break;
+		case 1:
+			chp_cfg_schedule(chpid, 0);
+			break;
+		case 2:
+			chp_cfg_cancel_deconfigure(chpid);
+			break;
+		}
+	}
+}
+
+static void chsc_process_sei(struct chsc_sei_area *sei_area)
+{
+	/* Check if we might have lost some information. */
+	if (sei_area->flags & 0x40) {
+		CIO_CRW_EVENT(2, "chsc: event overflow\n");
+		css_schedule_eval_all();
+	}
+	/* which kind of information was stored? */
+	switch (sei_area->cc) {
+	case 1: /* link incident */
+		chsc_process_sei_link_incident(sei_area);
+		break;
+	case 2: /* i/o resource accessibility */
+		chsc_process_sei_res_acc(sei_area);
+		break;
+	case 7: /* channel-path-availability information */
+		chsc_process_sei_chp_avail(sei_area);
+		break;
+	case 8: /* channel-path-configuration notification */
+		chsc_process_sei_chp_config(sei_area);
+		break;
+	default: /* other stuff */
+		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
+			      sei_area->cc);
+		break;
+	}
+}
+
+static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
+{
+	struct chsc_sei_area *sei_area;
+
+	if (overflow) {
+		css_schedule_eval_all();
+		return;
+	}
+	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
+		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+		      crw0->erc, crw0->rsid);
+	if (!sei_page)
+		return;
+	/* Access to sei_page is serialized through the machine check handler
+	 * thread, so no locking is needed. */
+	sei_area = sei_page;
+
+	CIO_TRACE_EVENT(2, "prcss");
+	do {
+		memset(sei_area, 0, sizeof(*sei_area));
+		sei_area->request.length = 0x0010;
+		sei_area->request.code = 0x000e;
+		if (chsc(sei_area))
+			break;
+
+		if (sei_area->response.code == 0x0001) {
+			CIO_CRW_EVENT(4, "chsc: sei successful\n");
+			chsc_process_sei(sei_area);
+		} else {
+			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
+				      sei_area->response.code);
+			break;
+		}
+	} while (sei_area->flags & 0x80);
+}
+
+void chsc_chp_online(struct chp_id chpid)
+{
+	char dbf_txt[15];
+	struct chp_link link;
+
+	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
+	CIO_TRACE_EVENT(2, dbf_txt);
+
+	if (chp_get_status(chpid) != 0) {
+		memset(&link, 0, sizeof(struct chp_link));
+		link.chpid = chpid;
+		/* Wait until previous actions have settled. */
+		css_wait_for_slow_path();
+		for_each_subchannel_staged(__s390_process_res_acc, NULL,
+					   &link);
+	}
+}
+
+static void __s390_subchannel_vary_chpid(struct subchannel *sch,
+					 struct chp_id chpid, int on)
+{
+	unsigned long flags;
+	struct chp_link link;
+
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
+	spin_lock_irqsave(sch->lock, flags);
+	if (sch->driver && sch->driver->chp_event)
+		sch->driver->chp_event(sch, &link,
+				       on ? CHP_VARY_ON : CHP_VARY_OFF);
+	spin_unlock_irqrestore(sch->lock, flags);
+}
+
+static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
+{
+	struct chp_id *chpid = data;
+
+	__s390_subchannel_vary_chpid(sch, *chpid, 0);
+	return 0;
+}
+
+static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
+{
+	struct chp_id *chpid = data;
+
+	__s390_subchannel_vary_chpid(sch, *chpid, 1);
+	return 0;
+}
+
+static int
+__s390_vary_chpid_on(struct subchannel_id schid, void *data)
+{
+	struct schib schib;
+
+	if (stsch_err(schid, &schib))
+		/* We're through */
+		return -ENXIO;
+	/* Put it on the slow path. */
+	css_schedule_eval(schid);
+	return 0;
+}
+
+/**
+ * chsc_chp_vary - propagate channel-path vary operation to subchannels
+ * @chpid: channel-path ID
+ * @on: non-zero for vary online, zero for vary offline
+ */
+int chsc_chp_vary(struct chp_id chpid, int on)
+{
+	struct channel_path *chp = chpid_to_chp(chpid);
+
+	/* Wait until previous actions have settled. */
+	css_wait_for_slow_path();
+	/*
+	 * Redo PathVerification on the devices the chpid connects to
+	 */
+	if (on) {
+		/* Try to update the channel path descriptor. */
+		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
+					   __s390_vary_chpid_on, &chpid);
+	} else
+		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
+					   NULL, &chpid);
+
+	return 0;
+}
+
+static void
+chsc_remove_cmg_attr(struct channel_subsystem *css)
+{
+	int i;
+
+	for (i = 0; i <= __MAX_CHPID; i++) {
+		if (!css->chps[i])
+			continue;
+		chp_remove_cmg_attr(css->chps[i]);
+	}
+}
+
+static int
+chsc_add_cmg_attr(struct channel_subsystem *css)
+{
+	int i, ret;
+
+	ret = 0;
+	for (i = 0; i <= __MAX_CHPID; i++) {
+		if (!css->chps[i])
+			continue;
+		ret = chp_add_cmg_attr(css->chps[i]);
+		if (ret)
+			goto cleanup;
+	}
+	return ret;
+cleanup:
+	for (--i; i >= 0; i--) {
+		if (!css->chps[i])
+			continue;
+		chp_remove_cmg_attr(css->chps[i]);
+	}
+	return ret;
+}
+
+int __chsc_do_secm(struct channel_subsystem *css, int enable)
+{
+	struct {
+		struct chsc_header request;
+		u32 operation_code : 2;
+		u32 : 30;
+		u32 key : 4;
+		u32 : 28;
+		u32 zeroes1;
+		u32 cub_addr1;
+		u32 zeroes2;
+		u32 cub_addr2;
+		u32 reserved[13];
+		struct chsc_header response;
+		u32 status : 8;
+		u32 : 4;
+		u32 fmt : 4;
+		u32 : 16;
+	} __attribute__ ((packed)) *secm_area;
+	int ret, ccode;
+
+	spin_lock_irq(&chsc_page_lock);
+	memset(chsc_page, 0, PAGE_SIZE);
+	secm_area = chsc_page;
+	secm_area->request.length = 0x0050;
+	secm_area->request.code = 0x0016;
+
+	secm_area->key = PAGE_DEFAULT_KEY >> 4;
+	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
+	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
+
+	secm_area->operation_code = enable ? 0 : 1;
+
+	ccode = chsc(secm_area);
+	if (ccode > 0) {
+		ret = (ccode == 3) ? -ENODEV : -EBUSY;
+		goto out;
+	}
+
+	switch (secm_area->response.code) {
+	case 0x0102:
+	case 0x0103:
+		ret = -EINVAL;
+		break;
+	default:
+		ret = chsc_error_from_response(secm_area->response.code);
+	}
+	if (ret != 0)
+		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
+			      secm_area->response.code);
+out:
+	spin_unlock_irq(&chsc_page_lock);
+	return ret;
+}
+
+int
+chsc_secm(struct channel_subsystem *css, int enable)
+{
+	int ret;
+
+	if (enable && !css->cm_enabled) {
+		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		if (!css->cub_addr1 || !css->cub_addr2) {
+			free_page((unsigned long)css->cub_addr1);
+			free_page((unsigned long)css->cub_addr2);
+			return -ENOMEM;
+		}
+	}
+	ret = __chsc_do_secm(css, enable);
+	if (!ret) {
+		css->cm_enabled = enable;
+		if (css->cm_enabled) {
+			ret = chsc_add_cmg_attr(css);
+			if (ret) {
+				__chsc_do_secm(css, 0);
+				css->cm_enabled = 0;
+			}
+		} else
+			chsc_remove_cmg_attr(css);
+	}
+	if (!css->cm_enabled) {
+		free_page((unsigned long)css->cub_addr1);
+		free_page((unsigned long)css->cub_addr2);
+	}
+	return ret;
+}
+
+int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
+				     int c, int m, void *page)
+{
+	struct chsc_scpd *scpd_area;
+	int ccode, ret;
+
+	if ((rfmt == 1) && !css_general_characteristics.fcs)
+		return -EINVAL;
+	if ((rfmt == 2) && !css_general_characteristics.cib)
+		return -EINVAL;
+
+	memset(page, 0, PAGE_SIZE);
+	scpd_area = page;
+	scpd_area->request.length = 0x0010;
+	scpd_area->request.code = 0x0002;
+	scpd_area->cssid = chpid.cssid;
+	scpd_area->first_chpid = chpid.id;
+	scpd_area->last_chpid = chpid.id;
+	scpd_area->m = m;
+	scpd_area->c = c;
+	scpd_area->fmt = fmt;
+	scpd_area->rfmt = rfmt;
+
+	ccode = chsc(scpd_area);
+	if (ccode > 0)
+		return (ccode == 3) ? -ENODEV : -EBUSY;
+
+	ret = chsc_error_from_response(scpd_area->response.code);
+	if (ret)
+		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
+			      scpd_area->response.code);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
+
+int chsc_determine_base_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc *desc)
+{
+	struct chsc_response_struct *chsc_resp;
+	struct chsc_scpd *scpd_area;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	scpd_area = chsc_page;
+	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
+	if (ret)
+		goto out;
+	chsc_resp = (void *)&scpd_area->response;
+	memcpy(desc, &chsc_resp->data, sizeof(*desc));
+out:
+	spin_unlock_irqrestore(&chsc_page_lock, flags);
+	return ret;
+}
+
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc_fmt1 *desc)
+{
+	struct chsc_response_struct *chsc_resp;
+	struct chsc_scpd *scpd_area;
+	int ret;
+
+	spin_lock_irq(&chsc_page_lock);
+	scpd_area = chsc_page;
+	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
+	if (ret)
+		goto out;
+	chsc_resp = (void *)&scpd_area->response;
+	memcpy(desc, &chsc_resp->data, sizeof(*desc));
+out:
+	spin_unlock_irq(&chsc_page_lock);
+	return ret;
+}
+
+static void
+chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
+			  struct cmg_chars *chars)
+{
+	struct cmg_chars *cmg_chars;
+	int i, mask;
+
+	cmg_chars = chp->cmg_chars;
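+	/*
+	 * Copy only those measurement characteristics whose validity bit is
+	 * set in cmcv; clear the remaining values.
+	 */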
+	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
+		mask = 0x80 >> (i + 3);
+		if (cmcv & mask)
+			cmg_chars->values[i] = chars->values[i];
+		else
+			cmg_chars->values[i] = 0;
+	}
+}
+
+int chsc_get_channel_measurement_chars(struct channel_path *chp)
+{
+	struct cmg_chars *cmg_chars;
+	int ccode, ret;
+
+	struct {
+		struct chsc_header request;
+		u32 : 24;
+		u32 first_chpid : 8;
+		u32 : 24;
+		u32 last_chpid : 8;
+		u32 zeroes1;
+		struct chsc_header response;
+		u32 zeroes2;
+		u32 not_valid : 1;
+		u32 shared : 1;
+		u32 : 22;
+		u32 chpid : 8;
+		u32 cmcv : 5;
+		u32 : 11;
+		u32 cmgq : 8;
+		u32 cmg : 8;
+		u32 zeroes3;
+		u32 data[NR_MEASUREMENT_CHARS];
+	} __attribute__ ((packed)) *scmc_area;
+
+	chp->cmg_chars = NULL;
+	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
+	if (!cmg_chars)
+		return -ENOMEM;
+
+	spin_lock_irq(&chsc_page_lock);
+	memset(chsc_page, 0, PAGE_SIZE);
+	scmc_area = chsc_page;
+	scmc_area->request.length = 0x0010;
+	scmc_area->request.code = 0x0022;
+	scmc_area->first_chpid = chp->chpid.id;
+	scmc_area->last_chpid = chp->chpid.id;
+
+	ccode = chsc(scmc_area);
+	if (ccode > 0) {
+		ret = (ccode == 3) ? -ENODEV : -EBUSY;
+		goto out;
+	}
+
+	ret = chsc_error_from_response(scmc_area->response.code);
+	if (ret) {
+		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
+			      scmc_area->response.code);
+		goto out;
+	}
+	if (scmc_area->not_valid) {
+		chp->cmg = -1;
+		chp->shared = -1;
+		goto out;
+	}
+	chp->cmg = scmc_area->cmg;
+	chp->shared = scmc_area->shared;
+	if (chp->cmg != 2 && chp->cmg != 3) {
+		/* No cmg-dependent data. */
+		goto out;
+	}
+	chp->cmg_chars = cmg_chars;
+	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
+				  (struct cmg_chars *) &scmc_area->data);
+out:
+	spin_unlock_irq(&chsc_page_lock);
+	if (!chp->cmg_chars)
+		kfree(cmg_chars);
+
+	return ret;
+}
+
+int __init chsc_init(void)
+{
+	int ret;
+
+	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sei_page || !chsc_page) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
+	if (ret)
+		goto out_err;
+	return ret;
+out_err:
+	free_page((unsigned long)chsc_page);
+	free_page((unsigned long)sei_page);
+	return ret;
+}
+
+void __init chsc_init_cleanup(void)
+{
+	crw_unregister_handler(CRW_RSC_CSS);
+	free_page((unsigned long)chsc_page);
+	free_page((unsigned long)sei_page);
+}
+
+int chsc_enable_facility(int operation_code)
+{
+	unsigned long flags;
+	int ret;
+	struct {
+		struct chsc_header request;
+		u8 reserved1:4;
+		u8 format:4;
+		u8 reserved2;
+		u16 operation_code;
+		u32 reserved3;
+		u32 reserved4;
+		u32 operation_data_area[252];
+		struct chsc_header response;
+		u32 reserved5:4;
+		u32 format2:4;
+		u32 reserved6:24;
+	} __attribute__ ((packed)) *sda_area;
+
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	memset(chsc_page, 0, PAGE_SIZE);
+	sda_area = chsc_page;
+	sda_area->request.length = 0x0400;
+	sda_area->request.code = 0x0031;
+	sda_area->operation_code = operation_code;
+
+	ret = chsc(sda_area);
+	if (ret > 0) {
+		ret = (ret == 3) ? -ENODEV : -EBUSY;
+		goto out;
+	}
+
+	switch (sda_area->response.code) {
+	case 0x0101:
+		ret = -EOPNOTSUPP;
+		break;
+	default:
+		ret = chsc_error_from_response(sda_area->response.code);
+	}
+	if (ret != 0)
+		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
+			      operation_code, sda_area->response.code);
+out:
+	spin_unlock_irqrestore(&chsc_page_lock, flags);
+	return ret;
+}
+
+struct css_general_char css_general_characteristics;
+struct css_chsc_char css_chsc_characteristics;
+
+int __init
+chsc_determine_css_characteristics(void)
+{
+	int result;
+	struct {
+		struct chsc_header request;
+		u32 reserved1;
+		u32 reserved2;
+		u32 reserved3;
+		struct chsc_header response;
+		u32 reserved4;
+		u32 general_char[510];
+		u32 chsc_char[508];
+	} __attribute__ ((packed)) *scsc_area;
+
+	spin_lock_irq(&chsc_page_lock);
+	memset(chsc_page, 0, PAGE_SIZE);
+	scsc_area = chsc_page;
+	scsc_area->request.length = 0x0010;
+	scsc_area->request.code = 0x0010;
+
+	result = chsc(scsc_area);
+	if (result) {
+		result = (result == 3) ? -ENODEV : -EBUSY;
+		goto exit;
+	}
+
+	result = chsc_error_from_response(scsc_area->response.code);
+	if (result == 0) {
+		memcpy(&css_general_characteristics, scsc_area->general_char,
+		       sizeof(css_general_characteristics));
+		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
+		       sizeof(css_chsc_characteristics));
+	} else
+		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
+			      scsc_area->response.code);
+exit:
+	spin_unlock_irq(&chsc_page_lock);
+	return result;
+}
+
+EXPORT_SYMBOL_GPL(css_general_characteristics);
+EXPORT_SYMBOL_GPL(css_chsc_characteristics);
+
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
+{
+	struct {
+		struct chsc_header request;
+		unsigned int rsvd0;
+		unsigned int op : 8;
+		unsigned int rsvd1 : 8;
+		unsigned int ctrl : 16;
+		unsigned int rsvd2[5];
+		struct chsc_header response;
+		unsigned int rsvd3[7];
+	} __attribute__ ((packed)) *rr;
+	int rc;
+
+	memset(page, 0, PAGE_SIZE);
+	rr = page;
+	rr->request.length = 0x0020;
+	rr->request.code = 0x0033;
+	rr->op = op;
+	rr->ctrl = ctrl;
+	rc = chsc(rr);
+	if (rc)
+		return -EIO;
+	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
+	return rc;
+}
+
+int chsc_sstpi(void *page, void *result, size_t size)
+{
+	struct {
+		struct chsc_header request;
+		unsigned int rsvd0[3];
+		struct chsc_header response;
+		char data[size];
+	} __attribute__ ((packed)) *rr;
+	int rc;
+
+	memset(page, 0, PAGE_SIZE);
+	rr = page;
+	rr->request.length = 0x0010;
+	rr->request.code = 0x0038;
+	rc = chsc(rr);
+	if (rc)
+		return -EIO;
+	memcpy(result, &rr->data, size);
+	return (rr->response.code == 0x0001) ? 0 : -EIO;
+}
+
+int chsc_siosl(struct subchannel_id schid)
+{
+	struct {
+		struct chsc_header request;
+		u32 word1;
+		struct subchannel_id sid;
+		u32 word3;
+		struct chsc_header response;
+		u32 word[11];
+	} __attribute__ ((packed)) *siosl_area;
+	unsigned long flags;
+	int ccode;
+	int rc;
+
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	memset(chsc_page, 0, PAGE_SIZE);
+	siosl_area = chsc_page;
+	siosl_area->request.length = 0x0010;
+	siosl_area->request.code = 0x0046;
+	siosl_area->word1 = 0x80000000;
+	siosl_area->sid = schid;
+
+	ccode = chsc(siosl_area);
+	if (ccode > 0) {
+		if (ccode == 3)
+			rc = -ENODEV;
+		else
+			rc = -EBUSY;
+		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
+			      schid.ssid, schid.sch_no, ccode);
+		goto out;
+	}
+	rc = chsc_error_from_response(siosl_area->response.code);
+	if (rc)
+		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
+			      schid.ssid, schid.sch_no,
+			      siosl_area->response.code);
+	else
+		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
+			      schid.ssid, schid.sch_no);
+out:
+	spin_unlock_irqrestore(&chsc_page_lock, flags);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(chsc_siosl);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/chsc.h b/ap/os/linux/linux-3.4.x/drivers/s390/cio/chsc.h
new file mode 100644
index 0000000..3f15b2a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/chsc.h
@@ -0,0 +1,121 @@
+#ifndef S390_CHSC_H
+#define S390_CHSC_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <asm/chpid.h>
+#include <asm/chsc.h>
+#include <asm/schid.h>
+
+#define CHSC_SDA_OC_MSS   0x2
+
+struct chsc_header {
+	u16 length;
+	u16 code;
+} __attribute__ ((packed));
+
+#define NR_MEASUREMENT_CHARS 5
+struct cmg_chars {
+	u32 values[NR_MEASUREMENT_CHARS];
+} __attribute__ ((packed));
+
+#define NR_MEASUREMENT_ENTRIES 8
+struct cmg_entry {
+	u32 values[NR_MEASUREMENT_ENTRIES];
+} __attribute__ ((packed));
+
+struct channel_path_desc {
+	u8 flags;
+	u8 lsn;
+	u8 desc;
+	u8 chpid;
+	u8 swla;
+	u8 zeroes;
+	u8 chla;
+	u8 chpp;
+} __attribute__ ((packed));
+
+struct channel_path_desc_fmt1 {
+	u8 flags;
+	u8 lsn;
+	u8 desc;
+	u8 chpid;
+	u32:24;
+	u8 chpp;
+	u32 unused[3];
+	u16 mdc;
+	u16:13;
+	u8 r:1;
+	u8 s:1;
+	u8 f:1;
+	u32 zeros[2];
+} __attribute__ ((packed));
+
+struct channel_path;
+
+struct css_chsc_char {
+	u64 res;
+	u64 : 20;
+	u32 secm : 1; /* bit 84 */
+	u32 : 1;
+	u32 scmc : 1; /* bit 86 */
+	u32 : 20;
+	u32 scssc : 1;  /* bit 107 */
+	u32 scsscf : 1; /* bit 108 */
+	u32 : 19;
+}__attribute__((packed));
+
+extern struct css_chsc_char css_chsc_characteristics;
+
+struct chsc_ssd_info {
+	u8 path_mask;
+	u8 fla_valid_mask;
+	struct chp_id chpid[8];
+	u16 fla[8];
+};
+
+struct chsc_scpd {
+	struct chsc_header request;
+	u32:2;
+	u32 m:1;
+	u32 c:1;
+	u32 fmt:4;
+	u32 cssid:8;
+	u32:4;
+	u32 rfmt:4;
+	u32 first_chpid:8;
+	u32:24;
+	u32 last_chpid:8;
+	u32 zeroes1;
+	struct chsc_header response;
+	u8 data[PAGE_SIZE - 20];
+} __attribute__ ((packed));
+
+
+extern int chsc_get_ssd_info(struct subchannel_id schid,
+			     struct chsc_ssd_info *ssd);
+extern int chsc_determine_css_characteristics(void);
+extern int chsc_init(void);
+extern void chsc_init_cleanup(void);
+
+extern int chsc_enable_facility(int);
+struct channel_subsystem;
+extern int chsc_secm(struct channel_subsystem *, int);
+int __chsc_do_secm(struct channel_subsystem *css, int enable);
+
+int chsc_chp_vary(struct chp_id chpid, int on);
+int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
+				     int c, int m, void *page);
+int chsc_determine_base_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc *desc);
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc_fmt1 *desc);
+void chsc_chp_online(struct chp_id chpid);
+void chsc_chp_offline(struct chp_id chpid);
+int chsc_get_channel_measurement_chars(struct channel_path *chp);
+
+int chsc_error_from_response(int response);
+
+int chsc_siosl(struct subchannel_id schid);
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/chsc_sch.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/chsc_sch.c
new file mode 100644
index 0000000..8f9a1a3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/chsc_sch.c
@@ -0,0 +1,873 @@
+/*
+ * Driver for s390 chsc subchannels
+ *
+ * Copyright IBM Corp. 2008, 2011
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/compat.h>
+#include <asm/cio.h>
+#include <asm/chsc.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "chsc_sch.h"
+#include "ioasm.h"
+
+static debug_info_t *chsc_debug_msg_id;
+static debug_info_t *chsc_debug_log_id;
+
+#define CHSC_MSG(imp, args...) do {					\
+		debug_sprintf_event(chsc_debug_msg_id, imp, ##args);	\
+	} while (0)
+
+#define CHSC_LOG(imp, txt) do {					\
+		debug_text_event(chsc_debug_log_id, imp, txt);	\
+	} while (0)
+
+static void CHSC_LOG_HEX(int level, void *data, int length)
+{
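+	/* Log the data in chunks limited by the debug area's buffer size. */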
+	while (length > 0) {
+		debug_event(chsc_debug_log_id, level, data, length);
+		length -= chsc_debug_log_id->buf_size;
+		data += chsc_debug_log_id->buf_size;
+	}
+}
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("driver for s390 chsc subchannels");
+MODULE_LICENSE("GPL");
+
+static void chsc_subchannel_irq(struct subchannel *sch)
+{
+	struct chsc_private *private = dev_get_drvdata(&sch->dev);
+	struct chsc_request *request = private->request;
+	struct irb *irb = (struct irb *)&S390_lowcore.irb;
+
+	CHSC_LOG(4, "irb");
+	CHSC_LOG_HEX(4, irb, sizeof(*irb));
+	kstat_cpu(smp_processor_id()).irqs[IOINT_CSC]++;
+
+	/* Copy irb to provided request and set done. */
+	if (!request) {
+		CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
+			 sch->schid.ssid, sch->schid.sch_no);
+		return;
+	}
+	private->request = NULL;
+	memcpy(&request->irb, irb, sizeof(*irb));
+	cio_update_schib(sch);
+	complete(&request->completion);
+	put_device(&sch->dev);
+}
+
+static int chsc_subchannel_probe(struct subchannel *sch)
+{
+	struct chsc_private *private;
+	int ret;
+
+	CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
+		 sch->schid.ssid, sch->schid.sch_no);
+	sch->isc = CHSC_SCH_ISC;
+	private = kzalloc(sizeof(*private), GFP_KERNEL);
+	if (!private)
+		return -ENOMEM;
+	dev_set_drvdata(&sch->dev, private);
+	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+	if (ret) {
+		CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
+			 sch->schid.ssid, sch->schid.sch_no, ret);
+		dev_set_drvdata(&sch->dev, NULL);
+		kfree(private);
+	} else {
+		if (dev_get_uevent_suppress(&sch->dev)) {
+			dev_set_uevent_suppress(&sch->dev, 0);
+			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+		}
+	}
+	return ret;
+}
+
+static int chsc_subchannel_remove(struct subchannel *sch)
+{
+	struct chsc_private *private;
+
+	cio_disable_subchannel(sch);
+	private = dev_get_drvdata(&sch->dev);
+	dev_set_drvdata(&sch->dev, NULL);
+	if (private->request) {
+		complete(&private->request->completion);
+		put_device(&sch->dev);
+	}
+	kfree(private);
+	return 0;
+}
+
+static void chsc_subchannel_shutdown(struct subchannel *sch)
+{
+	cio_disable_subchannel(sch);
+}
+
+static int chsc_subchannel_prepare(struct subchannel *sch)
+{
+	int cc;
+	struct schib schib;
+	/*
+	 * Don't allow suspend while the subchannel is not idle
+	 * since we don't have a way to clear the subchannel and
+	 * cannot disable it with a request running.
+	 */
+	cc = stsch_err(sch->schid, &schib);
+	if (!cc && scsw_stctl(&schib.scsw))
+		return -EAGAIN;
+	return 0;
+}
+
+static int chsc_subchannel_freeze(struct subchannel *sch)
+{
+	return cio_disable_subchannel(sch);
+}
+
+static int chsc_subchannel_restore(struct subchannel *sch)
+{
+	return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+}
+
+static struct css_device_id chsc_subchannel_ids[] = {
+	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
+	{ /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
+
+static struct css_driver chsc_subchannel_driver = {
+	.drv = {
+		.owner = THIS_MODULE,
+		.name = "chsc_subchannel",
+	},
+	.subchannel_type = chsc_subchannel_ids,
+	.irq = chsc_subchannel_irq,
+	.probe = chsc_subchannel_probe,
+	.remove = chsc_subchannel_remove,
+	.shutdown = chsc_subchannel_shutdown,
+	.prepare = chsc_subchannel_prepare,
+	.freeze = chsc_subchannel_freeze,
+	.thaw = chsc_subchannel_restore,
+	.restore = chsc_subchannel_restore,
+};
+
+static int __init chsc_init_dbfs(void)
+{
+	chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
+					   16 * sizeof(long));
+	if (!chsc_debug_msg_id)
+		goto out;
+	debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
+	debug_set_level(chsc_debug_msg_id, 2);
+	chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
+	if (!chsc_debug_log_id)
+		goto out;
+	debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
+	debug_set_level(chsc_debug_log_id, 2);
+	return 0;
+out:
+	if (chsc_debug_msg_id)
+		debug_unregister(chsc_debug_msg_id);
+	return -ENOMEM;
+}
+
+static void chsc_remove_dbfs(void)
+{
+	debug_unregister(chsc_debug_log_id);
+	debug_unregister(chsc_debug_msg_id);
+}
+
+static int __init chsc_init_sch_driver(void)
+{
+	return css_driver_register(&chsc_subchannel_driver);
+}
+
+static void chsc_cleanup_sch_driver(void)
+{
+	css_driver_unregister(&chsc_subchannel_driver);
+}
+
+static DEFINE_SPINLOCK(chsc_lock);
+
+static int chsc_subchannel_match_next_free(struct device *dev, void *data)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
+}
+
+static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
+{
+	struct device *dev;
+
+	dev = driver_find_device(&chsc_subchannel_driver.drv,
+				 sch ? &sch->dev : NULL, NULL,
+				 chsc_subchannel_match_next_free);
+	return dev ? to_subchannel(dev) : NULL;
+}
+
+/**
+ * chsc_async() - try to start a chsc request asynchronously
+ * @chsc_area: request to be started
+ * @request: request structure to associate
+ *
+ * Tries to start a chsc request on one of the existing chsc subchannels.
+ * Returns:
+ *  %0 if the request was performed synchronously
+ *  %-EINPROGRESS if the request was successfully started
+ *  %-EBUSY if all chsc subchannels are busy
+ *  %-ENODEV if no chsc subchannels are available
+ * Context:
+ *  interrupts disabled, chsc_lock held
+ */
+static int chsc_async(struct chsc_async_area *chsc_area,
+		      struct chsc_request *request)
+{
+	int cc;
+	struct chsc_private *private;
+	struct subchannel *sch = NULL;
+	int ret = -ENODEV;
+	char dbf[10];
+
+	chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
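+	/*
+	 * Try each idle chsc subchannel in turn. Condition code 0 means the
+	 * request completed synchronously, 1 means it was started and will
+	 * complete with an interrupt, 2 means this subchannel is busy.
+	 */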
+	while ((sch = chsc_get_next_subchannel(sch))) {
+		spin_lock(sch->lock);
+		private = dev_get_drvdata(&sch->dev);
+		if (private->request) {
+			spin_unlock(sch->lock);
+			ret = -EBUSY;
+			continue;
+		}
+		chsc_area->header.sid = sch->schid;
+		CHSC_LOG(2, "schid");
+		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
+		cc = chsc(chsc_area);
+		sprintf(dbf, "cc:%d", cc);
+		CHSC_LOG(2, dbf);
+		switch (cc) {
+		case 0:
+			ret = 0;
+			break;
+		case 1:
+			sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
+			ret = -EINPROGRESS;
+			private->request = request;
+			break;
+		case 2:
+			ret = -EBUSY;
+			break;
+		default:
+			ret = -ENODEV;
+		}
+		spin_unlock(sch->lock);
+		CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
+			 sch->schid.ssid, sch->schid.sch_no, cc);
+		if (ret == -EINPROGRESS)
+			return -EINPROGRESS;
+		put_device(&sch->dev);
+		if (ret == 0)
+			return 0;
+	}
+	return ret;
+}
+
+static void chsc_log_command(struct chsc_async_area *chsc_area)
+{
+	char dbf[10];
+
+	sprintf(dbf, "CHSC:%x", chsc_area->header.code);
+	CHSC_LOG(0, dbf);
+	CHSC_LOG_HEX(0, chsc_area, 32);
+}
+
+static int chsc_examine_irb(struct chsc_request *request)
+{
+	int backed_up;
+
+	if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
+		return -EIO;
+	backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
+	request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
+	if (scsw_cstat(&request->irb.scsw) == 0)
+		return 0;
+	if (!backed_up)
+		return 0;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
+		return -EIO;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
+		return -EPERM;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
+		return -EAGAIN;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
+		return -EAGAIN;
+	return -EIO;
+}
+
+static int chsc_ioctl_start(void __user *user_area)
+{
+	struct chsc_request *request;
+	struct chsc_async_area *chsc_area;
+	int ret;
+	char dbf[10];
+
+	if (!css_general_characteristics.dynio)
+		/* It makes no sense to try. */
+		return -EOPNOTSUPP;
+	chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+	if (!chsc_area)
+		return -ENOMEM;
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (!request) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	init_completion(&request->completion);
+	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	chsc_log_command(chsc_area);
+	spin_lock_irq(&chsc_lock);
+	ret = chsc_async(chsc_area, request);
+	spin_unlock_irq(&chsc_lock);
+	if (ret == -EINPROGRESS) {
+		wait_for_completion(&request->completion);
+		ret = chsc_examine_irb(request);
+	}
+	/* copy area back to user */
+	if (!ret)
+		if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
+			ret = -EFAULT;
+out_free:
+	sprintf(dbf, "ret:%d", ret);
+	CHSC_LOG(0, dbf);
+	kfree(request);
+	free_page((unsigned long)chsc_area);
+	return ret;
+}
+
+static int chsc_ioctl_info_channel_path(void __user *user_cd)
+{
+	struct chsc_chp_cd *cd;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 1;
+		u32 fmt1 : 4;
+		u32 cssid : 8;
+		u32 : 8;
+		u32 first_chpid : 8;
+		u32 : 24;
+		u32 last_chpid : 8;
+		u32 : 32;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *scpcd_area;
+
+	scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scpcd_area)
+		return -ENOMEM;
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	scpcd_area->request.length = 0x0010;
+	scpcd_area->request.code = 0x0028;
+	scpcd_area->m = cd->m;
+	scpcd_area->fmt1 = cd->fmt;
+	scpcd_area->cssid = cd->chpid.cssid;
+	scpcd_area->first_chpid = cd->chpid.id;
+	scpcd_area->last_chpid = cd->chpid.id;
+
+	ccode = chsc(scpcd_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (scpcd_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "scpcd: response code=%x\n",
+			 scpcd_area->response.code);
+		goto out_free;
+	}
+	memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
+	if (copy_to_user(user_cd, cd, sizeof(*cd)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(cd);
+	free_page((unsigned long)scpcd_area);
+	return ret;
+}
+
+static int chsc_ioctl_info_cu(void __user *user_cd)
+{
+	struct chsc_cu_cd *cd;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 1;
+		u32 fmt1 : 4;
+		u32 cssid : 8;
+		u32 : 8;
+		u32 first_cun : 8;
+		u32 : 24;
+		u32 last_cun : 8;
+		u32 : 32;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *scucd_area;
+
+	scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scucd_area)
+		return -ENOMEM;
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	scucd_area->request.length = 0x0010;
+	scucd_area->request.code = 0x0028;
+	scucd_area->m = cd->m;
+	scucd_area->fmt1 = cd->fmt;
+	scucd_area->cssid = cd->cssid;
+	scucd_area->first_cun = cd->cun;
+	scucd_area->last_cun = cd->cun;
+
+	ccode = chsc(scucd_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (scucd_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "scucd: response code=%x\n",
+			 scucd_area->response.code);
+		goto out_free;
+	}
+	memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
+	if (copy_to_user(user_cd, cd, sizeof(*cd)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(cd);
+	free_page((unsigned long)scucd_area);
+	return ret;
+}
+
+static int chsc_ioctl_info_sch_cu(void __user *user_cud)
+{
+	struct chsc_sch_cud *cud;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 5;
+		u32 fmt1 : 4;
+		u32 : 2;
+		u32 ssid : 2;
+		u32 first_sch : 16;
+		u32 : 8;
+		u32 cssid : 8;
+		u32 last_sch : 16;
+		u32 : 32;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *sscud_area;
+
+	sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sscud_area)
+		return -ENOMEM;
+	cud = kzalloc(sizeof(*cud), GFP_KERNEL);
+	if (!cud) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(cud, user_cud, sizeof(*cud))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sscud_area->request.length = 0x0010;
+	sscud_area->request.code = 0x0006;
+	sscud_area->m = cud->schid.m;
+	sscud_area->fmt1 = cud->fmt;
+	sscud_area->ssid = cud->schid.ssid;
+	sscud_area->first_sch = cud->schid.sch_no;
+	sscud_area->cssid = cud->schid.cssid;
+	sscud_area->last_sch = cud->schid.sch_no;
+
+	ccode = chsc(sscud_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sscud_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sscud: response code=%x\n",
+			 sscud_area->response.code);
+		goto out_free;
+	}
+	memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
+	if (copy_to_user(user_cud, cud, sizeof(*cud)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(cud);
+	free_page((unsigned long)sscud_area);
+	return ret;
+}
+
+static int chsc_ioctl_conf_info(void __user *user_ci)
+{
+	struct chsc_conf_info *ci;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 1;
+		u32 fmt1 : 4;
+		u32 cssid : 8;
+		u32 : 6;
+		u32 ssid : 2;
+		u32 : 8;
+		u64 : 64;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *sci_area;
+
+	sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sci_area)
+		return -ENOMEM;
+	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
+	if (!ci) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(ci, user_ci, sizeof(*ci))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sci_area->request.length = 0x0010;
+	sci_area->request.code = 0x0012;
+	sci_area->m = ci->id.m;
+	sci_area->fmt1 = ci->fmt;
+	sci_area->cssid = ci->id.cssid;
+	sci_area->ssid = ci->id.ssid;
+
+	ccode = chsc(sci_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sci_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sci: response code=%x\n",
+			 sci_area->response.code);
+		goto out_free;
+	}
+	memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
+	if (copy_to_user(user_ci, ci, sizeof(*ci)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(ci);
+	free_page((unsigned long)sci_area);
+	return ret;
+}
+
+static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
+{
+	struct chsc_comp_list *ccl;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 ctype : 8;
+		u32 : 4;
+		u32 fmt : 4;
+		u32 : 16;
+		u64 : 64;
+		u32 list_parm[2];
+		u64 : 64;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 36];
+	} __attribute__ ((packed)) *sccl_area;
+	struct {
+		u32 m : 1;
+		u32 : 31;
+		u32 cssid : 8;
+		u32 : 16;
+		u32 chpid : 8;
+	} __attribute__ ((packed)) *chpid_parm;
+	struct {
+		u32 f_cssid : 8;
+		u32 l_cssid : 8;
+		u32 : 16;
+		u32 res;
+	} __attribute__ ((packed)) *cssids_parm;
+
+	sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccl_area)
+		return -ENOMEM;
+	ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
+	if (!ccl) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sccl_area->request.length = 0x0020;
+	sccl_area->request.code = 0x0030;
+	sccl_area->fmt = ccl->req.fmt;
+	sccl_area->ctype = ccl->req.ctype;
+	switch (sccl_area->ctype) {
+	case CCL_CU_ON_CHP:
+	case CCL_IOP_CHP:
+		chpid_parm = (void *)&sccl_area->list_parm;
+		chpid_parm->m = ccl->req.chpid.m;
+		chpid_parm->cssid = ccl->req.chpid.chp.cssid;
+		chpid_parm->chpid = ccl->req.chpid.chp.id;
+		break;
+	case CCL_CSS_IMG:
+	case CCL_CSS_IMG_CONF_CHAR:
+		cssids_parm = (void *)&sccl_area->list_parm;
+		cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
+		cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
+		break;
+	}
+	ccode = chsc(sccl_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sccl_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sccl: response code=%x\n",
+			 sccl_area->response.code);
+		goto out_free;
+	}
+	memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
+	if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(ccl);
+	free_page((unsigned long)sccl_area);
+	return ret;
+}
+
+static int chsc_ioctl_chpd(void __user *user_chpd)
+{
+	struct chsc_scpd *scpd_area;
+	struct chsc_cpd_info *chpd;
+	int ret;
+
+	chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
+	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scpd_area || !chpd) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
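+	/* Unlike the helpers above, reuse the common chsc helper here. */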
+	ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
+					       chpd->rfmt, chpd->c, chpd->m,
+					       scpd_area);
+	if (ret)
+		goto out_free;
+	memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
+	if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
+		ret = -EFAULT;
+out_free:
+	kfree(chpd);
+	free_page((unsigned long)scpd_area);
+	return ret;
+}
+
+static int chsc_ioctl_dcal(void __user *user_dcal)
+{
+	struct chsc_dcal *dcal;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 atype : 8;
+		u32 : 4;
+		u32 fmt : 4;
+		u32 : 16;
+		u32 res0[2];
+		u32 list_parm[2];
+		u32 res1[2];
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 36];
+	} __attribute__ ((packed)) *sdcal_area;
+
+	sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sdcal_area)
+		return -ENOMEM;
+	dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
+	if (!dcal) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sdcal_area->request.length = 0x0020;
+	sdcal_area->request.code = 0x0034;
+	sdcal_area->atype = dcal->req.atype;
+	sdcal_area->fmt = dcal->req.fmt;
+	memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
+	       sizeof(sdcal_area->list_parm));
+
+	ccode = chsc(sdcal_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sdcal_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sdcal: response code=%x\n",
+			 sdcal_area->response.code);
+		goto out_free;
+	}
+	memcpy(&dcal->sdcal, &sdcal_area->response,
+	       sdcal_area->response.length);
+	if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(dcal);
+	free_page((unsigned long)sdcal_area);
+	return ret;
+}
+
+static long chsc_ioctl(struct file *filp, unsigned int cmd,
+		       unsigned long arg)
+{
+	void __user *argp;
+
+	CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
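+	/* A 31-bit compat caller hands in a 31-bit pointer; widen it first. */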
+	if (is_compat_task())
+		argp = compat_ptr(arg);
+	else
+		argp = (void __user *)arg;
+	switch (cmd) {
+	case CHSC_START:
+		return chsc_ioctl_start(argp);
+	case CHSC_INFO_CHANNEL_PATH:
+		return chsc_ioctl_info_channel_path(argp);
+	case CHSC_INFO_CU:
+		return chsc_ioctl_info_cu(argp);
+	case CHSC_INFO_SCH_CU:
+		return chsc_ioctl_info_sch_cu(argp);
+	case CHSC_INFO_CI:
+		return chsc_ioctl_conf_info(argp);
+	case CHSC_INFO_CCL:
+		return chsc_ioctl_conf_comp_list(argp);
+	case CHSC_INFO_CPD:
+		return chsc_ioctl_chpd(argp);
+	case CHSC_INFO_DCAL:
+		return chsc_ioctl_dcal(argp);
+	default: /* unknown ioctl number */
+		return -ENOIOCTLCMD;
+	}
+}
+
+static const struct file_operations chsc_fops = {
+	.owner = THIS_MODULE,
+	.open = nonseekable_open,
+	.unlocked_ioctl = chsc_ioctl,
+	.compat_ioctl = chsc_ioctl,
+	.llseek = no_llseek,
+};
+
+static struct miscdevice chsc_misc_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "chsc",
+	.fops = &chsc_fops,
+};
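+
+/*
+ * Rough usage sketch (not part of the driver): with udev the misc device
+ * above typically shows up as /dev/chsc, and the ioctls dispatched below
+ * can be driven from user space along these lines, assuming the request
+ * structures from the exported chsc header (field names taken from the
+ * handlers above):
+ *
+ *	int fd = open("/dev/chsc", O_RDWR);
+ *	struct chsc_cu_cd cd = { .cssid = 0, .cun = 0 };
+ *	int rc = ioctl(fd, CHSC_INFO_CU, &cd);
+ *
+ * On success (rc == 0), cd.cucb holds the copied response block.
+ */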
+
+static int __init chsc_misc_init(void)
+{
+	return misc_register(&chsc_misc_device);
+}
+
+static void chsc_misc_cleanup(void)
+{
+	misc_deregister(&chsc_misc_device);
+}
+
+static int __init chsc_sch_init(void)
+{
+	int ret;
+
+	ret = chsc_init_dbfs();
+	if (ret)
+		return ret;
+	isc_register(CHSC_SCH_ISC);
+	ret = chsc_init_sch_driver();
+	if (ret)
+		goto out_dbf;
+	ret = chsc_misc_init();
+	if (ret)
+		goto out_driver;
+	return ret;
+out_driver:
+	chsc_cleanup_sch_driver();
+out_dbf:
+	isc_unregister(CHSC_SCH_ISC);
+	chsc_remove_dbfs();
+	return ret;
+}
+
+static void __exit chsc_sch_exit(void)
+{
+	chsc_misc_cleanup();
+	chsc_cleanup_sch_driver();
+	isc_unregister(CHSC_SCH_ISC);
+	chsc_remove_dbfs();
+}
+
+module_init(chsc_sch_init);
+module_exit(chsc_sch_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/chsc_sch.h b/ap/os/linux/linux-3.4.x/drivers/s390/cio/chsc_sch.h
new file mode 100644
index 0000000..589ebfa
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/chsc_sch.h
@@ -0,0 +1,13 @@
+#ifndef _CHSC_SCH_H
+#define _CHSC_SCH_H
+
+struct chsc_request {
+	struct completion completion;
+	struct irb irb;
+};
+
+struct chsc_private {
+	struct chsc_request *request;
+};
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/cio.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/cio.c
new file mode 100644
index 0000000..a49c46c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/cio.c
@@ -0,0 +1,1151 @@
+/*
+ *  drivers/s390/cio/cio.c
+ *   S/390 common I/O routines -- low level i/o calls
+ *
+ *    Copyright IBM Corp. 1999,2008
+ *    Author(s): Ingo Adlung (adlung@de.ibm.com)
+ *		 Cornelia Huck (cornelia.huck@de.ibm.com)
+ *		 Arnd Bergmann (arndb@de.ibm.com)
+ *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <asm/cio.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/setup.h>
+#include <asm/reset.h>
+#include <asm/ipl.h>
+#include <asm/chpid.h>
+#include <asm/airq.h>
+#include <asm/isc.h>
+#include <asm/cputime.h>
+#include <asm/fcx.h>
+#include <asm/nmi.h>
+#include <asm/crw.h>
+#include "cio.h"
+#include "css.h"
+#include "chsc.h"
+#include "ioasm.h"
+#include "io_sch.h"
+#include "blacklist.h"
+#include "cio_debug.h"
+#include "chp.h"
+
+debug_info_t *cio_debug_msg_id;
+debug_info_t *cio_debug_trace_id;
+debug_info_t *cio_debug_crw_id;
+
+/*
+ * Function: cio_debug_init
+ * Initializes three debug logs for common I/O:
+ * - cio_msg logs generic cio messages
+ * - cio_trace logs the calling of different functions
+ * - cio_crw logs machine check related cio messages
+ */
+static int __init cio_debug_init(void)
+{
+	cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
+	if (!cio_debug_msg_id)
+		goto out_unregister;
+	debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
+	debug_set_level(cio_debug_msg_id, 2);
+	cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
+	if (!cio_debug_trace_id)
+		goto out_unregister;
+	debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
+	debug_set_level(cio_debug_trace_id, 2);
+	cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
+	if (!cio_debug_crw_id)
+		goto out_unregister;
+	debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
+	debug_set_level(cio_debug_crw_id, 4);
+	return 0;
+
+out_unregister:
+	if (cio_debug_msg_id)
+		debug_unregister(cio_debug_msg_id);
+	if (cio_debug_trace_id)
+		debug_unregister(cio_debug_trace_id);
+	if (cio_debug_crw_id)
+		debug_unregister(cio_debug_crw_id);
+	return -1;
+}
+
+arch_initcall (cio_debug_init);
+
+int cio_set_options(struct subchannel *sch, int flags)
+{
+	struct io_subchannel_private *priv = to_io_private(sch);
+
+	priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
+	priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
+	priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
+	return 0;
+}
+
+static int
+cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
+{
+	char dbf_text[15];
+
+	if (lpm != 0)
+		sch->lpm &= ~lpm;
+	else
+		sch->lpm = 0;
+
+	CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
+		      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
+		      sch->schid.sch_no);
+
+	if (cio_update_schib(sch))
+		return -ENODEV;
+
+	sprintf(dbf_text, "no%s", dev_name(&sch->dev));
+	CIO_TRACE_EVENT(0, dbf_text);
+	CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
+
+	return (sch->lpm ? -EACCES : -ENODEV);
+}
+
+int
+cio_start_key (struct subchannel *sch,	/* subchannel structure */
+	       struct ccw1 * cpa,	/* logical channel prog addr */
+	       __u8 lpm,		/* logical path mask */
+	       __u8 key)                /* storage key */
+{
+	struct io_subchannel_private *priv = to_io_private(sch);
+	union orb *orb = &priv->orb;
+	int ccode;
+
+	CIO_TRACE_EVENT(5, "stIO");
+	CIO_TRACE_EVENT(5, dev_name(&sch->dev));
+
+	memset(orb, 0, sizeof(union orb));
+	/* sch is always under 2G. */
+	orb->cmd.intparm = (u32)(addr_t)sch;
+	orb->cmd.fmt = 1;
+
+	orb->cmd.pfch = priv->options.prefetch == 0;
+	orb->cmd.spnd = priv->options.suspend;
+	orb->cmd.ssic = priv->options.suspend && priv->options.inter;
+	orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
+#ifdef CONFIG_64BIT
+	/*
+	 * for 64 bit we always support 64 bit IDAWs with 4k page size only
+	 */
+	orb->cmd.c64 = 1;
+	orb->cmd.i2k = 0;
+#endif
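+	/* Only the 4-bit storage key (high nibble of @key) goes into the ORB. */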
+	orb->cmd.key = key >> 4;
+	/* issue "Start Subchannel" */
+	orb->cmd.cpa = (__u32) __pa(cpa);
+	ccode = ssch(sch->schid, orb);
+
+	/* process condition code */
+	CIO_HEX_EVENT(5, &ccode, sizeof(ccode));
+
+	switch (ccode) {
+	case 0:
+		/*
+		 * initialize device status information
+		 */
+		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
+		return 0;
+	case 1:		/* status pending */
+	case 2:		/* busy */
+		return -EBUSY;
+	case 3:		/* device/path not operational */
+		return cio_start_handle_notoper(sch, lpm);
+	default:
+		return ccode;
+	}
+}
+
+int
+cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
+{
+	return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
+}
+
+/*
+ * resume suspended I/O operation
+ */
+int
+cio_resume (struct subchannel *sch)
+{
+	int ccode;
+
+	CIO_TRACE_EVENT(4, "resIO");
+	CIO_TRACE_EVENT(4, dev_name(&sch->dev));
+
+	ccode = rsch (sch->schid);
+
+	CIO_HEX_EVENT(4, &ccode, sizeof(ccode));
+
+	switch (ccode) {
+	case 0:
+		sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
+		return 0;
+	case 1:
+		return -EBUSY;
+	case 2:
+		return -EINVAL;
+	default:
+		/*
+		 * Useless to wait for request completion,
+		 * as the device is no longer operational.

+		 */
+		return -ENODEV;
+	}
+}
+
+/*
+ * halt I/O operation
+ */
+int
+cio_halt(struct subchannel *sch)
+{
+	int ccode;
+
+	if (!sch)
+		return -ENODEV;
+
+	CIO_TRACE_EVENT(2, "haltIO");
+	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+	/*
+	 * Issue "Halt subchannel" and process condition code
+	 */
+	ccode = hsch (sch->schid);
+
+	CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+	switch (ccode) {
+	case 0:
+		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
+		return 0;
+	case 1:		/* status pending */
+	case 2:		/* busy */
+		return -EBUSY;
+	default:		/* device not operational */
+		return -ENODEV;
+	}
+}
+
+/*
+ * Clear I/O operation
+ */
+int
+cio_clear(struct subchannel *sch)
+{
+	int ccode;
+
+	if (!sch)
+		return -ENODEV;
+
+	CIO_TRACE_EVENT(2, "clearIO");
+	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+	/*
+	 * Issue "Clear subchannel" and process condition code
+	 */
+	ccode = csch (sch->schid);
+
+	CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+	switch (ccode) {
+	case 0:
+		sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
+		return 0;
+	default:		/* device not operational */
+		return -ENODEV;
+	}
+}
+
+/*
+ * Function: cio_cancel
+ * Issues a "Cancel Subchannel" on the specified subchannel
+ * Note: We don't need any fancy intparms and flags here
+ *	 since xsch is executed synchronously.
+ * Only for common I/O internal use as for now.
+ */
+int
+cio_cancel (struct subchannel *sch)
+{
+	int ccode;
+
+	if (!sch)
+		return -ENODEV;
+
+	CIO_TRACE_EVENT(2, "cancelIO");
+	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+	ccode = xsch (sch->schid);
+
+	CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+	switch (ccode) {
+	case 0:		/* success */
+		/* Update information in scsw. */
+		if (cio_update_schib(sch))
+			return -ENODEV;
+		return 0;
+	case 1:		/* status pending */
+		return -EBUSY;
+	case 2:		/* not applicable */
+		return -EINVAL;
+	default:	/* not oper */
+		return -ENODEV;
+	}
+}
+
+
+static void cio_apply_config(struct subchannel *sch, struct schib *schib)
+{
+	schib->pmcw.intparm = sch->config.intparm;
+	schib->pmcw.mbi = sch->config.mbi;
+	schib->pmcw.isc = sch->config.isc;
+	schib->pmcw.ena = sch->config.ena;
+	schib->pmcw.mme = sch->config.mme;
+	schib->pmcw.mp = sch->config.mp;
+	schib->pmcw.csense = sch->config.csense;
+	schib->pmcw.mbfc = sch->config.mbfc;
+	if (sch->config.mbfc)
+		schib->mba = sch->config.mba;
+}
+
+static int cio_check_config(struct subchannel *sch, struct schib *schib)
+{
+	return (schib->pmcw.intparm == sch->config.intparm) &&
+		(schib->pmcw.mbi == sch->config.mbi) &&
+		(schib->pmcw.isc == sch->config.isc) &&
+		(schib->pmcw.ena == sch->config.ena) &&
+		(schib->pmcw.mme == sch->config.mme) &&
+		(schib->pmcw.mp == sch->config.mp) &&
+		(schib->pmcw.csense == sch->config.csense) &&
+		(schib->pmcw.mbfc == sch->config.mbfc) &&
+		(!sch->config.mbfc || (schib->mba == sch->config.mba));
+}
+
+/*
+ * cio_commit_config - apply configuration to the subchannel
+ */
+int cio_commit_config(struct subchannel *sch)
+{
+	struct schib schib;
+	int ccode, retry, ret = 0;
+
+	if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
+		return -ENODEV;
+
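+	/*
+	 * Apply the changes with msch, then re-read the schib and verify
+	 * them; retry a few times if the subchannel is still busy.
+	 */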
+	for (retry = 0; retry < 5; retry++) {
+		/* copy desired changes to local schib */
+		cio_apply_config(sch, &schib);
+		ccode = msch_err(sch->schid, &schib);
+		if (ccode < 0) /* -EIO if msch gets a program check. */
+			return ccode;
+		switch (ccode) {
+		case 0: /* successful */
+			if (stsch_err(sch->schid, &schib) ||
+			    !css_sch_is_valid(&schib))
+				return -ENODEV;
+			if (cio_check_config(sch, &schib)) {
+				/* commit changes from local schib */
+				memcpy(&sch->schib, &schib, sizeof(schib));
+				return 0;
+			}
+			ret = -EAGAIN;
+			break;
+		case 1: /* status pending */
+			return -EBUSY;
+		case 2: /* busy */
+			udelay(100); /* allow for recovery */
+			ret = -EBUSY;
+			break;
+		case 3: /* not operational */
+			return -ENODEV;
+		}
+	}
+	return ret;
+}
+
+/**
+ * cio_update_schib - Perform stsch and update schib if subchannel is valid.
+ * @sch: subchannel on which to perform stsch
+ * Return zero on success, -ENODEV otherwise.
+ */
+int cio_update_schib(struct subchannel *sch)
+{
+	struct schib schib;
+
+	if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
+		return -ENODEV;
+
+	memcpy(&sch->schib, &schib, sizeof(schib));
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cio_update_schib);
+
+/**
+ * cio_enable_subchannel - enable a subchannel.
+ * @sch: subchannel to be enabled
+ * @intparm: interruption parameter to set
+ */
+int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
+{
+	int retry;
+	int ret;
+
+	CIO_TRACE_EVENT(2, "ensch");
+	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+	if (sch_is_pseudo_sch(sch))
+		return -EINVAL;
+	if (cio_update_schib(sch))
+		return -ENODEV;
+
+	sch->config.ena = 1;
+	sch->config.isc = sch->isc;
+	sch->config.intparm = intparm;
+
+	for (retry = 0; retry < 3; retry++) {
+		ret = cio_commit_config(sch);
+		if (ret == -EIO) {
+			/*
+			 * Got a program check in msch. Try without
+			 * the concurrent sense bit the next time.
+			 */
+			sch->config.csense = 0;
+		} else if (ret == -EBUSY) {
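+			/* -EBUSY: try to clear a pending status with tsch and retry. */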
+			struct irb irb;
+			if (tsch(sch->schid, &irb) != 0)
+				break;
+		} else
+			break;
+	}
+	CIO_HEX_EVENT(2, &ret, sizeof(ret));
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cio_enable_subchannel);
+
+/**
+ * cio_disable_subchannel - disable a subchannel.
+ * @sch: subchannel to disable
+ */
+int cio_disable_subchannel(struct subchannel *sch)
+{
+	int retry;
+	int ret;
+
+	CIO_TRACE_EVENT(2, "dissch");
+	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+	if (sch_is_pseudo_sch(sch))
+		return 0;
+	if (cio_update_schib(sch))
+		return -ENODEV;
+
+	sch->config.ena = 0;
+
+	for (retry = 0; retry < 3; retry++) {
+		ret = cio_commit_config(sch);
+		if (ret == -EBUSY) {
+			struct irb irb;
+			if (tsch(sch->schid, &irb) != 0)
+				break;
+		} else
+			break;
+	}
+	CIO_HEX_EVENT(2, &ret, sizeof(ret));
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cio_disable_subchannel);
+
+int cio_create_sch_lock(struct subchannel *sch)
+{
+	sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
+	if (!sch->lock)
+		return -ENOMEM;
+	spin_lock_init(sch->lock);
+	return 0;
+}
+
+static int cio_check_devno_blacklisted(struct subchannel *sch)
+{
+	if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
+		/*
+		 * This device must not be known to Linux. So we simply
+		 * say that there is no device and return ENODEV.
+		 */
+		CIO_MSG_EVENT(6, "Blacklisted device detected "
+			      "at devno %04X, subchannel set %x\n",
+			      sch->schib.pmcw.dev, sch->schid.ssid);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int cio_validate_io_subchannel(struct subchannel *sch)
+{
+	/* Initialization for io subchannels. */
+	if (!css_sch_is_valid(&sch->schib))
+		return -ENODEV;
+
+	/* Devno is valid. */
+	return cio_check_devno_blacklisted(sch);
+}
+
+static int cio_validate_msg_subchannel(struct subchannel *sch)
+{
+	/* Initialization for message subchannels. */
+	if (!css_sch_is_valid(&sch->schib))
+		return -ENODEV;
+
+	/* Devno is valid. */
+	return cio_check_devno_blacklisted(sch);
+}
+
+/**
+ * cio_validate_subchannel - basic validation of subchannel
+ * @sch: subchannel structure to be filled out
+ * @schid: subchannel id
+ *
+ * Find out subchannel type and initialize struct subchannel.
+ * Return codes:
+ *   0 on success
+ *   -ENXIO for non-defined subchannels
+ *   -ENODEV for invalid subchannels or blacklisted devices
+ *   -EIO for subchannels in an invalid subchannel set
+ */
+int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
+{
+	char dbf_txt[15];
+	int ccode;
+	int err;
+
+	sprintf(dbf_txt, "valsch%x", schid.sch_no);
+	CIO_TRACE_EVENT(4, dbf_txt);
+
+	/* Nuke all fields. */
+	memset(sch, 0, sizeof(struct subchannel));
+
+	sch->schid = schid;
+	if (cio_is_console(schid)) {
+		sch->lock = cio_get_console_lock();
+	} else {
+		err = cio_create_sch_lock(sch);
+		if (err)
+			goto out;
+	}
+	mutex_init(&sch->reg_mutex);
+
+	/*
+	 * The first subchannel that is not-operational (ccode==3)
+	 *  indicates that there aren't any more devices available.
+	 * If stsch gets an exception, it means the current subchannel set
+	 *  is not valid.
+	 */
+	ccode = stsch_err (schid, &sch->schib);
+	if (ccode) {
+		err = (ccode == 3) ? -ENXIO : ccode;
+		goto out;
+	}
+	/* Copy subchannel type from path management control word. */
+	sch->st = sch->schib.pmcw.st;
+
+	switch (sch->st) {
+	case SUBCHANNEL_TYPE_IO:
+		err = cio_validate_io_subchannel(sch);
+		break;
+	case SUBCHANNEL_TYPE_MSG:
+		err = cio_validate_msg_subchannel(sch);
+		break;
+	default:
+		err = 0;
+	}
+	if (err)
+		goto out;
+
+	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
+		      sch->schid.ssid, sch->schid.sch_no, sch->st);
+	return 0;
+out:
+	if (!cio_is_console(schid))
+		kfree(sch->lock);
+	sch->lock = NULL;
+	return err;
+}
+
+/*
+ * do_IRQ() handles all normal I/O device IRQ's (the special
+ *	    SMP cross-CPU interrupts have their own specific
+ *	    handlers).
+ *
+ */
+void __irq_entry do_IRQ(struct pt_regs *regs)
+{
+	struct tpi_info *tpi_info;
+	struct subchannel *sch;
+	struct irb *irb;
+	struct pt_regs *old_regs;
+
+	old_regs = set_irq_regs(regs);
+	irq_enter();
+	__this_cpu_write(s390_idle.nohz_delay, 1);
+	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+		/* Serve timer interrupts first. */
+		clock_comparator_work();
+	/*
+	 * Get interrupt information from lowcore
+	 */
+	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
+	irb = (struct irb *)&S390_lowcore.irb;
+	do {
+		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
+		if (tpi_info->adapter_IO) {
+			do_adapter_IO(tpi_info->isc);
+			continue;
+		}
+		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
+		if (!sch) {
+			/* Clear pending interrupt condition. */
+			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+			tsch(tpi_info->schid, irb);
+			continue;
+		}
+		spin_lock(sch->lock);
+		/* Store interrupt response block to lowcore. */
+		if (tsch(tpi_info->schid, irb) == 0) {
+			/* Keep subchannel information word up to date. */
+			memcpy (&sch->schib.scsw, &irb->scsw,
+				sizeof (irb->scsw));
+			/* Call interrupt handler if there is one. */
+			if (sch->driver && sch->driver->irq)
+				sch->driver->irq(sch);
+			else
+				kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+		} else
+			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+		spin_unlock(sch->lock);
+		/*
+		 * Are more interrupts pending?
+		 * If so, the tpi instruction will update the lowcore
+		 * to hold the info for the next interrupt.
+		 * We don't do this for VM because a tpi drops the cpu
+		 * out of SIE, which costs more cycles than it saves.
+		 */
+	} while (MACHINE_IS_LPAR && tpi(NULL) != 0);
+	irq_exit();
+	set_irq_regs(old_regs);
+}
+
+#ifdef CONFIG_CCW_CONSOLE
+static struct subchannel console_subchannel;
+static struct io_subchannel_private console_priv;
+static int console_subchannel_in_use;
+
+/*
+ * Use cio_tpi to get a pending interrupt and call the interrupt handler.
+ * Return non-zero if an interrupt was processed, zero otherwise.
+ */
+static int cio_tpi(void)
+{
+	struct tpi_info *tpi_info;
+	struct subchannel *sch;
+	struct irb *irb;
+	int irq_context;
+
+	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
+	if (tpi(NULL) != 1)
+		return 0;
+	kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
+	if (tpi_info->adapter_IO) {
+		do_adapter_IO(tpi_info->isc);
+		return 1;
+	}
+	irb = (struct irb *)&S390_lowcore.irb;
+	/* Store interrupt response block to lowcore. */
+	if (tsch(tpi_info->schid, irb) != 0) {
+		/* Not status pending or not operational. */
+		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+		return 1;
+	}
+	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
+	if (!sch) {
+		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+		return 1;
+	}
+	irq_context = in_interrupt();
+	if (!irq_context)
+		local_bh_disable();
+	irq_enter();
+	spin_lock(sch->lock);
+	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
+	if (sch->driver && sch->driver->irq)
+		sch->driver->irq(sch);
+	else
+		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+	spin_unlock(sch->lock);
+	irq_exit();
+	if (!irq_context)
+		_local_bh_enable();
+	return 1;
+}
+
+void *cio_get_console_priv(void)
+{
+	return &console_priv;
+}
+
+/*
+ * busy wait for the next interrupt on the console
+ */
+void wait_cons_dev(void)
+	__releases(console_subchannel.lock)
+	__acquires(console_subchannel.lock)
+{
+	unsigned long cr6      __attribute__ ((aligned (8)));
+	unsigned long save_cr6 __attribute__ ((aligned (8)));
+
+	/* 
+	 * before entering the spinlock we may already have
+	 * processed the interrupt on a different CPU...
+	 */
+	if (!console_subchannel_in_use)
+		return;
+
+	/* disable all but the console isc */
+	__ctl_store (save_cr6, 6, 6);
+	cr6 = 1UL << (31 - CONSOLE_ISC);
+	__ctl_load (cr6, 6, 6);
+
+	do {
+		spin_unlock(console_subchannel.lock);
+		if (!cio_tpi())
+			cpu_relax();
+		spin_lock(console_subchannel.lock);
+	} while (console_subchannel.schib.scsw.cmd.actl != 0);
+	/*
+	 * restore previous isc value
+	 */
+	__ctl_load (save_cr6, 6, 6);
+}
+
+static int
+cio_test_for_console(struct subchannel_id schid, void *data)
+{
+	if (stsch_err(schid, &console_subchannel.schib) != 0)
+		return -ENXIO;
+	if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) &&
+	    console_subchannel.schib.pmcw.dnv &&
+	    (console_subchannel.schib.pmcw.dev == console_devno)) {
+		console_irq = schid.sch_no;
+		return 1; /* found */
+	}
+	return 0;
+}
+
+
+static int
+cio_get_console_sch_no(void)
+{
+	struct subchannel_id schid;
+	
+	init_subchannel_id(&schid);
+	if (console_irq != -1) {
+		/* VM provided us with the irq number of the console. */
+		schid.sch_no = console_irq;
+		if (stsch_err(schid, &console_subchannel.schib) != 0 ||
+		    (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
+		    !console_subchannel.schib.pmcw.dnv)
+			return -1;
+		console_devno = console_subchannel.schib.pmcw.dev;
+	} else if (console_devno != -1) {
+		/* At least the console device number is known. */
+		for_each_subchannel(cio_test_for_console, NULL);
+		if (console_irq == -1)
+			return -1;
+	} else {
+		/* unlike in 2.4, we cannot autoprobe here, since
+		 * the channel subsystem is not fully initialized.
+		 * With some luck, the HWC console can take over */
+		return -1;
+	}
+	return console_irq;
+}
+
+struct subchannel *
+cio_probe_console(void)
+{
+	int sch_no, ret;
+	struct subchannel_id schid;
+
+	if (xchg(&console_subchannel_in_use, 1) != 0)
+		return ERR_PTR(-EBUSY);
+	sch_no = cio_get_console_sch_no();
+	if (sch_no == -1) {
+		console_subchannel_in_use = 0;
+		pr_warning("No CCW console was found\n");
+		return ERR_PTR(-ENODEV);
+	}
+	memset(&console_subchannel, 0, sizeof(struct subchannel));
+	init_subchannel_id(&schid);
+	schid.sch_no = sch_no;
+	ret = cio_validate_subchannel(&console_subchannel, schid);
+	if (ret) {
+		console_subchannel_in_use = 0;
+		return ERR_PTR(-ENODEV);
+	}
+
+	/*
+	 * enable console I/O-interrupt subclass
+	 */
+	isc_register(CONSOLE_ISC);
+	console_subchannel.config.isc = CONSOLE_ISC;
+	console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel;
+	ret = cio_commit_config(&console_subchannel);
+	if (ret) {
+		isc_unregister(CONSOLE_ISC);
+		console_subchannel_in_use = 0;
+		return ERR_PTR(ret);
+	}
+	return &console_subchannel;
+}
+
+void
+cio_release_console(void)
+{
+	console_subchannel.config.intparm = 0;
+	cio_commit_config(&console_subchannel);
+	isc_unregister(CONSOLE_ISC);
+	console_subchannel_in_use = 0;
+}
+
+/* Bah... hack to catch console special sausages. */
+int
+cio_is_console(struct subchannel_id schid)
+{
+	if (!console_subchannel_in_use)
+		return 0;
+	return schid_equal(&schid, &console_subchannel.schid);
+}
+
+struct subchannel *
+cio_get_console_subchannel(void)
+{
+	if (!console_subchannel_in_use)
+		return NULL;
+	return &console_subchannel;
+}
+
+#endif
+static int
+__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
+{
+	int retry, cc;
+
+	cc = 0;
+	for (retry=0;retry<3;retry++) {
+		schib->pmcw.ena = 0;
+		cc = msch_err(schid, schib);
+		if (cc)
+			return (cc==3?-ENODEV:-EBUSY);
+		if (stsch_err(schid, schib) || !css_sch_is_valid(schib))
+			return -ENODEV;
+		if (!schib->pmcw.ena)
+			return 0;
+	}
+	return -EBUSY; /* uhm... */
+}
+
+static int
+__clear_io_subchannel_easy(struct subchannel_id schid)
+{
+	int retry;
+
+	if (csch(schid))
+		return -ENODEV;
+	for (retry=0;retry<20;retry++) {
+		struct tpi_info ti;
+
+		if (tpi(&ti)) {
+			tsch(ti.schid, (struct irb *)&S390_lowcore.irb);
+			if (schid_equal(&ti.schid, &schid))
+				return 0;
+		}
+		udelay_simple(100);
+	}
+	return -EBUSY;
+}
+
+static void __clear_chsc_subchannel_easy(void)
+{
+	/* It seems we can only wait for a bit here :/ */
+	udelay_simple(100);
+}
+
+static int pgm_check_occured;
+
+static void cio_reset_pgm_check_handler(void)
+{
+	pgm_check_occured = 1;
+}
+
+static int stsch_reset(struct subchannel_id schid, struct schib *addr)
+{
+	int rc;
+
+	pgm_check_occured = 0;
+	s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
+	rc = stsch_err(schid, addr);
+	s390_base_pgm_handler_fn = NULL;
+
+	/* The program check handler could have changed pgm_check_occured. */
+	barrier();
+
+	if (pgm_check_occured)
+		return -EIO;
+	else
+		return rc;
+}
+
+static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
+{
+	struct schib schib;
+
+	if (stsch_reset(schid, &schib))
+		return -ENXIO;
+	if (!schib.pmcw.ena)
+		return 0;
+	switch(__disable_subchannel_easy(schid, &schib)) {
+	case 0:
+	case -ENODEV:
+		break;
+	default: /* -EBUSY */
+		switch (schib.pmcw.st) {
+		case SUBCHANNEL_TYPE_IO:
+			if (__clear_io_subchannel_easy(schid))
+				goto out; /* give up... */
+			break;
+		case SUBCHANNEL_TYPE_CHSC:
+			__clear_chsc_subchannel_easy();
+			break;
+		default:
+			/* No default clear strategy */
+			break;
+		}
+		stsch_err(schid, &schib);
+		__disable_subchannel_easy(schid, &schib);
+	}
+out:
+	return 0;
+}
+
+static atomic_t chpid_reset_count;
+
+static void s390_reset_chpids_mcck_handler(void)
+{
+	struct crw crw;
+	struct mci *mci;
+
+	/* Check for pending channel report word. */
+	mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
+	if (!mci->cp)
+		return;
+	/* Process channel report words. */
+	while (stcrw(&crw) == 0) {
+		/* Check for responses to RCHP. */
+		if (crw.slct && crw.rsc == CRW_RSC_CPATH)
+			atomic_dec(&chpid_reset_count);
+	}
+}
+
+#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
+static void css_reset(void)
+{
+	int i, ret;
+	unsigned long long timeout;
+	struct chp_id chpid;
+
+	/* Reset subchannels. */
+	for_each_subchannel(__shutdown_subchannel_easy,  NULL);
+	/* Reset channel paths. */
+	s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
+	/* Enable channel report machine checks. */
+	__ctl_set_bit(14, 28);
+	/* Temporarily reenable machine checks. */
+	local_mcck_enable();
+	chp_id_init(&chpid);
+	for (i = 0; i <= __MAX_CHPID; i++) {
+		chpid.id = i;
+		ret = rchp(chpid);
+		if ((ret == 0) || (ret == 2))
+			/*
+			 * rchp either succeeded, or another rchp is already
+			 * in progress. In either case, we'll get a crw.
+			 */
+			atomic_inc(&chpid_reset_count);
+	}
+	/* Wait for machine check for all channel paths. */
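+	/* get_clock() is in TOD units: 1 microsecond == 1 << 12, hence the shift. */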
+	timeout = get_clock() + (RCHP_TIMEOUT << 12);
+	while (atomic_read(&chpid_reset_count) != 0) {
+		if (get_clock() > timeout)
+			break;
+		cpu_relax();
+	}
+	/* Disable machine checks again. */
+	local_mcck_disable();
+	/* Disable channel report machine checks. */
+	__ctl_clear_bit(14, 28);
+	s390_base_mcck_handler_fn = NULL;
+}
+
+static struct reset_call css_reset_call = {
+	.fn = css_reset,
+};
+
+static int __init init_css_reset_call(void)
+{
+	atomic_set(&chpid_reset_count, 0);
+	register_reset_call(&css_reset_call);
+	return 0;
+}
+
+arch_initcall(init_css_reset_call);
+
+struct sch_match_id {
+	struct subchannel_id schid;
+	struct ccw_dev_id devid;
+	int rc;
+};
+
+static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
+{
+	struct schib schib;
+	struct sch_match_id *match_id = data;
+
+	if (stsch_reset(schid, &schib))
+		return -ENXIO;
+	if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
+	    (schib.pmcw.dev == match_id->devid.devno) &&
+	    (schid.ssid == match_id->devid.ssid)) {
+		match_id->schid = schid;
+		match_id->rc = 0;
+		return 1;
+	}
+	return 0;
+}
+
+static int reipl_find_schid(struct ccw_dev_id *devid,
+			    struct subchannel_id *schid)
+{
+	struct sch_match_id match_id;
+
+	match_id.devid = *devid;
+	match_id.rc = -ENODEV;
+	for_each_subchannel(__reipl_subchannel_match, &match_id);
+	if (match_id.rc == 0)
+		*schid = match_id.schid;
+	return match_id.rc;
+}
+
+extern void do_reipl_asm(__u32 schid);
+
+/* Make sure all subchannels are quiet before we re-ipl an lpar. */
+void reipl_ccw_dev(struct ccw_dev_id *devid)
+{
+	struct subchannel_id schid;
+
+	s390_reset_system(NULL, NULL);
+	if (reipl_find_schid(devid, &schid) != 0)
+		panic("IPL Device not found\n");
+	do_reipl_asm(*((__u32*)&schid));
+}
+
+int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
+{
+	struct subchannel_id schid;
+	struct schib schib;
+
+	schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
+	if (!schid.one)
+		return -ENODEV;
+	if (stsch_err(schid, &schib))
+		return -ENODEV;
+	if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
+		return -ENODEV;
+	if (!schib.pmcw.dnv)
+		return -ENODEV;
+	iplinfo->devno = schib.pmcw.dev;
+	iplinfo->is_qdio = schib.pmcw.qf;
+	return 0;
+}
+
+/**
+ * cio_tm_start_key - perform start function
+ * @sch: subchannel on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @lpm: mask of paths to use
+ * @key: storage key to use for storage access
+ *
+ * Start the tcw on the given subchannel. Return zero on success, non-zero
+ * otherwise.
+ */
+int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
+{
+	int cc;
+	union orb *orb = &to_io_private(sch)->orb;
+
+	memset(orb, 0, sizeof(union orb));
+	orb->tm.intparm = (u32) (addr_t) sch;
+	orb->tm.key = key >> 4;
+	orb->tm.b = 1;
+	orb->tm.lpm = lpm ? lpm : sch->lpm;
+	orb->tm.tcw = (u32) (addr_t) tcw;
+	cc = ssch(sch->schid, orb);
+	switch (cc) {
+	case 0:
+		return 0;
+	case 1:
+	case 2:
+		return -EBUSY;
+	default:
+		return cio_start_handle_notoper(sch, lpm);
+	}
+}
+
+/**
+ * cio_tm_intrg - perform interrogate function
+ * @sch: subchannel on which to perform the interrogate function
+ *
+ * If the specified subchannel is running in transport-mode, perform the
+ * interrogate function. Return zero on success, non-zero otherwise.
+ */
+int cio_tm_intrg(struct subchannel *sch)
+{
+	int cc;
+
+	if (!to_io_private(sch)->orb.tm.b)
+		return -EINVAL;
+	cc = xsch(sch->schid);
+	switch (cc) {
+	case 0:
+	case 2:
+		return 0;
+	case 1:
+		return -EBUSY;
+	default:
+		return -ENODEV;
+	}
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/cio.h b/ap/os/linux/linux-3.4.x/drivers/s390/cio/cio.h
new file mode 100644
index 0000000..4a1ff5c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/cio.h
@@ -0,0 +1,143 @@
+#ifndef S390_CIO_H
+#define S390_CIO_H
+
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <asm/chpid.h>
+#include <asm/cio.h>
+#include <asm/fcx.h>
+#include <asm/schid.h>
+#include "chsc.h"
+
+/*
+ * path management control word
+ */
+struct pmcw {
+	u32 intparm;		/* interruption parameter */
+	u32 qf	 : 1;		/* qdio facility */
+	u32 w	 : 1;
+	u32 isc  : 3;		/* interruption subclass */
+	u32 res5 : 3;		/* reserved zeros */
+	u32 ena  : 1;		/* enabled */
+	u32 lm	 : 2;		/* limit mode */
+	u32 mme  : 2;		/* measurement-mode enable */
+	u32 mp	 : 1;		/* multipath mode */
+	u32 tf	 : 1;		/* timing facility */
+	u32 dnv  : 1;		/* device number valid */
+	u32 dev  : 16;		/* device number */
+	u8  lpm;		/* logical path mask */
+	u8  pnom;		/* path not operational mask */
+	u8  lpum;		/* last path used mask */
+	u8  pim;		/* path installed mask */
+	u16 mbi;		/* measurement-block index */
+	u8  pom;		/* path operational mask */
+	u8  pam;		/* path available mask */
+	u8  chpid[8];		/* CHPID 0-7 (if available) */
+	u32 unused1 : 8;	/* reserved zeros */
+	u32 st	    : 3;	/* subchannel type */
+	u32 unused2 : 18;	/* reserved zeros */
+	u32 mbfc    : 1;	/* measurement block format control */
+	u32 xmwme   : 1;	/* extended measurement word mode enable */
+	u32 csense  : 1;	/* concurrent sense; can be enabled ...*/
+				/*  ... per MSCH, however, if facility */
+				/*  ... is not installed, this results */
+				/*  ... in an operand exception.       */
+} __attribute__ ((packed));
+
+/* Target SCHIB configuration. */
+struct schib_config {
+	u64 mba;
+	u32 intparm;
+	u16 mbi;
+	u32 isc:3;
+	u32 ena:1;
+	u32 mme:2;
+	u32 mp:1;
+	u32 csense:1;
+	u32 mbfc:1;
+} __attribute__ ((packed));
+
+/*
+ * subchannel information block
+ */
+struct schib {
+	struct pmcw pmcw;	 /* path management control word */
+	union scsw scsw;	 /* subchannel status word */
+	__u64 mba;               /* measurement block address */
+	__u8 mda[4];		 /* model dependent area */
+} __attribute__ ((packed,aligned(4)));
+
+/*
+ * When rescheduled, todo's with higher values will overwrite those
+ * with lower values.
+ */
+enum sch_todo {
+	SCH_TODO_NOTHING,
+	SCH_TODO_EVAL,
+	SCH_TODO_UNREG,
+};
+
+/* subchannel data structure used by I/O subroutines */
+struct subchannel {
+	struct subchannel_id schid;
+	spinlock_t *lock;	/* subchannel lock */
+	struct mutex reg_mutex;
+	enum {
+		SUBCHANNEL_TYPE_IO = 0,
+		SUBCHANNEL_TYPE_CHSC = 1,
+		SUBCHANNEL_TYPE_MSG = 2,
+		SUBCHANNEL_TYPE_ADM = 3,
+	} st;			/* subchannel type */
+	__u8 vpm;		/* verified path mask */
+	__u8 lpm;		/* logical path mask */
+	__u8 opm;               /* operational path mask */
+	struct schib schib;	/* subchannel information block */
+	int isc; /* desired interruption subclass */
+	struct chsc_ssd_info ssd_info;	/* subchannel description */
+	struct device dev;	/* entry in device tree */
+	struct css_driver *driver;
+	enum sch_todo todo;
+	struct work_struct todo_work;
+	struct schib_config config;
+} __attribute__ ((aligned(8)));
+
+#define to_subchannel(n) container_of(n, struct subchannel, dev)
+
+extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
+extern int cio_enable_subchannel(struct subchannel *, u32);
+extern int cio_disable_subchannel (struct subchannel *);
+extern int cio_cancel (struct subchannel *);
+extern int cio_clear (struct subchannel *);
+extern int cio_resume (struct subchannel *);
+extern int cio_halt (struct subchannel *);
+extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
+extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
+extern int cio_cancel (struct subchannel *);
+extern int cio_set_options (struct subchannel *, int);
+extern int cio_update_schib(struct subchannel *sch);
+extern int cio_commit_config(struct subchannel *sch);
+
+int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
+int cio_tm_intrg(struct subchannel *sch);
+
+int cio_create_sch_lock(struct subchannel *);
+void do_adapter_IO(u8 isc);
+void do_IRQ(struct pt_regs *);
+
+/* Use with care. */
+#ifdef CONFIG_CCW_CONSOLE
+extern struct subchannel *cio_probe_console(void);
+extern void cio_release_console(void);
+extern int cio_is_console(struct subchannel_id);
+extern struct subchannel *cio_get_console_subchannel(void);
+extern spinlock_t * cio_get_console_lock(void);
+extern void *cio_get_console_priv(void);
+#else
+#define cio_is_console(schid) 0
+#define cio_get_console_subchannel() NULL
+#define cio_get_console_lock() NULL
+#define cio_get_console_priv() NULL
+#endif
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/cio_debug.h b/ap/os/linux/linux-3.4.x/drivers/s390/cio/cio_debug.h
new file mode 100644
index 0000000..e64e827
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/cio_debug.h
@@ -0,0 +1,34 @@
+#ifndef CIO_DEBUG_H
+#define CIO_DEBUG_H
+
+#include <asm/debug.h>
+
+/* for use of debug feature */
+extern debug_info_t *cio_debug_msg_id;
+extern debug_info_t *cio_debug_trace_id;
+extern debug_info_t *cio_debug_crw_id;
+
+#define CIO_TRACE_EVENT(imp, txt) do {				\
+		debug_text_event(cio_debug_trace_id, imp, txt); \
+	} while (0)
+
+#define CIO_MSG_EVENT(imp, args...) do {				\
+		debug_sprintf_event(cio_debug_msg_id, imp , ##args);	\
+	} while (0)
+
+#define CIO_CRW_EVENT(imp, args...) do {				\
+		debug_sprintf_event(cio_debug_crw_id, imp , ##args);	\
+	} while (0)
+
+static inline void CIO_HEX_EVENT(int level, void *data, int length)
+{
+	if (unlikely(!cio_debug_trace_id))
+		return;
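+	/* Each trace entry holds at most buf_size bytes, so log in chunks. */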
+	while (length > 0) {
+		debug_event(cio_debug_trace_id, level, data, length);
+		length -= cio_debug_trace_id->buf_size;
+		data += cio_debug_trace_id->buf_size;
+	}
+}
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/cmf.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/cmf.c
new file mode 100644
index 0000000..204ca72
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/cmf.c
@@ -0,0 +1,1349 @@
+/*
+ * linux/drivers/s390/cio/cmf.c
+ *
+ * Linux on zSeries Channel Measurement Facility support
+ *
+ * Copyright 2000,2006 IBM Corporation
+ *
+ * Authors: Arnd Bergmann <arndb@de.ibm.com>
+ *	    Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/bootmem.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/timex.h>	/* get_clock() */
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/cmb.h>
+#include <asm/div64.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "chsc.h"
+
+/*
+ * parameter to enable cmf during boot, possible uses are:
+ *  "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
+ *               used on any subchannel
+ *  "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
+ *                     <num> subchannels, where <num> is an integer
+ *                     between 1 and 65535; the default is 1024
+ */
+#define ARGSTRING "s390cmf"
+
+/* indices for READCMB */
+enum cmb_index {
+ /* basic and extended format: */
+	cmb_ssch_rsch_count,
+	cmb_sample_count,
+	cmb_device_connect_time,
+	cmb_function_pending_time,
+	cmb_device_disconnect_time,
+	cmb_control_unit_queuing_time,
+	cmb_device_active_only_time,
+ /* extended format only: */
+	cmb_device_busy_time,
+	cmb_initial_command_response_time,
+};
+
+/**
+ * enum cmb_format - types of supported measurement block formats
+ *
+ * @CMF_BASIC:      traditional channel measurement blocks supported
+ *		    by all machines that we run on
+ * @CMF_EXTENDED:   improved format that was introduced with the z990
+ *		    machine
+ * @CMF_AUTODETECT: default: use extended format when running on a machine
+ *		    supporting extended format, otherwise fall back to
+ *		    basic format
+ */
+enum cmb_format {
+	CMF_BASIC,
+	CMF_EXTENDED,
+	CMF_AUTODETECT = -1,
+};
+
+/*
+ * format - actual format for all measurement blocks
+ *
+ * The format module parameter can be set to a value of 0 (zero)
+ * or 1, indicating basic or extended format as described for
+ * enum cmb_format.
+ */
+static int format = CMF_AUTODETECT;
+module_param(format, bint, 0444);
+
+/**
+ * struct cmb_operations - functions to use depending on cmb_format
+ *
+ * Most of these functions operate on a struct ccw_device. There is only
+ * one instance of struct cmb_operations because the format of the measurement
+ * data is guaranteed to be the same for every ccw_device.
+ *
+ * @alloc:	allocate memory for a channel measurement block,
+ *		either with the help of a special pool or with kmalloc
+ * @free:	free memory allocated with @alloc
+ * @set:	enable or disable measurement
+ * @read:	read a measurement entry at an index
+ * @readall:	read a measurement block in a common format
+ * @reset:	clear the data in the associated measurement block and
+ *		reset its time stamp
+ * @align:	align an allocated block so that the hardware can use it
+ */
+struct cmb_operations {
+	int  (*alloc)  (struct ccw_device *);
+	void (*free)   (struct ccw_device *);
+	int  (*set)    (struct ccw_device *, u32);
+	u64  (*read)   (struct ccw_device *, int);
+	int  (*readall)(struct ccw_device *, struct cmbdata *);
+	void (*reset)  (struct ccw_device *);
+	void *(*align) (void *);
+/* private: */
+	struct attribute_group *attr_group;
+};
+static struct cmb_operations *cmbops;
+
+struct cmb_data {
+	void *hw_block;   /* Pointer to block updated by hardware */
+	void *last_block; /* Last changed block copied from hardware block */
+	int size;	  /* Size of hw_block and last_block */
+	unsigned long long last_update;  /* when last_block was updated */
+};
+
+/*
+ * Our user interface is designed in terms of nanoseconds,
+ * while the hardware measures total times in its own
+ * unit.
+ */
+static inline u64 time_to_nsec(u32 value)
+{
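+	/* The hardware accumulates times in units of 128 microseconds. */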
+	return ((u64)value) * 128000ull;
+}
+
+/*
+ * Users are usually interested in average times,
+ * not accumulated time.
+ * This also helps us with atomicity problems
+ * when reading single values.
+ */
+static inline u64 time_to_avg_nsec(u32 value, u32 count)
+{
+	u64 ret;
+
+	/* no samples yet, avoid division by 0 */
+	if (count == 0)
+		return 0;
+
+	/* value comes in units of 128 µsec */
+	ret = time_to_nsec(value);
+	do_div(ret, count);
+
+	return ret;
+}
+
+/*
+ * Activate or deactivate the channel monitor. When area is NULL,
+ * the monitor is deactivated. The channel monitor needs to
+ * be active in order to measure subchannels, which also need
+ * to be enabled.
+ */
+static inline void cmf_activate(void *area, unsigned int onoff)
+{
+	register void * __gpr2 asm("2");
+	register long __gpr1 asm("1");
+
+	__gpr2 = area;
+	__gpr1 = onoff ? 2 : 0;
+	/* activate channel measurement */
+	asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
+}
+
+static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
+		     unsigned long address)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+
+	sch->config.mme = mme;
+	sch->config.mbfc = mbfc;
+	/* address can be either a block address or a block index */
+	if (mbfc)
+		sch->config.mba = address;
+	else
+		sch->config.mbi = address;
+
+	return cio_commit_config(sch);
+}
+
+struct set_schib_struct {
+	u32 mme;
+	int mbfc;
+	unsigned long address;
+	wait_queue_head_t wait;
+	int ret;
+	struct kref kref;
+};
+
+static void cmf_set_schib_release(struct kref *kref)
+{
+	struct set_schib_struct *set_data;
+
+	set_data = container_of(kref, struct set_schib_struct, kref);
+	kfree(set_data);
+}
+
+#define CMF_PENDING 1
+
+static int set_schib_wait(struct ccw_device *cdev, u32 mme,
+				int mbfc, unsigned long address)
+{
+	struct set_schib_struct *set_data;
+	int ret;
+
+	spin_lock_irq(cdev->ccwlock);
+	if (!cdev->private->cmb) {
+		ret = -ENODEV;
+		goto out;
+	}
+	set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC);
+	if (!set_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	init_waitqueue_head(&set_data->wait);
+	kref_init(&set_data->kref);
+	set_data->mme = mme;
+	set_data->mbfc = mbfc;
+	set_data->address = address;
+
+	ret = set_schib(cdev, mme, mbfc, address);
+	if (ret != -EBUSY)
+		goto out_put;
+
+	if (cdev->private->state != DEV_STATE_ONLINE) {
+		/* if the device is not online, don't even try again */
+		ret = -EBUSY;
+		goto out_put;
+	}
+
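+	/*
+	 * The subchannel is busy: park the request in cmb_wait and let
+	 * retry_set_schib() finish it once the device is idle again.
+	 */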
+	cdev->private->state = DEV_STATE_CMFCHANGE;
+	set_data->ret = CMF_PENDING;
+	cdev->private->cmb_wait = set_data;
+
+	spin_unlock_irq(cdev->ccwlock);
+	if (wait_event_interruptible(set_data->wait,
+				     set_data->ret != CMF_PENDING)) {
+		spin_lock_irq(cdev->ccwlock);
+		if (set_data->ret == CMF_PENDING) {
+			set_data->ret = -ERESTARTSYS;
+			if (cdev->private->state == DEV_STATE_CMFCHANGE)
+				cdev->private->state = DEV_STATE_ONLINE;
+		}
+		spin_unlock_irq(cdev->ccwlock);
+	}
+	spin_lock_irq(cdev->ccwlock);
+	cdev->private->cmb_wait = NULL;
+	ret = set_data->ret;
+out_put:
+	kref_put(&set_data->kref, cmf_set_schib_release);
+out:
+	spin_unlock_irq(cdev->ccwlock);
+	return ret;
+}
+
+void retry_set_schib(struct ccw_device *cdev)
+{
+	struct set_schib_struct *set_data;
+
+	set_data = cdev->private->cmb_wait;
+	if (!set_data) {
+		WARN_ON(1);
+		return;
+	}
+	kref_get(&set_data->kref);
+	set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
+				  set_data->address);
+	wake_up(&set_data->wait);
+	kref_put(&set_data->kref, cmf_set_schib_release);
+}
+
+static int cmf_copy_block(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+	void *reference_buf;
+	void *hw_block;
+	struct cmb_data *cmb_data;
+
+	sch = to_subchannel(cdev->dev.parent);
+
+	if (cio_update_schib(sch))
+		return -ENODEV;
+
+	if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
+		/* Don't copy if a start function is in progress. */
+		if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
+		    (scsw_actl(&sch->schib.scsw) &
+		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
+		    (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
+			return -EBUSY;
+	}
+	cmb_data = cdev->private->cmb;
+	hw_block = cmbops->align(cmb_data->hw_block);
+	if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size))
+		/* No need to copy. */
+		return 0;
+	reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC);
+	if (!reference_buf)
+		return -ENOMEM;
+	/* Ensure consistency of block copied from hardware. */
+	do {
+		memcpy(cmb_data->last_block, hw_block, cmb_data->size);
+		memcpy(reference_buf, hw_block, cmb_data->size);
+	} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
+	cmb_data->last_update = get_clock();
+	kfree(reference_buf);
+	return 0;
+}
+
+struct copy_block_struct {
+	wait_queue_head_t wait;
+	int ret;
+	struct kref kref;
+};
+
+static void cmf_copy_block_release(struct kref *kref)
+{
+	struct copy_block_struct *copy_block;
+
+	copy_block = container_of(kref, struct copy_block_struct, kref);
+	kfree(copy_block);
+}
+
+static int cmf_cmb_copy_wait(struct ccw_device *cdev)
+{
+	struct copy_block_struct *copy_block;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		ret = -ENODEV;
+		goto out;
+	}
+	copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC);
+	if (!copy_block) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	init_waitqueue_head(&copy_block->wait);
+	kref_init(&copy_block->kref);
+
+	ret = cmf_copy_block(cdev);
+	if (ret != -EBUSY)
+		goto out_put;
+
+	if (cdev->private->state != DEV_STATE_ONLINE) {
+		ret = -EBUSY;
+		goto out_put;
+	}
+
+	cdev->private->state = DEV_STATE_CMFUPDATE;
+	copy_block->ret = CMF_PENDING;
+	cdev->private->cmb_wait = copy_block;
+
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	if (wait_event_interruptible(copy_block->wait,
+				     copy_block->ret != CMF_PENDING)) {
+		spin_lock_irqsave(cdev->ccwlock, flags);
+		if (copy_block->ret == CMF_PENDING) {
+			copy_block->ret = -ERESTARTSYS;
+			if (cdev->private->state == DEV_STATE_CMFUPDATE)
+				cdev->private->state = DEV_STATE_ONLINE;
+		}
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
+	}
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	cdev->private->cmb_wait = NULL;
+	ret = copy_block->ret;
+out_put:
+	kref_put(&copy_block->kref, cmf_copy_block_release);
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
+}
+
+void cmf_retry_copy_block(struct ccw_device *cdev)
+{
+	struct copy_block_struct *copy_block;
+
+	copy_block = cdev->private->cmb_wait;
+	if (!copy_block) {
+		WARN_ON(1);
+		return;
+	}
+	kref_get(&copy_block->kref);
+	copy_block->ret = cmf_copy_block(cdev);
+	wake_up(&copy_block->wait);
+	kref_put(&copy_block->kref, cmf_copy_block_release);
+}
+
+static void cmf_generic_reset(struct ccw_device *cdev)
+{
+	struct cmb_data *cmb_data;
+
+	spin_lock_irq(cdev->ccwlock);
+	cmb_data = cdev->private->cmb;
+	if (cmb_data) {
+		memset(cmb_data->last_block, 0, cmb_data->size);
+		/*
+		 * Need to reset hw block as well to make the hardware start
+		 * from 0 again.
+		 */
+		memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
+		cmb_data->last_update = 0;
+	}
+	cdev->private->cmb_start_time = get_clock();
+	spin_unlock_irq(cdev->ccwlock);
+}
+
+/**
+ * struct cmb_area - container for global cmb data
+ *
+ * @mem:	pointer to CMBs (only in basic measurement mode)
+ * @list:	contains a linked list of all subchannels
+ * @num_channels: number of channels to be measured
+ * @lock:	protect concurrent access to @mem and @list
+ */
+struct cmb_area {
+	struct cmb *mem;
+	struct list_head list;
+	int num_channels;
+	spinlock_t lock;
+};
+
+static struct cmb_area cmb_area = {
+	.lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
+	.list = LIST_HEAD_INIT(cmb_area.list),
+	.num_channels  = 1024,
+};
+
+/* ****** old style CMB handling ********/
+
+/*
+ * Basic channel measurement blocks are allocated in one contiguous
+ * block of memory, which can not be moved as long as any channel
+ * is active. Therefore, a maximum number of subchannels needs to
+ * be defined somewhere. This is a module parameter, defaulting to
+ * a reasonable value of 1024, or 32 kb of memory.
+ * Current kernels don't allow kmalloc with more than 128kb, so the
+ * maximum is 4096.
+ */
+
+module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
+
+/**
+ * struct cmb - basic channel measurement block
+ * @ssch_rsch_count: number of ssch and rsch
+ * @sample_count: number of samples
+ * @device_connect_time: time of device connect
+ * @function_pending_time: time of function pending
+ * @device_disconnect_time: time of device disconnect
+ * @control_unit_queuing_time: time of control unit queuing
+ * @device_active_only_time: time of device active only
+ * @reserved: unused in basic measurement mode
+ *
+ * The measurement block as used by the hardware. The fields are described
+ * further in z/Architecture Principles of Operation, chapter 17.
+ *
+ * The cmb area made up from these blocks must be a contiguous array and may
+ * not be reallocated or freed.
+ * Only one cmb area can be present in the system.
+ */
+struct cmb {
+	u16 ssch_rsch_count;
+	u16 sample_count;
+	u32 device_connect_time;
+	u32 function_pending_time;
+	u32 device_disconnect_time;
+	u32 control_unit_queuing_time;
+	u32 device_active_only_time;
+	u32 reserved[2];
+};
+
+/*
+ * Insert a single device into the cmb_area list.
+ * Called with cmb_area.lock held from alloc_cmb.
+ */
+static int alloc_cmb_single(struct ccw_device *cdev,
+			    struct cmb_data *cmb_data)
+{
+	struct cmb *cmb;
+	struct ccw_device_private *node;
+	int ret;
+
+	spin_lock_irq(cdev->ccwlock);
+	if (!list_empty(&cdev->private->cmb_list)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/*
+	 * Find first unused cmb in cmb_area.mem.
+	 * This is a little tricky: cmb_area.list
+	 * remains sorted by ->cmb->hw_block pointers.
+	 */
+	cmb = cmb_area.mem;
+	list_for_each_entry(node, &cmb_area.list, cmb_list) {
+		struct cmb_data *data;
+		data = node->cmb;
+		if ((struct cmb*)data->hw_block > cmb)
+			break;
+		cmb++;
+	}
+	if (cmb - cmb_area.mem >= cmb_area.num_channels) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* insert new cmb */
+	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
+	cmb_data->hw_block = cmb;
+	cdev->private->cmb = cmb_data;
+	ret = 0;
+out:
+	spin_unlock_irq(cdev->ccwlock);
+	return ret;
+}
+
+static int alloc_cmb(struct ccw_device *cdev)
+{
+	int ret;
+	struct cmb *mem;
+	ssize_t size;
+	struct cmb_data *cmb_data;
+
+	/* Allocate private cmb_data. */
+	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
+	if (!cmb_data)
+		return -ENOMEM;
+
+	cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
+	if (!cmb_data->last_block) {
+		kfree(cmb_data);
+		return -ENOMEM;
+	}
+	cmb_data->size = sizeof(struct cmb);
+	spin_lock(&cmb_area.lock);
+
+	if (!cmb_area.mem) {
+		/* there is no user yet, so we need a new area */
+		size = sizeof(struct cmb) * cmb_area.num_channels;
+		WARN_ON(!list_empty(&cmb_area.list));
+
+		spin_unlock(&cmb_area.lock);
+		mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
+				 get_order(size));
+		spin_lock(&cmb_area.lock);
+
+		if (cmb_area.mem) {
+			/* ok, another thread was faster */
+			free_pages((unsigned long)mem, get_order(size));
+		} else if (!mem) {
+			/* no luck */
+			ret = -ENOMEM;
+			goto out;
+		} else {
+			/* everything ok */
+			memset(mem, 0, size);
+			cmb_area.mem = mem;
+			cmf_activate(cmb_area.mem, 1);
+		}
+	}
+
+	/* do the actual allocation */
+	ret = alloc_cmb_single(cdev, cmb_data);
+out:
+	spin_unlock(&cmb_area.lock);
+	if (ret) {
+		kfree(cmb_data->last_block);
+		kfree(cmb_data);
+	}
+	return ret;
+}
+
+static void free_cmb(struct ccw_device *cdev)
+{
+	struct ccw_device_private *priv;
+	struct cmb_data *cmb_data;
+
+	spin_lock(&cmb_area.lock);
+	spin_lock_irq(cdev->ccwlock);
+
+	priv = cdev->private;
+
+	if (list_empty(&priv->cmb_list)) {
+		/* already freed */
+		goto out;
+	}
+
+	cmb_data = priv->cmb;
+	priv->cmb = NULL;
+	if (cmb_data)
+		kfree(cmb_data->last_block);
+	kfree(cmb_data);
+	list_del_init(&priv->cmb_list);
+
+	if (list_empty(&cmb_area.list)) {
+		ssize_t size;
+		size = sizeof(struct cmb) * cmb_area.num_channels;
+		cmf_activate(NULL, 0);
+		free_pages((unsigned long)cmb_area.mem, get_order(size));
+		cmb_area.mem = NULL;
+	}
+out:
+	spin_unlock_irq(cdev->ccwlock);
+	spin_unlock(&cmb_area.lock);
+}
+
+static int set_cmb(struct ccw_device *cdev, u32 mme)
+{
+	u16 offset;
+	struct cmb_data *cmb_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
+		return -EINVAL;
+	}
+	cmb_data = cdev->private->cmb;
+	offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+
+	return set_schib_wait(cdev, mme, 0, offset);
+}
+
+static u64 read_cmb(struct ccw_device *cdev, int index)
+{
+	struct cmb *cmb;
+	u32 val;
+	int ret;
+	unsigned long flags;
+
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
+		return 0;
+
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		ret = 0;
+		goto out;
+	}
+	cmb = ((struct cmb_data *)cdev->private->cmb)->last_block;
+
+	switch (index) {
+	case cmb_ssch_rsch_count:
+		ret = cmb->ssch_rsch_count;
+		goto out;
+	case cmb_sample_count:
+		ret = cmb->sample_count;
+		goto out;
+	case cmb_device_connect_time:
+		val = cmb->device_connect_time;
+		break;
+	case cmb_function_pending_time:
+		val = cmb->function_pending_time;
+		break;
+	case cmb_device_disconnect_time:
+		val = cmb->device_disconnect_time;
+		break;
+	case cmb_control_unit_queuing_time:
+		val = cmb->control_unit_queuing_time;
+		break;
+	case cmb_device_active_only_time:
+		val = cmb->device_active_only_time;
+		break;
+	default:
+		ret = 0;
+		goto out;
+	}
+	ret = time_to_avg_nsec(val, cmb->sample_count);
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
+}
+
+static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
+{
+	struct cmb *cmb;
+	struct cmb_data *cmb_data;
+	u64 time;
+	unsigned long flags;
+	int ret;
+
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
+		return ret;
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	cmb_data = cdev->private->cmb;
+	if (!cmb_data) {
+		ret = -ENODEV;
+		goto out;
+	}
+	if (cmb_data->last_update == 0) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	cmb = cmb_data->last_block;
+	time = cmb_data->last_update - cdev->private->cmb_start_time;
+
+	memset(data, 0, sizeof(struct cmbdata));
+
+	/* we only know values before device_busy_time */
+	data->size = offsetof(struct cmbdata, device_busy_time);
+
+	/* convert to nanoseconds */
+	data->elapsed_time = (time * 1000) >> 12;
+
+	/* copy data to new structure */
+	data->ssch_rsch_count = cmb->ssch_rsch_count;
+	data->sample_count = cmb->sample_count;
+
+	/* time fields are converted to nanoseconds while copying */
+	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
+	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
+	data->device_disconnect_time =
+		time_to_nsec(cmb->device_disconnect_time);
+	data->control_unit_queuing_time
+		= time_to_nsec(cmb->control_unit_queuing_time);
+	data->device_active_only_time
+		= time_to_nsec(cmb->device_active_only_time);
+	ret = 0;
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
+}
+
+static void reset_cmb(struct ccw_device *cdev)
+{
+	cmf_generic_reset(cdev);
+}
+
+static void * align_cmb(void *area)
+{
+	return area;
+}
+
+static struct attribute_group cmf_attr_group;
+
+static struct cmb_operations cmbops_basic = {
+	.alloc	= alloc_cmb,
+	.free	= free_cmb,
+	.set	= set_cmb,
+	.read	= read_cmb,
+	.readall    = readall_cmb,
+	.reset	    = reset_cmb,
+	.align	    = align_cmb,
+	.attr_group = &cmf_attr_group,
+};
+
+/* ******** extended cmb handling ********/
+
+/**
+ * struct cmbe - extended channel measurement block
+ * @ssch_rsch_count: number of ssch and rsch
+ * @sample_count: number of samples
+ * @device_connect_time: time of device connect
+ * @function_pending_time: time of function pending
+ * @device_disconnect_time: time of device disconnect
+ * @control_unit_queuing_time: time of control unit queuing
+ * @device_active_only_time: time of device active only
+ * @device_busy_time: time of device busy
+ * @initial_command_response_time: initial command response time
+ * @reserved: unused
+ *
+ * The measurement block as used by the hardware. It may reside at any
+ * 64 bit physical address.
+ * The fields are described further in z/Architecture Principles of Operation,
+ * third edition, chapter 17.
+ */
+struct cmbe {
+	u32 ssch_rsch_count;
+	u32 sample_count;
+	u32 device_connect_time;
+	u32 function_pending_time;
+	u32 device_disconnect_time;
+	u32 control_unit_queuing_time;
+	u32 device_active_only_time;
+	u32 device_busy_time;
+	u32 initial_command_response_time;
+	u32 reserved[7];
+};
+
+/*
+ * kmalloc only guarantees 8 byte alignment, but we need cmbe
+ * pointers to be naturally aligned. Make sure to allocate
+ * enough space for two cmbes.
+ */
+static inline struct cmbe *cmbe_align(struct cmbe *c)
+{
+	unsigned long addr;
+	addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) &
+				 ~(sizeof (struct cmbe) - sizeof(long));
+	return (struct cmbe*)addr;
+}
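+/*
+ * Editor's note, a worked example of the rounding above (illustrative
+ * only): assuming sizeof(struct cmbe) == 64 and sizeof(long) == 8, an
+ * 8-byte-aligned kmalloc result such as 0x1008 becomes
+ * (0x1008 + 56) & ~56UL = 0x1040, the next 64-byte boundary, while a
+ * pointer that is already 64-byte aligned, e.g. 0x1000, stays unchanged.
+ * This rounding is why alloc_cmbe() below allocates room for two cmbes.
+ */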
+
+static int alloc_cmbe(struct ccw_device *cdev)
+{
+	struct cmbe *cmbe;
+	struct cmb_data *cmb_data;
+	int ret;
+
+	cmbe = kzalloc (sizeof (*cmbe) * 2, GFP_KERNEL);
+	if (!cmbe)
+		return -ENOMEM;
+	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
+	if (!cmb_data) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
+	if (!cmb_data->last_block) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	cmb_data->size = sizeof(struct cmbe);
+	spin_lock_irq(cdev->ccwlock);
+	if (cdev->private->cmb) {
+		spin_unlock_irq(cdev->ccwlock);
+		ret = -EBUSY;
+		goto out_free;
+	}
+	cmb_data->hw_block = cmbe;
+	cdev->private->cmb = cmb_data;
+	spin_unlock_irq(cdev->ccwlock);
+
+	/* activate global measurement if this is the first channel */
+	spin_lock(&cmb_area.lock);
+	if (list_empty(&cmb_area.list))
+		cmf_activate(NULL, 1);
+	list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
+	spin_unlock(&cmb_area.lock);
+
+	return 0;
+out_free:
+	if (cmb_data)
+		kfree(cmb_data->last_block);
+	kfree(cmb_data);
+	kfree(cmbe);
+	return ret;
+}
+
+static void free_cmbe(struct ccw_device *cdev)
+{
+	struct cmb_data *cmb_data;
+
+	spin_lock_irq(cdev->ccwlock);
+	cmb_data = cdev->private->cmb;
+	cdev->private->cmb = NULL;
+	if (cmb_data)
+		kfree(cmb_data->last_block);
+	kfree(cmb_data);
+	spin_unlock_irq(cdev->ccwlock);
+
+	/* deactivate global measurement if this is the last channel */
+	spin_lock(&cmb_area.lock);
+	list_del_init(&cdev->private->cmb_list);
+	if (list_empty(&cmb_area.list))
+		cmf_activate(NULL, 0);
+	spin_unlock(&cmb_area.lock);
+}
+
+static int set_cmbe(struct ccw_device *cdev, u32 mme)
+{
+	unsigned long mba;
+	struct cmb_data *cmb_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
+		return -EINVAL;
+	}
+	cmb_data = cdev->private->cmb;
+	mba = mme ? (unsigned long) cmbe_align(cmb_data->hw_block) : 0;
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+
+	return set_schib_wait(cdev, mme, 1, mba);
+}
+
+
+static u64 read_cmbe(struct ccw_device *cdev, int index)
+{
+	struct cmbe *cmb;
+	struct cmb_data *cmb_data;
+	u32 val;
+	int ret;
+	unsigned long flags;
+
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
+		return 0;
+
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	cmb_data = cdev->private->cmb;
+	if (!cmb_data) {
+		ret = 0;
+		goto out;
+	}
+	cmb = cmb_data->last_block;
+
+	switch (index) {
+	case cmb_ssch_rsch_count:
+		ret = cmb->ssch_rsch_count;
+		goto out;
+	case cmb_sample_count:
+		ret = cmb->sample_count;
+		goto out;
+	case cmb_device_connect_time:
+		val = cmb->device_connect_time;
+		break;
+	case cmb_function_pending_time:
+		val = cmb->function_pending_time;
+		break;
+	case cmb_device_disconnect_time:
+		val = cmb->device_disconnect_time;
+		break;
+	case cmb_control_unit_queuing_time:
+		val = cmb->control_unit_queuing_time;
+		break;
+	case cmb_device_active_only_time:
+		val = cmb->device_active_only_time;
+		break;
+	case cmb_device_busy_time:
+		val = cmb->device_busy_time;
+		break;
+	case cmb_initial_command_response_time:
+		val = cmb->initial_command_response_time;
+		break;
+	default:
+		ret = 0;
+		goto out;
+	}
+	ret = time_to_avg_nsec(val, cmb->sample_count);
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
+}
+
+static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
+{
+	struct cmbe *cmb;
+	struct cmb_data *cmb_data;
+	u64 time;
+	unsigned long flags;
+	int ret;
+
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
+		return ret;
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	cmb_data = cdev->private->cmb;
+	if (!cmb_data) {
+		ret = -ENODEV;
+		goto out;
+	}
+	if (cmb_data->last_update == 0) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	time = cmb_data->last_update - cdev->private->cmb_start_time;
+
+	memset (data, 0, sizeof(struct cmbdata));
+
+	/* we only know values before device_busy_time */
+	data->size = offsetof(struct cmbdata, device_busy_time);
+
+	/* convert to nanoseconds */
+	data->elapsed_time = (time * 1000) >> 12;
+
+	cmb = cmb_data->last_block;
+	/* copy data to new structure */
+	data->ssch_rsch_count = cmb->ssch_rsch_count;
+	data->sample_count = cmb->sample_count;
+
+	/* time fields are converted to nanoseconds while copying */
+	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
+	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
+	data->device_disconnect_time =
+		time_to_nsec(cmb->device_disconnect_time);
+	data->control_unit_queuing_time
+		= time_to_nsec(cmb->control_unit_queuing_time);
+	data->device_active_only_time
+		= time_to_nsec(cmb->device_active_only_time);
+	data->device_busy_time = time_to_nsec(cmb->device_busy_time);
+	data->initial_command_response_time
+		= time_to_nsec(cmb->initial_command_response_time);
+
+	ret = 0;
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
+}
+
+static void reset_cmbe(struct ccw_device *cdev)
+{
+	cmf_generic_reset(cdev);
+}
+
+static void * align_cmbe(void *area)
+{
+	return cmbe_align(area);
+}
+
+static struct attribute_group cmf_attr_group_ext;
+
+static struct cmb_operations cmbops_extended = {
+	.alloc	    = alloc_cmbe,
+	.free	    = free_cmbe,
+	.set	    = set_cmbe,
+	.read	    = read_cmbe,
+	.readall    = readall_cmbe,
+	.reset	    = reset_cmbe,
+	.align	    = align_cmbe,
+	.attr_group = &cmf_attr_group_ext,
+};
+
+static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
+{
+	return sprintf(buf, "%lld\n",
+		(unsigned long long) cmf_read(to_ccwdev(dev), idx));
+}
+
+static ssize_t cmb_show_avg_sample_interval(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct ccw_device *cdev;
+	long interval;
+	unsigned long count;
+	struct cmb_data *cmb_data;
+
+	cdev = to_ccwdev(dev);
+	count = cmf_read(cdev, cmb_sample_count);
+	spin_lock_irq(cdev->ccwlock);
+	cmb_data = cdev->private->cmb;
+	if (count) {
+		interval = cmb_data->last_update -
+			cdev->private->cmb_start_time;
+		interval = (interval * 1000) >> 12;
+		interval /= count;
+	} else
+		interval = -1;
+	spin_unlock_irq(cdev->ccwlock);
+	return sprintf(buf, "%ld\n", interval);
+}
+
+static ssize_t cmb_show_avg_utilization(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct cmbdata data;
+	u64 utilization;
+	unsigned long t, u;
+	int ret;
+
+	ret = cmf_readall(to_ccwdev(dev), &data);
+	if (ret == -EAGAIN || ret == -ENODEV)
+		/* No data (yet/currently) available to use for calculation. */
+		return sprintf(buf, "n/a\n");
+	else if (ret)
+		return ret;
+
+	utilization = data.device_connect_time +
+		      data.function_pending_time +
+		      data.device_disconnect_time;
+
+	/* shift to avoid long long division */
+	while (-1ul < (data.elapsed_time | utilization)) {
+		utilization >>= 8;
+		data.elapsed_time >>= 8;
+	}
+
+	/* calculate value in 0.1 percent units */
+	t = (unsigned long) data.elapsed_time / 1000;
+	u = (unsigned long) utilization / t;
+
+	return sprintf(buf, "%02ld.%01ld%%\n", u / 10, u % 10);
+}
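+/*
+ * Editor's note, illustrative arithmetic for the function above: with
+ * elapsed_time = 4,000,000 ns and utilization = 1,000,000 ns (small
+ * enough that the shift loop does not run), t = 4000, u = 250 and the
+ * printed result is "25.0%", i.e. u counts tenths of a percent.
+ */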
+
+#define cmf_attr(name) \
+static ssize_t show_##name(struct device *dev, \
+			   struct device_attribute *attr, char *buf)	\
+{ return cmb_show_attr((dev), buf, cmb_##name); } \
+static DEVICE_ATTR(name, 0444, show_##name, NULL);
+
+#define cmf_attr_avg(name) \
+static ssize_t show_avg_##name(struct device *dev, \
+			       struct device_attribute *attr, char *buf) \
+{ return cmb_show_attr((dev), buf, cmb_##name); } \
+static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);
+
+cmf_attr(ssch_rsch_count);
+cmf_attr(sample_count);
+cmf_attr_avg(device_connect_time);
+cmf_attr_avg(function_pending_time);
+cmf_attr_avg(device_disconnect_time);
+cmf_attr_avg(control_unit_queuing_time);
+cmf_attr_avg(device_active_only_time);
+cmf_attr_avg(device_busy_time);
+cmf_attr_avg(initial_command_response_time);
+
+static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
+		   NULL);
+static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
+
+static struct attribute *cmf_attributes[] = {
+	&dev_attr_avg_sample_interval.attr,
+	&dev_attr_avg_utilization.attr,
+	&dev_attr_ssch_rsch_count.attr,
+	&dev_attr_sample_count.attr,
+	&dev_attr_avg_device_connect_time.attr,
+	&dev_attr_avg_function_pending_time.attr,
+	&dev_attr_avg_device_disconnect_time.attr,
+	&dev_attr_avg_control_unit_queuing_time.attr,
+	&dev_attr_avg_device_active_only_time.attr,
+	NULL,
+};
+
+static struct attribute_group cmf_attr_group = {
+	.name  = "cmf",
+	.attrs = cmf_attributes,
+};
+
+static struct attribute *cmf_attributes_ext[] = {
+	&dev_attr_avg_sample_interval.attr,
+	&dev_attr_avg_utilization.attr,
+	&dev_attr_ssch_rsch_count.attr,
+	&dev_attr_sample_count.attr,
+	&dev_attr_avg_device_connect_time.attr,
+	&dev_attr_avg_function_pending_time.attr,
+	&dev_attr_avg_device_disconnect_time.attr,
+	&dev_attr_avg_control_unit_queuing_time.attr,
+	&dev_attr_avg_device_active_only_time.attr,
+	&dev_attr_avg_device_busy_time.attr,
+	&dev_attr_avg_initial_command_response_time.attr,
+	NULL,
+};
+
+static struct attribute_group cmf_attr_group_ext = {
+	.name  = "cmf",
+	.attrs = cmf_attributes_ext,
+};
+
+static ssize_t cmb_enable_show(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0);
+}
+
+static ssize_t cmb_enable_store(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t c)
+{
+	struct ccw_device *cdev;
+	int ret;
+	unsigned long val;
+
+	ret = strict_strtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	cdev = to_ccwdev(dev);
+
+	switch (val) {
+	case 0:
+		ret = disable_cmf(cdev);
+		break;
+	case 1:
+		ret = enable_cmf(cdev);
+		break;
+	}
+
+	return c;
+}
+
+DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
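+/*
+ * Editor's note (usage sketch, not part of the original driver): the
+ * attribute above is what userspace toggles, e.g.
+ *	echo 1 > /sys/bus/ccw/devices/0.0.1234/cmb_enable
+ * after which the counters defined earlier appear in the "cmf" group,
+ * e.g. /sys/bus/ccw/devices/0.0.1234/cmf/avg_utilization. The bus id
+ * 0.0.1234 is a placeholder.
+ */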
+
+int ccw_set_cmf(struct ccw_device *cdev, int enable)
+{
+	return cmbops->set(cdev, enable ? 2 : 0);
+}
+
+/**
+ * enable_cmf() - switch on the channel measurement for a specific device
+ *  @cdev:	The ccw device to be enabled
+ *
+ *  Returns %0 for success or a negative error value.
+ *
+ *  Context:
+ *    non-atomic
+ */
+int enable_cmf(struct ccw_device *cdev)
+{
+	int ret;
+
+	ret = cmbops->alloc(cdev);
+	if (ret)
+		return ret;
+	cmbops->reset(cdev);
+	ret = cmbops->set(cdev, 2);
+	if (ret) {
+		cmbops->free(cdev);
+		return ret;
+	}
+	ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
+	if (!ret)
+		return 0;
+	cmbops->set(cdev, 0);  //FIXME: this can fail
+	cmbops->free(cdev);
+	return ret;
+}
+
+/**
+ * disable_cmf() - switch off the channel measurement for a specific device
+ *  @cdev:	The ccw device to be disabled
+ *
+ *  Returns %0 for success or a negative error value.
+ *
+ *  Context:
+ *    non-atomic
+ */
+int disable_cmf(struct ccw_device *cdev)
+{
+	int ret;
+
+	ret = cmbops->set(cdev, 0);
+	if (ret)
+		return ret;
+	cmbops->free(cdev);
+	sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
+	return ret;
+}
+
+/**
+ * cmf_read() - read one value from the current channel measurement block
+ * @cdev:	the channel to be read
+ * @index:	the index of the value to be read
+ *
+ * Returns the value read or %0 if the value cannot be read.
+ *
+ *  Context:
+ *    any
+ */
+u64 cmf_read(struct ccw_device *cdev, int index)
+{
+	return cmbops->read(cdev, index);
+}
+
+/**
+ * cmf_readall() - read the current channel measurement block
+ * @cdev:	the channel to be read
+ * @data:	a pointer to a data block that will be filled
+ *
+ * Returns %0 on success, a negative error value otherwise.
+ *
+ *  Context:
+ *    any
+ */
+int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
+{
+	return cmbops->readall(cdev, data);
+}
+
+/* Reenable cmf when a disconnected device becomes available again. */
+int cmf_reenable(struct ccw_device *cdev)
+{
+	cmbops->reset(cdev);
+	return cmbops->set(cdev, 2);
+}
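+/*
+ * Editor's note: a minimal usage sketch (illustrative only, not part of
+ * the original file) showing how a CCW device driver could use the
+ * exported interfaces above from process context. The function name and
+ * the pr_info() reporting are made up for the example.
+ */
+#if 0
+static int example_sample_cmf(struct ccw_device *cdev)
+{
+	struct cmbdata data;
+	int ret;
+
+	ret = enable_cmf(cdev);		/* allocate and activate measurement */
+	if (ret)
+		return ret;
+	/* ... let the device perform some I/O so samples accumulate ... */
+	ret = cmf_readall(cdev, &data);	/* -EAGAIN if no sample arrived yet */
+	if (!ret)
+		pr_info("%llu ssch/rsch in %llu ns\n",
+			(unsigned long long)data.ssch_rsch_count,
+			(unsigned long long)data.elapsed_time);
+	disable_cmf(cdev);		/* deactivate and free the block */
+	return ret;
+}
+#endif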
+
+static int __init init_cmf(void)
+{
+	char *format_string;
+	char *detect_string = "parameter";
+
+	/*
+	 * If the user did not give a parameter, see if we are running on a
+	 * machine supporting extended measurement blocks, otherwise fall back
+	 * to basic mode.
+	 */
+	if (format == CMF_AUTODETECT) {
+		if (!css_general_characteristics.ext_mb) {
+			format = CMF_BASIC;
+		} else {
+			format = CMF_EXTENDED;
+		}
+		detect_string = "autodetected";
+	} else {
+		detect_string = "parameter";
+	}
+
+	switch (format) {
+	case CMF_BASIC:
+		format_string = "basic";
+		cmbops = &cmbops_basic;
+		break;
+	case CMF_EXTENDED:
+		format_string = "extended";
+		cmbops = &cmbops_extended;
+		break;
+	default:
+		return 1;
+	}
+	pr_info("Channel measurement facility initialized using format "
+		"%s (mode %s)\n", format_string, detect_string);
+	return 0;
+}
+
+module_init(init_cmf);
+
+
+MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("channel measurement facility base driver\n"
+		   "Copyright 2003 IBM Corporation\n");
+
+EXPORT_SYMBOL_GPL(enable_cmf);
+EXPORT_SYMBOL_GPL(disable_cmf);
+EXPORT_SYMBOL_GPL(cmf_read);
+EXPORT_SYMBOL_GPL(cmf_readall);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/crw.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/crw.c
new file mode 100644
index 0000000..d0a2dff
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/crw.c
@@ -0,0 +1,161 @@
+/*
+ *   Channel report handling code
+ *
+ *    Copyright IBM Corp. 2000,2009
+ *    Author(s): Ingo Adlung <adlung@de.ibm.com>,
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *		 Cornelia Huck <cornelia.huck@de.ibm.com>,
+ *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
+ */
+
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <asm/crw.h>
+#include <asm/ctl_reg.h>
+
+static DEFINE_MUTEX(crw_handler_mutex);
+static crw_handler_t crw_handlers[NR_RSCS];
+static atomic_t crw_nr_req = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);
+
+/**
+ * crw_register_handler() - register a channel report word handler
+ * @rsc: reporting source code to handle
+ * @handler: handler to be registered
+ *
+ * Returns %0 on success and a negative error value otherwise.
+ */
+int crw_register_handler(int rsc, crw_handler_t handler)
+{
+	int rc = 0;
+
+	if ((rsc < 0) || (rsc >= NR_RSCS))
+		return -EINVAL;
+	mutex_lock(&crw_handler_mutex);
+	if (crw_handlers[rsc])
+		rc = -EBUSY;
+	else
+		crw_handlers[rsc] = handler;
+	mutex_unlock(&crw_handler_mutex);
+	return rc;
+}
+
+/**
+ * crw_unregister_handler() - unregister a channel report word handler
+ * @rsc: reporting source code to handle
+ */
+void crw_unregister_handler(int rsc)
+{
+	if ((rsc < 0) || (rsc >= NR_RSCS))
+		return;
+	mutex_lock(&crw_handler_mutex);
+	crw_handlers[rsc] = NULL;
+	mutex_unlock(&crw_handler_mutex);
+}
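+/*
+ * Editor's note: an illustrative registration sketch (not part of the
+ * original file). css.c registers css_process_crw() for CRW_RSC_SCH in
+ * exactly this way from its bus init code; the handler and init function
+ * names here are made up.
+ */
+#if 0
+static void example_crw_handler(struct crw *crw0, struct crw *crw1,
+				int overflow)
+{
+	if (overflow) {
+		/* CRWs were lost; re-evaluate all state owned by this rsc. */
+		return;
+	}
+	/* crw1 is non-NULL only for a chained (two-word) report. */
+}
+
+static int __init example_crw_init(void)
+{
+	return crw_register_handler(CRW_RSC_SCH, example_crw_handler);
+}
+#endif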
+
+/*
+ * Retrieve CRWs and call function to handle event.
+ */
+static int crw_collect_info(void *unused)
+{
+	struct crw crw[2];
+	int ccode, signal;
+	unsigned int chain;
+
+repeat:
+	signal = wait_event_interruptible(crw_handler_wait_q,
+					  atomic_read(&crw_nr_req) > 0);
+	if (unlikely(signal))
+		atomic_inc(&crw_nr_req);
+	chain = 0;
+	while (1) {
+		crw_handler_t handler;
+
+		if (unlikely(chain > 1)) {
+			struct crw tmp_crw;
+
+			printk(KERN_WARNING"%s: Code does not support more "
+			       "than two chained crws; please report to "
+			       "linux390@de.ibm.com!\n", __func__);
+			ccode = stcrw(&tmp_crw);
+			printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
+			       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+			       __func__, tmp_crw.slct, tmp_crw.oflw,
+			       tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
+			       tmp_crw.erc, tmp_crw.rsid);
+			printk(KERN_WARNING"%s: This was crw number %x in the "
+			       "chain\n", __func__, chain);
+			if (ccode != 0)
+				break;
+			chain = tmp_crw.chn ? chain + 1 : 0;
+			continue;
+		}
+		ccode = stcrw(&crw[chain]);
+		if (ccode != 0)
+			break;
+		printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
+		       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		       crw[chain].slct, crw[chain].oflw, crw[chain].chn,
+		       crw[chain].rsc, crw[chain].anc, crw[chain].erc,
+		       crw[chain].rsid);
+		/* Check for overflows. */
+		if (crw[chain].oflw) {
+			int i;
+
+			pr_debug("%s: crw overflow detected!\n", __func__);
+			mutex_lock(&crw_handler_mutex);
+			for (i = 0; i < NR_RSCS; i++) {
+				if (crw_handlers[i])
+					crw_handlers[i](NULL, NULL, 1);
+			}
+			mutex_unlock(&crw_handler_mutex);
+			chain = 0;
+			continue;
+		}
+		if (crw[0].chn && !chain) {
+			chain++;
+			continue;
+		}
+		mutex_lock(&crw_handler_mutex);
+		handler = crw_handlers[crw[chain].rsc];
+		if (handler)
+			handler(&crw[0], chain ? &crw[1] : NULL, 0);
+		mutex_unlock(&crw_handler_mutex);
+		/* chain is always 0 or 1 here. */
+		chain = crw[chain].chn ? chain + 1 : 0;
+	}
+	if (atomic_dec_and_test(&crw_nr_req))
+		wake_up(&crw_handler_wait_q);
+	goto repeat;
+	return 0;
+}
+
+void crw_handle_channel_report(void)
+{
+	atomic_inc(&crw_nr_req);
+	wake_up(&crw_handler_wait_q);
+}
+
+void crw_wait_for_channel_report(void)
+{
+	crw_handle_channel_report();
+	wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
+}
+
+/*
+ * Machine checks for the channel subsystem must be enabled
+ * after the channel subsystem has been initialized.
+ */
+static int __init crw_machine_check_init(void)
+{
+	struct task_struct *task;
+
+	task = kthread_run(crw_collect_info, NULL, "kmcheck");
+	if (IS_ERR(task))
+		return PTR_ERR(task);
+	ctl_set_bit(14, 28);	/* enable channel report MCH */
+	return 0;
+}
+device_initcall(crw_machine_check_init);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/css.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/css.c
new file mode 100644
index 0000000..21908e6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/css.c
@@ -0,0 +1,1268 @@
+/*
+ * driver for channel subsystem
+ *
+ * Copyright IBM Corp. 2002, 2010
+ *
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ *	      Cornelia Huck (cornelia.huck@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+#include <linux/proc_fs.h>
+#include <asm/isc.h>
+#include <asm/crw.h>
+
+#include "css.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "ioasm.h"
+#include "chsc.h"
+#include "device.h"
+#include "idset.h"
+#include "chp.h"
+
+int css_init_done = 0;
+int max_ssid;
+
+struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
+static struct bus_type css_bus_type;
+
+int
+for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
+{
+	struct subchannel_id schid;
+	int ret;
+
+	init_subchannel_id(&schid);
+	ret = -ENODEV;
+	do {
+		do {
+			ret = fn(schid, data);
+			if (ret)
+				break;
+		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
+		schid.sch_no = 0;
+	} while (schid.ssid++ < max_ssid);
+	return ret;
+}
+
+struct cb_data {
+	void *data;
+	struct idset *set;
+	int (*fn_known_sch)(struct subchannel *, void *);
+	int (*fn_unknown_sch)(struct subchannel_id, void *);
+};
+
+static int call_fn_known_sch(struct device *dev, void *data)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct cb_data *cb = data;
+	int rc = 0;
+
+	idset_sch_del(cb->set, sch->schid);
+	if (cb->fn_known_sch)
+		rc = cb->fn_known_sch(sch, cb->data);
+	return rc;
+}
+
+static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
+{
+	struct cb_data *cb = data;
+	int rc = 0;
+
+	if (idset_sch_contains(cb->set, schid))
+		rc = cb->fn_unknown_sch(schid, cb->data);
+	return rc;
+}
+
+static int call_fn_all_sch(struct subchannel_id schid, void *data)
+{
+	struct cb_data *cb = data;
+	struct subchannel *sch;
+	int rc = 0;
+
+	sch = get_subchannel_by_schid(schid);
+	if (sch) {
+		if (cb->fn_known_sch)
+			rc = cb->fn_known_sch(sch, cb->data);
+		put_device(&sch->dev);
+	} else {
+		if (cb->fn_unknown_sch)
+			rc = cb->fn_unknown_sch(schid, cb->data);
+	}
+
+	return rc;
+}
+
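+/*
+ * Editor's note (descriptive, not part of the original file): the staged
+ * walk below first marks every possible subchannel id in an idset, visits
+ * all registered subchannels through the driver core while clearing their
+ * ids from the set, and then brute-forces only the ids that are still
+ * set, i.e. the unregistered ones. If the idset cannot be allocated it
+ * falls back to probing every id via call_fn_all_sch().
+ */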
+int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
+			       int (*fn_unknown)(struct subchannel_id,
+			       void *), void *data)
+{
+	struct cb_data cb;
+	int rc;
+
+	cb.data = data;
+	cb.fn_known_sch = fn_known;
+	cb.fn_unknown_sch = fn_unknown;
+
+	cb.set = idset_sch_new();
+	if (!cb.set)
+		/* fall back to brute force scanning in case of oom */
+		return for_each_subchannel(call_fn_all_sch, &cb);
+
+	idset_fill(cb.set);
+
+	/* Process registered subchannels. */
+	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
+	if (rc)
+		goto out;
+	/* Process unregistered subchannels. */
+	if (fn_unknown)
+		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
+out:
+	idset_free(cb.set);
+
+	return rc;
+}
+
+static void css_sch_todo(struct work_struct *work);
+
+static struct subchannel *
+css_alloc_subchannel(struct subchannel_id schid)
+{
+	struct subchannel *sch;
+	int ret;
+
+	sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
+	if (sch == NULL)
+		return ERR_PTR(-ENOMEM);
+	ret = cio_validate_subchannel (sch, schid);
+	if (ret < 0) {
+		kfree(sch);
+		return ERR_PTR(ret);
+	}
+	INIT_WORK(&sch->todo_work, css_sch_todo);
+	return sch;
+}
+
+static void
+css_subchannel_release(struct device *dev)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(dev);
+	if (!cio_is_console(sch->schid)) {
+		/* Reset intparm to zeroes. */
+		sch->config.intparm = 0;
+		cio_commit_config(sch);
+		kfree(sch->lock);
+		kfree(sch);
+	}
+}
+
+static int css_sch_device_register(struct subchannel *sch)
+{
+	int ret;
+
+	mutex_lock(&sch->reg_mutex);
+	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
+		     sch->schid.sch_no);
+	ret = device_register(&sch->dev);
+	mutex_unlock(&sch->reg_mutex);
+	return ret;
+}
+
+/**
+ * css_sch_device_unregister - unregister a subchannel
+ * @sch: subchannel to be unregistered
+ */
+void css_sch_device_unregister(struct subchannel *sch)
+{
+	mutex_lock(&sch->reg_mutex);
+	if (device_is_registered(&sch->dev))
+		device_unregister(&sch->dev);
+	mutex_unlock(&sch->reg_mutex);
+}
+EXPORT_SYMBOL_GPL(css_sch_device_unregister);
+
+static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
+{
+	int i;
+	int mask;
+
+	memset(ssd, 0, sizeof(struct chsc_ssd_info));
+	ssd->path_mask = pmcw->pim;
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (pmcw->pim & mask) {
+			chp_id_init(&ssd->chpid[i]);
+			ssd->chpid[i].id = pmcw->chpid[i];
+		}
+	}
+}
+
+static void ssd_register_chpids(struct chsc_ssd_info *ssd)
+{
+	int i;
+	int mask;
+
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (ssd->path_mask & mask)
+			if (!chp_is_registered(ssd->chpid[i]))
+				chp_new(ssd->chpid[i]);
+	}
+}
+
+void css_update_ssd_info(struct subchannel *sch)
+{
+	int ret;
+
+	if (cio_is_console(sch->schid)) {
+		/* Console is initialized too early for functions requiring
+		 * memory allocation. */
+		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
+	} else {
+		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
+		if (ret)
+			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
+		ssd_register_chpids(&sch->ssd_info);
+	}
+}
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	return sprintf(buf, "%01x\n", sch->st);
+}
+
+static DEVICE_ATTR(type, 0444, type_show, NULL);
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	return sprintf(buf, "css:t%01X\n", sch->st);
+}
+
+static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
+
+static struct attribute *subch_attrs[] = {
+	&dev_attr_type.attr,
+	&dev_attr_modalias.attr,
+	NULL,
+};
+
+static struct attribute_group subch_attr_group = {
+	.attrs = subch_attrs,
+};
+
+static const struct attribute_group *default_subch_attr_groups[] = {
+	&subch_attr_group,
+	NULL,
+};
+
+static int css_register_subchannel(struct subchannel *sch)
+{
+	int ret;
+
+	/* Initialize the subchannel structure */
+	sch->dev.parent = &channel_subsystems[0]->device;
+	sch->dev.bus = &css_bus_type;
+	sch->dev.release = &css_subchannel_release;
+	sch->dev.groups = default_subch_attr_groups;
+	/*
+	 * We don't want to generate uevents for I/O subchannels that don't
+	 * have a working ccw device behind them since they will be
+	 * unregistered before they can be used anyway, so we delay the add
+	 * uevent until device recognition has completed successfully.
+	 * Note that we suppress the uevent for all subchannel types;
+	 * the subchannel driver can decide itself when it wants to inform
+	 * userspace of its existence.
+	 */
+	dev_set_uevent_suppress(&sch->dev, 1);
+	css_update_ssd_info(sch);
+	/* make it known to the system */
+	ret = css_sch_device_register(sch);
+	if (ret) {
+		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
+			      sch->schid.ssid, sch->schid.sch_no, ret);
+		return ret;
+	}
+	if (!sch->driver) {
+		/*
+		 * No driver matched. Generate the uevent now so that
+		 * a fitting driver module may be loaded based on the
+		 * modalias.
+		 */
+		dev_set_uevent_suppress(&sch->dev, 0);
+		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+	}
+	return ret;
+}
+
+int css_probe_device(struct subchannel_id schid)
+{
+	int ret;
+	struct subchannel *sch;
+
+	if (cio_is_console(schid))
+		sch = cio_get_console_subchannel();
+	else {
+		sch = css_alloc_subchannel(schid);
+		if (IS_ERR(sch))
+			return PTR_ERR(sch);
+	}
+	ret = css_register_subchannel(sch);
+	if (ret) {
+		if (!cio_is_console(schid))
+			put_device(&sch->dev);
+	}
+	return ret;
+}
+
+static int
+check_subchannel(struct device * dev, void * data)
+{
+	struct subchannel *sch;
+	struct subchannel_id *schid = data;
+
+	sch = to_subchannel(dev);
+	return schid_equal(&sch->schid, schid);
+}
+
+struct subchannel *
+get_subchannel_by_schid(struct subchannel_id schid)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&css_bus_type, NULL,
+			      &schid, check_subchannel);
+
+	return dev ? to_subchannel(dev) : NULL;
+}
+
+/**
+ * css_sch_is_valid() - check if a subchannel is valid
+ * @schib: subchannel information block for the subchannel
+ */
+int css_sch_is_valid(struct schib *schib)
+{
+	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
+		return 0;
+	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
+		return 0;
+	return 1;
+}
+EXPORT_SYMBOL_GPL(css_sch_is_valid);
+
+static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
+{
+	struct schib schib;
+
+	if (!slow) {
+		/* Will be done on the slow path. */
+		return -EAGAIN;
+	}
+	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
+		/* Unusable - ignore. */
+		return 0;
+	}
+	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
+		      schid.sch_no);
+
+	return css_probe_device(schid);
+}
+
+static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
+{
+	int ret = 0;
+
+	if (sch->driver) {
+		if (sch->driver->sch_event)
+			ret = sch->driver->sch_event(sch, slow);
+		else
+			dev_dbg(&sch->dev,
+				"Got subchannel machine check but "
+				"no sch_event handler provided.\n");
+	}
+	if (ret != 0 && ret != -EAGAIN) {
+		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
+			      sch->schid.ssid, sch->schid.sch_no, ret);
+	}
+	return ret;
+}
+
+static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
+{
+	struct subchannel *sch;
+	int ret;
+
+	sch = get_subchannel_by_schid(schid);
+	if (sch) {
+		ret = css_evaluate_known_subchannel(sch, slow);
+		put_device(&sch->dev);
+	} else
+		ret = css_evaluate_new_subchannel(schid, slow);
+	if (ret == -EAGAIN)
+		css_schedule_eval(schid);
+}
+
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+		      sch->schid.ssid, sch->schid.sch_no, todo);
+	if (sch->todo >= todo)
+		return;
+	/* Get workqueue ref. */
+	if (!get_device(&sch->dev))
+		return;
+	sch->todo = todo;
+	if (!queue_work(cio_work_q, &sch->todo_work)) {
+		/* Already queued, release workqueue ref. */
+		put_device(&sch->dev);
+	}
+}
+
+static void css_sch_todo(struct work_struct *work)
+{
+	struct subchannel *sch;
+	enum sch_todo todo;
+	int ret;
+
+	sch = container_of(work, struct subchannel, todo_work);
+	/* Find out todo. */
+	spin_lock_irq(sch->lock);
+	todo = sch->todo;
+	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+		      sch->schid.sch_no, todo);
+	sch->todo = SCH_TODO_NOTHING;
+	spin_unlock_irq(sch->lock);
+	/* Perform todo. */
+	switch (todo) {
+	case SCH_TODO_NOTHING:
+		break;
+	case SCH_TODO_EVAL:
+		ret = css_evaluate_known_subchannel(sch, 1);
+		if (ret == -EAGAIN) {
+			spin_lock_irq(sch->lock);
+			css_sched_sch_todo(sch, todo);
+			spin_unlock_irq(sch->lock);
+		}
+		break;
+	case SCH_TODO_UNREG:
+		css_sch_device_unregister(sch);
+		break;
+	}
+	/* Release workqueue ref. */
+	put_device(&sch->dev);
+}
+
+static struct idset *slow_subchannel_set;
+static spinlock_t slow_subchannel_lock;
+static wait_queue_head_t css_eval_wq;
+static atomic_t css_eval_scheduled;
+
+static int __init slow_subchannel_init(void)
+{
+	spin_lock_init(&slow_subchannel_lock);
+	atomic_set(&css_eval_scheduled, 0);
+	init_waitqueue_head(&css_eval_wq);
+	slow_subchannel_set = idset_sch_new();
+	if (!slow_subchannel_set) {
+		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int slow_eval_known_fn(struct subchannel *sch, void *data)
+{
+	int eval;
+	int rc;
+
+	spin_lock_irq(&slow_subchannel_lock);
+	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
+	idset_sch_del(slow_subchannel_set, sch->schid);
+	spin_unlock_irq(&slow_subchannel_lock);
+	if (eval) {
+		rc = css_evaluate_known_subchannel(sch, 1);
+		if (rc == -EAGAIN)
+			css_schedule_eval(sch->schid);
+	}
+	return 0;
+}
+
+static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
+{
+	int eval;
+	int rc = 0;
+
+	spin_lock_irq(&slow_subchannel_lock);
+	eval = idset_sch_contains(slow_subchannel_set, schid);
+	idset_sch_del(slow_subchannel_set, schid);
+	spin_unlock_irq(&slow_subchannel_lock);
+	if (eval) {
+		rc = css_evaluate_new_subchannel(schid, 1);
+		switch (rc) {
+		case -EAGAIN:
+			css_schedule_eval(schid);
+			rc = 0;
+			break;
+		case -ENXIO:
+		case -ENOMEM:
+		case -EIO:
+			/* These should abort looping */
+			break;
+		default:
+			rc = 0;
+		}
+	}
+	return rc;
+}
+
+static void css_slow_path_func(struct work_struct *unused)
+{
+	unsigned long flags;
+
+	CIO_TRACE_EVENT(4, "slowpath");
+	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
+				   NULL);
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	if (idset_is_empty(slow_subchannel_set)) {
+		atomic_set(&css_eval_scheduled, 0);
+		wake_up(&css_eval_wq);
+	}
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+static DECLARE_WORK(slow_path_work, css_slow_path_func);
+struct workqueue_struct *cio_work_q;
+
+void css_schedule_eval(struct subchannel_id schid)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	idset_sch_add(slow_subchannel_set, schid);
+	atomic_set(&css_eval_scheduled, 1);
+	queue_work(cio_work_q, &slow_path_work);
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+void css_schedule_eval_all(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	idset_fill(slow_subchannel_set);
+	atomic_set(&css_eval_scheduled, 1);
+	queue_work(cio_work_q, &slow_path_work);
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+static int __unset_registered(struct device *dev, void *data)
+{
+	struct idset *set = data;
+	struct subchannel *sch = to_subchannel(dev);
+
+	idset_sch_del(set, sch->schid);
+	return 0;
+}
+
+static void css_schedule_eval_all_unreg(void)
+{
+	unsigned long flags;
+	struct idset *unreg_set;
+
+	/* Find unregistered subchannels. */
+	unreg_set = idset_sch_new();
+	if (!unreg_set) {
+		/* Fallback. */
+		css_schedule_eval_all();
+		return;
+	}
+	idset_fill(unreg_set);
+	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+	/* Apply to slow_subchannel_set. */
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	idset_add_set(slow_subchannel_set, unreg_set);
+	atomic_set(&css_eval_scheduled, 1);
+	queue_work(cio_work_q, &slow_path_work);
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+	idset_free(unreg_set);
+}
+
+void css_wait_for_slow_path(void)
+{
+	flush_workqueue(cio_work_q);
+}
+
+/* Schedule reprobing of all unregistered subchannels. */
+void css_schedule_reprobe(void)
+{
+	css_schedule_eval_all_unreg();
+}
+EXPORT_SYMBOL_GPL(css_schedule_reprobe);
+
+/*
+ * Called from the machine check handler for subchannel report words.
+ */
+static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
+{
+	struct subchannel_id mchk_schid;
+	struct subchannel *sch;
+
+	if (overflow) {
+		css_schedule_eval_all();
+		return;
+	}
+	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
+		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+		      crw0->erc, crw0->rsid);
+	if (crw1)
+		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
+			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
+			      crw1->anc, crw1->erc, crw1->rsid);
+	init_subchannel_id(&mchk_schid);
+	mchk_schid.sch_no = crw0->rsid;
+	if (crw1)
+		mchk_schid.ssid = (crw1->rsid >> 4) & 3;
+
+	if (crw0->erc == CRW_ERC_PMOD) {
+		sch = get_subchannel_by_schid(mchk_schid);
+		if (sch) {
+			css_update_ssd_info(sch);
+			put_device(&sch->dev);
+		}
+	}
+	/*
+	 * Since we are always presented with IPI in the CRW, we have to
+	 * use stsch() to find out if the subchannel in question has come
+	 * or gone.
+	 */
+	css_evaluate_subchannel(mchk_schid, 0);
+}
+
+static void __init
+css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
+{
+	struct cpuid cpu_id;
+
+	if (css_general_characteristics.mcss) {
+		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
+		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
+	} else {
+#ifdef CONFIG_SMP
+		css->global_pgid.pgid_high.cpu_addr = stap();
+#else
+		css->global_pgid.pgid_high.cpu_addr = 0;
+#endif
+	}
+	get_cpu_id(&cpu_id);
+	css->global_pgid.cpu_id = cpu_id.ident;
+	css->global_pgid.cpu_model = cpu_id.machine;
+	css->global_pgid.tod_high = tod_high;
+}
+
+static void
+channel_subsystem_release(struct device *dev)
+{
+	struct channel_subsystem *css;
+
+	css = to_css(dev);
+	mutex_destroy(&css->mutex);
+	if (css->pseudo_subchannel) {
+		/* Implies that it has been generated but never registered. */
+		css_subchannel_release(&css->pseudo_subchannel->dev);
+		css->pseudo_subchannel = NULL;
+	}
+	kfree(css);
+}
+
+static ssize_t
+css_cm_enable_show(struct device *dev, struct device_attribute *attr,
+		   char *buf)
+{
+	struct channel_subsystem *css = to_css(dev);
+	int ret;
+
+	if (!css)
+		return 0;
+	mutex_lock(&css->mutex);
+	ret = sprintf(buf, "%x\n", css->cm_enabled);
+	mutex_unlock(&css->mutex);
+	return ret;
+}
+
+static ssize_t
+css_cm_enable_store(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
+{
+	struct channel_subsystem *css = to_css(dev);
+	int ret;
+	unsigned long val;
+
+	ret = strict_strtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+	mutex_lock(&css->mutex);
+	switch (val) {
+	case 0:
+		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
+		break;
+	case 1:
+		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	mutex_unlock(&css->mutex);
+	return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
+
+static int __init setup_css(int nr)
+{
+	u32 tod_high;
+	int ret;
+	struct channel_subsystem *css;
+
+	css = channel_subsystems[nr];
+	memset(css, 0, sizeof(struct channel_subsystem));
+	css->pseudo_subchannel =
+		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
+	if (!css->pseudo_subchannel)
+		return -ENOMEM;
+	css->pseudo_subchannel->dev.parent = &css->device;
+	css->pseudo_subchannel->dev.release = css_subchannel_release;
+	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
+	mutex_init(&css->pseudo_subchannel->reg_mutex);
+	ret = cio_create_sch_lock(css->pseudo_subchannel);
+	if (ret) {
+		kfree(css->pseudo_subchannel);
+		return ret;
+	}
+	mutex_init(&css->mutex);
+	css->valid = 1;
+	css->cssid = nr;
+	dev_set_name(&css->device, "css%x", nr);
+	css->device.release = channel_subsystem_release;
+	tod_high = (u32) (get_clock() >> 32);
+	css_generate_pgid(css, tod_high);
+	return 0;
+}
+
+static int css_reboot_event(struct notifier_block *this,
+			    unsigned long event,
+			    void *ptr)
+{
+	int ret, i;
+
+	ret = NOTIFY_DONE;
+	for (i = 0; i <= __MAX_CSSID; i++) {
+		struct channel_subsystem *css;
+
+		css = channel_subsystems[i];
+		mutex_lock(&css->mutex);
+		if (css->cm_enabled)
+			if (chsc_secm(css, 0))
+				ret = NOTIFY_BAD;
+		mutex_unlock(&css->mutex);
+	}
+
+	return ret;
+}
+
+static struct notifier_block css_reboot_notifier = {
+	.notifier_call = css_reboot_event,
+};
+
+/*
+ * Since the css devices are neither on a bus nor have a class
+ * nor have a special device type, we cannot stop/restart channel
+ * path measurements via the normal suspend/resume callbacks, but have
+ * to use notifiers.
+ */
+static int css_power_event(struct notifier_block *this, unsigned long event,
+			   void *ptr)
+{
+	int ret, i;
+
+	switch (event) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		ret = NOTIFY_DONE;
+		for (i = 0; i <= __MAX_CSSID; i++) {
+			struct channel_subsystem *css;
+
+			css = channel_subsystems[i];
+			mutex_lock(&css->mutex);
+			if (!css->cm_enabled) {
+				mutex_unlock(&css->mutex);
+				continue;
+			}
+			ret = __chsc_do_secm(css, 0);
+			ret = notifier_from_errno(ret);
+			mutex_unlock(&css->mutex);
+		}
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		ret = NOTIFY_DONE;
+		for (i = 0; i <= __MAX_CSSID; i++) {
+			struct channel_subsystem *css;
+
+			css = channel_subsystems[i];
+			mutex_lock(&css->mutex);
+			if (!css->cm_enabled) {
+				mutex_unlock(&css->mutex);
+				continue;
+			}
+			ret = __chsc_do_secm(css, 1);
+			ret = notifier_from_errno(ret);
+			mutex_unlock(&css->mutex);
+		}
+		/* Search for subchannels that appeared during hibernation. */
+		css_schedule_reprobe();
+		break;
+	default:
+		ret = NOTIFY_DONE;
+	}
+	return ret;
+
+}
+static struct notifier_block css_power_notifier = {
+	.notifier_call = css_power_event,
+};
+
+/*
+ * Now that the driver core is running, we can set up our channel subsystem.
+ * The struct subchannel instances are created during probing (except for
+ * the static console subchannel).
+ */
+static int __init css_bus_init(void)
+{
+	int ret, i;
+
+	ret = chsc_init();
+	if (ret)
+		return ret;
+
+	chsc_determine_css_characteristics();
+	/* Try to enable MSS. */
+	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
+	if (ret)
+		max_ssid = 0;
+	else /* Success. */
+		max_ssid = __MAX_SSID;
+
+	ret = slow_subchannel_init();
+	if (ret)
+		goto out;
+
+	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
+	if (ret)
+		goto out;
+
+	if ((ret = bus_register(&css_bus_type)))
+		goto out;
+
+	/* Setup css structure. */
+	for (i = 0; i <= __MAX_CSSID; i++) {
+		struct channel_subsystem *css;
+
+		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
+		if (!css) {
+			ret = -ENOMEM;
+			goto out_unregister;
+		}
+		channel_subsystems[i] = css;
+		ret = setup_css(i);
+		if (ret) {
+			kfree(channel_subsystems[i]);
+			goto out_unregister;
+		}
+		ret = device_register(&css->device);
+		if (ret) {
+			put_device(&css->device);
+			goto out_unregister;
+		}
+		if (css_chsc_characteristics.secm) {
+			ret = device_create_file(&css->device,
+						 &dev_attr_cm_enable);
+			if (ret)
+				goto out_device;
+		}
+		ret = device_register(&css->pseudo_subchannel->dev);
+		if (ret) {
+			put_device(&css->pseudo_subchannel->dev);
+			goto out_file;
+		}
+	}
+	ret = register_reboot_notifier(&css_reboot_notifier);
+	if (ret)
+		goto out_unregister;
+	ret = register_pm_notifier(&css_power_notifier);
+	if (ret) {
+		unregister_reboot_notifier(&css_reboot_notifier);
+		goto out_unregister;
+	}
+	css_init_done = 1;
+
+	/* Enable default isc for I/O subchannels. */
+	isc_register(IO_SCH_ISC);
+
+	return 0;
+out_file:
+	if (css_chsc_characteristics.secm)
+		device_remove_file(&channel_subsystems[i]->device,
+				   &dev_attr_cm_enable);
+out_device:
+	device_unregister(&channel_subsystems[i]->device);
+out_unregister:
+	while (i > 0) {
+		struct channel_subsystem *css;
+
+		i--;
+		css = channel_subsystems[i];
+		device_unregister(&css->pseudo_subchannel->dev);
+		css->pseudo_subchannel = NULL;
+		if (css_chsc_characteristics.secm)
+			device_remove_file(&css->device,
+					   &dev_attr_cm_enable);
+		device_unregister(&css->device);
+	}
+	bus_unregister(&css_bus_type);
+out:
+	crw_unregister_handler(CRW_RSC_SCH);
+	idset_free(slow_subchannel_set);
+	chsc_init_cleanup();
+	pr_alert("The CSS device driver initialization failed with "
+		 "errno=%d\n", ret);
+	return ret;
+}
+
+static void __init css_bus_cleanup(void)
+{
+	struct channel_subsystem *css;
+	int i;
+
+	for (i = 0; i <= __MAX_CSSID; i++) {
+		css = channel_subsystems[i];
+		device_unregister(&css->pseudo_subchannel->dev);
+		css->pseudo_subchannel = NULL;
+		if (css_chsc_characteristics.secm)
+			device_remove_file(&css->device, &dev_attr_cm_enable);
+		device_unregister(&css->device);
+	}
+	bus_unregister(&css_bus_type);
+	crw_unregister_handler(CRW_RSC_SCH);
+	idset_free(slow_subchannel_set);
+	chsc_init_cleanup();
+	isc_unregister(IO_SCH_ISC);
+}
+
+static int __init channel_subsystem_init(void)
+{
+	int ret;
+
+	ret = css_bus_init();
+	if (ret)
+		return ret;
+	cio_work_q = create_singlethread_workqueue("cio");
+	if (!cio_work_q) {
+		ret = -ENOMEM;
+		goto out_bus;
+	}
+	ret = io_subchannel_init();
+	if (ret)
+		goto out_wq;
+
+	return ret;
+out_wq:
+	destroy_workqueue(cio_work_q);
+out_bus:
+	css_bus_cleanup();
+	return ret;
+}
+subsys_initcall(channel_subsystem_init);
+
+static int css_settle(struct device_driver *drv, void *unused)
+{
+	struct css_driver *cssdrv = to_cssdriver(drv);
+
+	if (cssdrv->settle)
+		return cssdrv->settle();
+	return 0;
+}
+
+int css_complete_work(void)
+{
+	int ret;
+
+	/* Wait for the evaluation of subchannels to finish. */
+	ret = wait_event_interruptible(css_eval_wq,
+				       atomic_read(&css_eval_scheduled) == 0);
+	if (ret)
+		return -EINTR;
+	flush_workqueue(cio_work_q);
+	/* Wait for the subchannel type specific initialization to finish */
+	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+}
+
+
+/*
+ * Wait for the initialization of devices to finish, to make sure we are
+ * done with our setup before the search for the root device starts.
+ */
+static int __init channel_subsystem_init_sync(void)
+{
+	/* Start initial subchannel evaluation. */
+	css_schedule_eval_all();
+	css_complete_work();
+	return 0;
+}
+subsys_initcall_sync(channel_subsystem_init_sync);
+
+void channel_subsystem_reinit(void)
+{
+	struct channel_path *chp;
+	struct chp_id chpid;
+
+	chsc_enable_facility(CHSC_SDA_OC_MSS);
+	chp_id_for_each(&chpid) {
+		chp = chpid_to_chp(chpid);
+		if (!chp)
+			continue;
+		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+	}
+}
+
+#ifdef CONFIG_PROC_FS
+static ssize_t cio_settle_write(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	int ret;
+
+	/* Handle pending CRW's. */
+	crw_wait_for_channel_report();
+	ret = css_complete_work();
+
+	return ret ? ret : count;
+}
+
+static const struct file_operations cio_settle_proc_fops = {
+	.open = nonseekable_open,
+	.write = cio_settle_write,
+	.llseek = no_llseek,
+};
+
+static int __init cio_settle_init(void)
+{
+	struct proc_dir_entry *entry;
+
+	entry = proc_create("cio_settle", S_IWUSR, NULL,
+			    &cio_settle_proc_fops);
+	if (!entry)
+		return -ENOMEM;
+	return 0;
+}
+device_initcall(cio_settle_init);
+#endif /*CONFIG_PROC_FS*/
+
+int sch_is_pseudo_sch(struct subchannel *sch)
+{
+	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
+}
+
+static int css_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *driver = to_cssdriver(drv);
+	struct css_device_id *id;
+
+	for (id = driver->subchannel_type; id->match_flags; id++) {
+		if (sch->st == id->type)
+			return 1;
+	}
+
+	return 0;
+}
+
+static int css_probe(struct device *dev)
+{
+	struct subchannel *sch;
+	int ret;
+
+	sch = to_subchannel(dev);
+	sch->driver = to_cssdriver(dev->driver);
+	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
+	if (ret)
+		sch->driver = NULL;
+	return ret;
+}
+
+static int css_remove(struct device *dev)
+{
+	struct subchannel *sch;
+	int ret;
+
+	sch = to_subchannel(dev);
+	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
+	sch->driver = NULL;
+	return ret;
+}
+
+static void css_shutdown(struct device *dev)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(dev);
+	if (sch->driver && sch->driver->shutdown)
+		sch->driver->shutdown(sch);
+}
+
+static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	int ret;
+
+	ret = add_uevent_var(env, "ST=%01X", sch->st);
+	if (ret)
+		return ret;
+	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
+	return ret;
+}
+
+static int css_pm_prepare(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *drv;
+
+	if (mutex_is_locked(&sch->reg_mutex))
+		return -EAGAIN;
+	if (!sch->dev.driver)
+		return 0;
+	drv = to_cssdriver(sch->dev.driver);
+	/* Notify drivers that they may not register children. */
+	return drv->prepare ? drv->prepare(sch) : 0;
+}
+
+static void css_pm_complete(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *drv;
+
+	if (!sch->dev.driver)
+		return;
+	drv = to_cssdriver(sch->dev.driver);
+	if (drv->complete)
+		drv->complete(sch);
+}
+
+static int css_pm_freeze(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *drv;
+
+	if (!sch->dev.driver)
+		return 0;
+	drv = to_cssdriver(sch->dev.driver);
+	return drv->freeze ? drv->freeze(sch) : 0;
+}
+
+static int css_pm_thaw(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *drv;
+
+	if (!sch->dev.driver)
+		return 0;
+	drv = to_cssdriver(sch->dev.driver);
+	return drv->thaw ? drv->thaw(sch) : 0;
+}
+
+static int css_pm_restore(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *drv;
+
+	css_update_ssd_info(sch);
+	if (!sch->dev.driver)
+		return 0;
+	drv = to_cssdriver(sch->dev.driver);
+	return drv->restore ? drv->restore(sch) : 0;
+}
+
+static const struct dev_pm_ops css_pm_ops = {
+	.prepare = css_pm_prepare,
+	.complete = css_pm_complete,
+	.freeze = css_pm_freeze,
+	.thaw = css_pm_thaw,
+	.restore = css_pm_restore,
+};
+
+static struct bus_type css_bus_type = {
+	.name     = "css",
+	.match    = css_bus_match,
+	.probe    = css_probe,
+	.remove   = css_remove,
+	.shutdown = css_shutdown,
+	.uevent   = css_uevent,
+	.pm = &css_pm_ops,
+};
+
+/**
+ * css_driver_register - register a css driver
+ * @cdrv: css driver to register
+ *
+ * This is mainly a wrapper around driver_register that sets name
+ * and bus_type in the embedded struct device_driver correctly.
+ */
+int css_driver_register(struct css_driver *cdrv)
+{
+	cdrv->drv.bus = &css_bus_type;
+	return driver_register(&cdrv->drv);
+}
+EXPORT_SYMBOL_GPL(css_driver_register);
+
+/**
+ * css_driver_unregister - unregister a css driver
+ * @cdrv: css driver to unregister
+ *
+ * This is a wrapper around driver_unregister.
+ */
+void css_driver_unregister(struct css_driver *cdrv)
+{
+	driver_unregister(&cdrv->drv);
+}
+EXPORT_SYMBOL_GPL(css_driver_unregister);
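+/*
+ * Editor's note: a minimal registration sketch (illustrative only, not
+ * part of the original file). The driver name, id table and callbacks are
+ * made up; the shape follows the pattern used by the I/O subchannel
+ * driver.
+ */
+#if 0
+static struct css_device_id example_subchannel_ids[] = {
+	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
+	{ /* end of list */ },
+};
+
+static struct css_driver example_css_driver = {
+	.drv = {
+		.owner = THIS_MODULE,
+		.name  = "example_subchannel",
+	},
+	.subchannel_type = example_subchannel_ids,
+	/* .irq, .probe, .remove, ... would point at real callbacks */
+};
+
+static int __init example_css_driver_init(void)
+{
+	return css_driver_register(&example_css_driver);
+}
+#endif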
+
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/css.h b/ap/os/linux/linux-3.4.x/drivers/s390/cio/css.h
new file mode 100644
index 0000000..33bb4d8
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/css.h
@@ -0,0 +1,150 @@
+#ifndef _CSS_H
+#define _CSS_H
+
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/types.h>
+
+#include <asm/cio.h>
+#include <asm/chpid.h>
+#include <asm/schid.h>
+
+#include "cio.h"
+
+/*
+ * path grouping stuff
+ */
+#define SPID_FUNC_SINGLE_PATH	   0x00
+#define SPID_FUNC_MULTI_PATH	   0x80
+#define SPID_FUNC_ESTABLISH	   0x00
+#define SPID_FUNC_RESIGN	   0x40
+#define SPID_FUNC_DISBAND	   0x20
+
+#define SNID_STATE1_RESET	   0
+#define SNID_STATE1_UNGROUPED	   2
+#define SNID_STATE1_GROUPED	   3
+
+#define SNID_STATE2_NOT_RESVD	   0
+#define SNID_STATE2_RESVD_ELSE	   2
+#define SNID_STATE2_RESVD_SELF	   3
+
+#define SNID_STATE3_MULTI_PATH	   1
+#define SNID_STATE3_SINGLE_PATH	   0
+
+struct path_state {
+	__u8  state1 : 2;	/* path state value 1 */
+	__u8  state2 : 2;	/* path state value 2 */
+	__u8  state3 : 1;	/* path state value 3 */
+	__u8  resvd  : 3;	/* reserved */
+} __attribute__ ((packed));
+
+struct extended_cssid {
+	u8 version;
+	u8 cssid;
+} __attribute__ ((packed));
+
+struct pgid {
+	union {
+		__u8 fc;   	/* SPID function code */
+		struct path_state ps;	/* SNID path state */
+	} __attribute__ ((packed)) inf;
+	union {
+		__u32 cpu_addr	: 16;	/* CPU address */
+		struct extended_cssid ext_cssid;
+	} __attribute__ ((packed)) pgid_high;
+	__u32 cpu_id	: 24;	/* CPU identification */
+	__u32 cpu_model : 16;	/* CPU model */
+	__u32 tod_high;		/* high word TOD clock */
+} __attribute__ ((packed));
+
+struct subchannel;
+struct chp_link;
+/**
+ * struct css_driver - device driver for subchannels
+ * @subchannel_type: subchannel type supported by this driver
+ * @drv: embedded device driver structure
+ * @irq: called on interrupts
+ * @chp_event: called for events affecting a channel path
+ * @sch_event: called for events affecting the subchannel
+ * @probe: function called on probe
+ * @remove: function called on remove
+ * @shutdown: called at device shutdown
+ * @prepare: prepare for pm state transition
+ * @complete: undo work done in @prepare
+ * @freeze: callback for freezing during hibernation snapshotting
+ * @thaw: undo work done in @freeze
+ * @restore: callback for restoring after hibernation
+ * @settle: wait for asynchronous work to finish
+ */
+struct css_driver {
+	struct css_device_id *subchannel_type;
+	struct device_driver drv;
+	void (*irq)(struct subchannel *);
+	int (*chp_event)(struct subchannel *, struct chp_link *, int);
+	int (*sch_event)(struct subchannel *, int);
+	int (*probe)(struct subchannel *);
+	int (*remove)(struct subchannel *);
+	void (*shutdown)(struct subchannel *);
+	int (*prepare) (struct subchannel *);
+	void (*complete) (struct subchannel *);
+	int (*freeze)(struct subchannel *);
+	int (*thaw) (struct subchannel *);
+	int (*restore)(struct subchannel *);
+	int (*settle)(void);
+};
+
+#define to_cssdriver(n) container_of(n, struct css_driver, drv)
+
+extern int css_driver_register(struct css_driver *);
+extern void css_driver_unregister(struct css_driver *);
+
+extern void css_sch_device_unregister(struct subchannel *);
+extern int css_probe_device(struct subchannel_id);
+extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
+extern int css_init_done;
+extern int max_ssid;
+int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
+			       int (*fn_unknown)(struct subchannel_id,
+			       void *), void *data);
+extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
+extern void css_reiterate_subchannels(void);
+void css_update_ssd_info(struct subchannel *sch);
+
+#define __MAX_SUBCHANNEL 65535
+#define __MAX_SSID 3
+
+struct channel_subsystem {
+	u8 cssid;
+	int valid;
+	struct channel_path *chps[__MAX_CHPID + 1];
+	struct device device;
+	struct pgid global_pgid;
+	struct mutex mutex;
+	/* channel measurement related */
+	int cm_enabled;
+	void *cub_addr1;
+	void *cub_addr2;
+	/* for orphaned ccw devices */
+	struct subchannel *pseudo_subchannel;
+};
+#define to_css(dev) container_of(dev, struct channel_subsystem, device)
+
+extern struct channel_subsystem *channel_subsystems[];
+
+void channel_subsystem_reinit(void);
+
+/* Helper functions to build lists for the slow path. */
+void css_schedule_eval(struct subchannel_id schid);
+void css_schedule_eval_all(void);
+int css_complete_work(void);
+
+int sch_is_pseudo_sch(struct subchannel *);
+struct schib;
+int css_sch_is_valid(struct schib *);
+
+extern struct workqueue_struct *cio_work_q;
+void css_wait_for_slow_path(void);
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/device.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/device.c
new file mode 100644
index 0000000..02d0152
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/device.c
@@ -0,0 +1,2132 @@
+/*
+ *  drivers/s390/cio/device.c
+ *  bus driver for ccw devices
+ *
+ *    Copyright IBM Corp. 2002,2008
+ *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ *		 Cornelia Huck (cornelia.huck@de.ibm.com)
+ *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/timer.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/param.h>		/* HZ */
+#include <asm/cmb.h>
+#include <asm/isc.h>
+
+#include "chp.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "io_sch.h"
+#include "blacklist.h"
+#include "chsc.h"
+
+static struct timer_list recovery_timer;
+static DEFINE_SPINLOCK(recovery_lock);
+static int recovery_phase;
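+/* Delays in seconds between successive recovery attempts for disconnected devices. */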
+static const unsigned long recovery_delay[] = { 3, 30, 300 };
+
+/******************* bus type handling ***********************/
+
+/* The Linux driver model distinguishes between a bus type and
+ * the bus itself. Of course we only have one channel
+ * subsystem driver and one channel subsystem per machine, but
+ * we still use the abstraction. T.R. says it's a good idea. */
+static int
+ccw_bus_match (struct device * dev, struct device_driver * drv)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_driver *cdrv = to_ccwdrv(drv);
+	const struct ccw_device_id *ids = cdrv->ids, *found;
+
+	if (!ids)
+		return 0;
+
+	found = ccw_device_id_match(ids, &cdev->id);
+	if (!found)
+		return 0;
+
+	cdev->id.driver_info = found->driver_info;
+
+	return 1;
+}
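+
+/*
+ * The ids table a ccw driver matches against is normally built with the
+ * CCW_DEVICE()/CCW_DEVICE_DEVTYPE() helpers from <asm/ccwdev.h>.  An
+ * illustrative table (hypothetical name, values modeled on an ECKD DASD
+ * behind a 3990 control unit):
+ *
+ *	static struct ccw_device_id example_ids[] = {
+ *		{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0), .driver_info = 0x1 },
+ *		{ },			(end-of-list terminator)
+ *	};
+ *	MODULE_DEVICE_TABLE(ccw, example_ids);
+ */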
+
+/* Store the modalias string, terminated by the given suffix, into a buffer of
+ * the specified size. Return the length of the resulting string (excluding the
+ * trailing '\0') even if the string doesn't fit the buffer (snprintf semantics). */
+static int snprint_alias(char *buf, size_t size,
+			 struct ccw_device_id *id, const char *suffix)
+{
+	int len;
+
+	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
+	if (len > size)
+		return len;
+	buf += len;
+	size -= len;
+
+	if (id->dev_type != 0)
+		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
+				id->dev_model, suffix);
+	else
+		len += snprintf(buf, size, "dtdm%s", suffix);
+
+	return len;
+}
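+
+/*
+ * Example with illustrative values: for cu_type 0x3990, cu_model 0xe9,
+ * dev_type 0x3390, dev_model 0x0c and an empty suffix the buffer ends up
+ * holding "ccw:t3990mE9dt3390dm0C"; with dev_type 0 the device part
+ * degenerates to "dtdm".
+ */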
+
+/* Set up environment variables for ccw device uevent. Return 0 on success,
+ * non-zero otherwise. */
+static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_device_id *id = &(cdev->id);
+	int ret;
+	char modalias_buf[30];
+
+	/* CU_TYPE= */
+	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
+	if (ret)
+		return ret;
+
+	/* CU_MODEL= */
+	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
+	if (ret)
+		return ret;
+
+	/* The next two can be zero, that's ok for us */
+	/* DEV_TYPE= */
+	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
+	if (ret)
+		return ret;
+
+	/* DEV_MODEL= */
+	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
+	if (ret)
+		return ret;
+
+	/* MODALIAS=  */
+	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
+	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
+	return ret;
+}
+
+static struct bus_type ccw_bus_type;
+
+static void io_subchannel_irq(struct subchannel *);
+static int io_subchannel_probe(struct subchannel *);
+static int io_subchannel_remove(struct subchannel *);
+static void io_subchannel_shutdown(struct subchannel *);
+static int io_subchannel_sch_event(struct subchannel *, int);
+static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
+				   int);
+static void recovery_func(unsigned long data);
+wait_queue_head_t ccw_device_init_wq;
+atomic_t ccw_device_init_count;
+
+static struct css_device_id io_subchannel_ids[] = {
+	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
+	{ /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, io_subchannel_ids);
+
+static int io_subchannel_prepare(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+	/*
+	 * Don't allow suspend while a ccw device registration
+	 * is still outstanding.
+	 */
+	cdev = sch_get_cdev(sch);
+	if (cdev && !device_is_registered(&cdev->dev))
+		return -EAGAIN;
+	return 0;
+}
+
+static int io_subchannel_settle(void)
+{
+	int ret;
+
+	ret = wait_event_interruptible(ccw_device_init_wq,
+				atomic_read(&ccw_device_init_count) == 0);
+	if (ret)
+		return -EINTR;
+	flush_workqueue(cio_work_q);
+	return 0;
+}
+
+static struct css_driver io_subchannel_driver = {
+	.drv = {
+		.owner = THIS_MODULE,
+		.name = "io_subchannel",
+	},
+	.subchannel_type = io_subchannel_ids,
+	.irq = io_subchannel_irq,
+	.sch_event = io_subchannel_sch_event,
+	.chp_event = io_subchannel_chp_event,
+	.probe = io_subchannel_probe,
+	.remove = io_subchannel_remove,
+	.shutdown = io_subchannel_shutdown,
+	.prepare = io_subchannel_prepare,
+	.settle = io_subchannel_settle,
+};
+
+int __init io_subchannel_init(void)
+{
+	int ret;
+
+	init_waitqueue_head(&ccw_device_init_wq);
+	atomic_set(&ccw_device_init_count, 0);
+	setup_timer(&recovery_timer, recovery_func, 0);
+
+	ret = bus_register(&ccw_bus_type);
+	if (ret)
+		return ret;
+	ret = css_driver_register(&io_subchannel_driver);
+	if (ret)
+		bus_unregister(&ccw_bus_type);
+
+	return ret;
+}
+
+
+/************************ device handling **************************/
+
+/*
+ * A ccw_device has some interfaces in sysfs in addition to the
+ * standard ones.
+ * The following entries are designed to export the information which
+ * resided in 2.4 in /proc/subchannels. Subchannel and device number
+ * are obvious, so they don't have an entry :)
+ * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
+ */
+static ssize_t
+chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct chsc_ssd_info *ssd = &sch->ssd_info;
+	ssize_t ret = 0;
+	int chp;
+	int mask;
+
+	for (chp = 0; chp < 8; chp++) {
+		mask = 0x80 >> chp;
+		if (ssd->path_mask & mask)
+			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
+		else
+			ret += sprintf(buf + ret, "00 ");
+	}
+	ret += sprintf (buf+ret, "\n");
+	return min((ssize_t)PAGE_SIZE, ret);
+}
+
+static ssize_t
+pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct pmcw *pmcw = &sch->schib.pmcw;
+
+	return sprintf (buf, "%02x %02x %02x\n",
+			pmcw->pim, pmcw->pam, pmcw->pom);
+}
+
+static ssize_t
+devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_device_id *id = &(cdev->id);
+
+	if (id->dev_type != 0)
+		return sprintf(buf, "%04x/%02x\n",
+				id->dev_type, id->dev_model);
+	else
+		return sprintf(buf, "n/a\n");
+}
+
+static ssize_t
+cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_device_id *id = &(cdev->id);
+
+	return sprintf(buf, "%04x/%02x\n",
+		       id->cu_type, id->cu_model);
+}
+
+static ssize_t
+modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_device_id *id = &(cdev->id);
+	int len;
+
+	len = snprint_alias(buf, PAGE_SIZE, id, "\n");
+
+	return len > PAGE_SIZE ? PAGE_SIZE : len;
+}
+
+static ssize_t
+online_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+
+	return sprintf(buf, cdev->online ? "1\n" : "0\n");
+}
+
+int ccw_device_is_orphan(struct ccw_device *cdev)
+{
+	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
+}
+
+static void ccw_device_unregister(struct ccw_device *cdev)
+{
+	if (device_is_registered(&cdev->dev)) {
+		/* Undo device_add(). */
+		device_del(&cdev->dev);
+	}
+	if (cdev->private->flags.initialized) {
+		cdev->private->flags.initialized = 0;
+		/* Release reference from device_initialize(). */
+		put_device(&cdev->dev);
+	}
+}
+
+static void io_subchannel_quiesce(struct subchannel *);
+
+/**
+ * ccw_device_set_offline() - disable a ccw device for I/O
+ * @cdev: target ccw device
+ *
+ * This function calls the driver's set_offline() function for @cdev, if
+ * given, and then disables @cdev.
+ * Returns:
+ *   %0 on success and a negative error value on failure.
+ * Context:
+ *  enabled, ccw device lock not held
+ */
+int ccw_device_set_offline(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+	int ret, state;
+
+	if (!cdev)
+		return -ENODEV;
+	if (!cdev->online || !cdev->drv)
+		return -EINVAL;
+
+	if (cdev->drv->set_offline) {
+		ret = cdev->drv->set_offline(cdev);
+		if (ret != 0)
+			return ret;
+	}
+	cdev->online = 0;
+	spin_lock_irq(cdev->ccwlock);
+	sch = to_subchannel(cdev->dev.parent);
+	/* Wait until a final state or DISCONNECTED is reached */
+	while (!dev_fsm_final_state(cdev) &&
+	       cdev->private->state != DEV_STATE_DISCONNECTED) {
+		spin_unlock_irq(cdev->ccwlock);
+		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+			   cdev->private->state == DEV_STATE_DISCONNECTED));
+		spin_lock_irq(cdev->ccwlock);
+	}
+	do {
+		ret = ccw_device_offline(cdev);
+		if (!ret)
+			break;
+		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
+			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno);
+		if (ret != -EBUSY)
+			goto error;
+		state = cdev->private->state;
+		spin_unlock_irq(cdev->ccwlock);
+		io_subchannel_quiesce(sch);
+		spin_lock_irq(cdev->ccwlock);
+		cdev->private->state = state;
+	} while (ret == -EBUSY);
+	spin_unlock_irq(cdev->ccwlock);
+	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+		   cdev->private->state == DEV_STATE_DISCONNECTED));
+	/* Inform the user if set offline failed. */
+	if (cdev->private->state == DEV_STATE_BOXED) {
+		pr_warning("%s: The device entered boxed state while "
+			   "being set offline\n", dev_name(&cdev->dev));
+	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+		pr_warning("%s: The device stopped operating while "
+			   "being set offline\n", dev_name(&cdev->dev));
+	}
+	/* Give up reference from ccw_device_set_online(). */
+	put_device(&cdev->dev);
+	return 0;
+
+error:
+	cdev->private->state = DEV_STATE_OFFLINE;
+	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+	spin_unlock_irq(cdev->ccwlock);
+	/* Give up reference from ccw_device_set_online(). */
+	put_device(&cdev->dev);
+	return -ENODEV;
+}
+
+/**
+ * ccw_device_set_online() - enable a ccw device for I/O
+ * @cdev: target ccw device
+ *
+ * This function first enables @cdev and then calls the driver's set_online()
+ * function for @cdev, if given. If set_online() returns an error, @cdev is
+ * disabled again.
+ * Returns:
+ *   %0 on success and a negative error value on failure.
+ * Context:
+ *  enabled, ccw device lock not held
+ */
+int ccw_device_set_online(struct ccw_device *cdev)
+{
+	int ret;
+	int ret2;
+
+	if (!cdev)
+		return -ENODEV;
+	if (cdev->online || !cdev->drv)
+		return -EINVAL;
+	/* Hold on to an extra reference while device is online. */
+	if (!get_device(&cdev->dev))
+		return -ENODEV;
+
+	spin_lock_irq(cdev->ccwlock);
+	ret = ccw_device_online(cdev);
+	spin_unlock_irq(cdev->ccwlock);
+	if (ret == 0)
+		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+	else {
+		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
+			      "device 0.%x.%04x\n",
+			      ret, cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno);
+		/* Give up online reference since onlining failed. */
+		put_device(&cdev->dev);
+		return ret;
+	}
+	spin_lock_irq(cdev->ccwlock);
+	/* Check if online processing was successful */
+	if ((cdev->private->state != DEV_STATE_ONLINE) &&
+	    (cdev->private->state != DEV_STATE_W4SENSE)) {
+		spin_unlock_irq(cdev->ccwlock);
+		/* Inform the user that set online failed. */
+		if (cdev->private->state == DEV_STATE_BOXED) {
+			pr_warning("%s: Setting the device online failed "
+				   "because it is boxed\n",
+				   dev_name(&cdev->dev));
+		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+			pr_warning("%s: Setting the device online failed "
+				   "because it is not operational\n",
+				   dev_name(&cdev->dev));
+		}
+		/* Give up online reference since onlining failed. */
+		put_device(&cdev->dev);
+		return -ENODEV;
+	}
+	spin_unlock_irq(cdev->ccwlock);
+	if (cdev->drv->set_online)
+		ret = cdev->drv->set_online(cdev);
+	if (ret)
+		goto rollback;
+	cdev->online = 1;
+	return 0;
+
+rollback:
+	spin_lock_irq(cdev->ccwlock);
+	/* Wait until a final state or DISCONNECTED is reached */
+	while (!dev_fsm_final_state(cdev) &&
+	       cdev->private->state != DEV_STATE_DISCONNECTED) {
+		spin_unlock_irq(cdev->ccwlock);
+		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+			   cdev->private->state == DEV_STATE_DISCONNECTED));
+		spin_lock_irq(cdev->ccwlock);
+	}
+	ret2 = ccw_device_offline(cdev);
+	if (ret2)
+		goto error;
+	spin_unlock_irq(cdev->ccwlock);
+	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+		   cdev->private->state == DEV_STATE_DISCONNECTED));
+	/* Give up online reference since onlining failed. */
+	put_device(&cdev->dev);
+	return ret;
+
+error:
+	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
+		      "device 0.%x.%04x\n",
+		      ret2, cdev->private->dev_id.ssid,
+		      cdev->private->dev_id.devno);
+	cdev->private->state = DEV_STATE_OFFLINE;
+	spin_unlock_irq(cdev->ccwlock);
+	/* Give up online reference since onlining failed. */
+	put_device(&cdev->dev);
+	return ret;
+}
+
+static int online_store_handle_offline(struct ccw_device *cdev)
+{
+	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
+		spin_lock_irq(cdev->ccwlock);
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
+		spin_unlock_irq(cdev->ccwlock);
+		return 0;
+	}
+	if (cdev->drv && cdev->drv->set_offline)
+		return ccw_device_set_offline(cdev);
+	return -EINVAL;
+}
+
+static int online_store_recog_and_online(struct ccw_device *cdev)
+{
+	/* Do device recognition, if needed. */
+	if (cdev->private->state == DEV_STATE_BOXED) {
+		spin_lock_irq(cdev->ccwlock);
+		ccw_device_recognition(cdev);
+		spin_unlock_irq(cdev->ccwlock);
+		wait_event(cdev->private->wait_q,
+			   cdev->private->flags.recog_done);
+		if (cdev->private->state != DEV_STATE_OFFLINE)
+			/* recognition failed */
+			return -EAGAIN;
+	}
+	if (cdev->drv && cdev->drv->set_online)
+		return ccw_device_set_online(cdev);
+	return -EINVAL;
+}
+
+static int online_store_handle_online(struct ccw_device *cdev, int force)
+{
+	int ret;
+
+	ret = online_store_recog_and_online(cdev);
+	if (ret && !force)
+		return ret;
+	if (force && cdev->private->state == DEV_STATE_BOXED) {
+		ret = ccw_device_stlck(cdev);
+		if (ret)
+			return ret;
+		if (cdev->id.cu_type == 0)
+			cdev->private->state = DEV_STATE_NOT_OPER;
+		ret = online_store_recog_and_online(cdev);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static ssize_t online_store (struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	int force, ret;
+	unsigned long i;
+
+	/* Prevent conflict between multiple on-/offline processing requests. */
+	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
+		return -EAGAIN;
+	/* Prevent conflict between internal I/Os and on-/offline processing. */
+	if (!dev_fsm_final_state(cdev) &&
+	    cdev->private->state != DEV_STATE_DISCONNECTED) {
+		ret = -EAGAIN;
+		goto out_onoff;
+	}
+	/* Prevent conflict between pending work and on-/offline processing.*/
+	if (work_pending(&cdev->private->todo_work)) {
+		ret = -EAGAIN;
+		goto out_onoff;
+	}
+
+	if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) {
+		ret = -EINVAL;
+		goto out_onoff;
+	}
+	if (!strncmp(buf, "force\n", count)) {
+		force = 1;
+		i = 1;
+		ret = 0;
+	} else {
+		force = 0;
+		ret = strict_strtoul(buf, 16, &i);
+	}
+	if (ret)
+		goto out;
+	switch (i) {
+	case 0:
+		ret = online_store_handle_offline(cdev);
+		break;
+	case 1:
+		ret = online_store_handle_online(cdev, force);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+out:
+	if (cdev->drv)
+		module_put(cdev->drv->driver.owner);
+out_onoff:
+	atomic_set(&cdev->private->onoff, 0);
+	return (ret < 0) ? ret : count;
+}
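+
+/*
+ * From user space this attribute is driven by plain sysfs writes: "1"
+ * sets the device online, "0" sets it offline, and "force" additionally
+ * steals the lock of a boxed device before onlining it.  A minimal
+ * sketch, assuming a hypothetical bus id of 0.0.0100:
+ *
+ *	FILE *f = fopen("/sys/bus/ccw/devices/0.0.0100/online", "w");
+ *	if (f) {
+ *		fputs("1", f);
+ *		fclose(f);
+ *	}
+ */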
+
+static ssize_t
+available_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct subchannel *sch;
+
+	if (ccw_device_is_orphan(cdev))
+		return sprintf(buf, "no device\n");
+	switch (cdev->private->state) {
+	case DEV_STATE_BOXED:
+		return sprintf(buf, "boxed\n");
+	case DEV_STATE_DISCONNECTED:
+	case DEV_STATE_DISCONNECTED_SENSE_ID:
+	case DEV_STATE_NOT_OPER:
+		sch = to_subchannel(dev->parent);
+		if (!sch->lpm)
+			return sprintf(buf, "no path\n");
+		else
+			return sprintf(buf, "no device\n");
+	default:
+		/* All other states considered fine. */
+		return sprintf(buf, "good\n");
+	}
+}
+
+static ssize_t
+initiate_logging(struct device *dev, struct device_attribute *attr,
+		 const char *buf, size_t count)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	int rc;
+
+	rc = chsc_siosl(sch->schid);
+	if (rc < 0) {
+		pr_warning("Logging for subchannel 0.%x.%04x failed with "
+			   "errno=%d\n",
+			   sch->schid.ssid, sch->schid.sch_no, rc);
+		return rc;
+	}
+	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
+		  sch->schid.ssid, sch->schid.sch_no);
+	return count;
+}
+
+static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
+static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
+static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
+static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
+static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
+static DEVICE_ATTR(online, 0644, online_show, online_store);
+static DEVICE_ATTR(availability, 0444, available_show, NULL);
+static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
+
+static struct attribute *io_subchannel_attrs[] = {
+	&dev_attr_chpids.attr,
+	&dev_attr_pimpampom.attr,
+	&dev_attr_logging.attr,
+	NULL,
+};
+
+static struct attribute_group io_subchannel_attr_group = {
+	.attrs = io_subchannel_attrs,
+};
+
+static struct attribute * ccwdev_attrs[] = {
+	&dev_attr_devtype.attr,
+	&dev_attr_cutype.attr,
+	&dev_attr_modalias.attr,
+	&dev_attr_online.attr,
+	&dev_attr_cmb_enable.attr,
+	&dev_attr_availability.attr,
+	NULL,
+};
+
+static struct attribute_group ccwdev_attr_group = {
+	.attrs = ccwdev_attrs,
+};
+
+static const struct attribute_group *ccwdev_attr_groups[] = {
+	&ccwdev_attr_group,
+	NULL,
+};
+
+/* this is a simple abstraction for device_register that sets the
+ * correct bus type and adds the bus specific files */
+static int ccw_device_register(struct ccw_device *cdev)
+{
+	struct device *dev = &cdev->dev;
+	int ret;
+
+	dev->bus = &ccw_bus_type;
+	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
+			   cdev->private->dev_id.devno);
+	if (ret)
+		return ret;
+	return device_add(dev);
+}
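+
+/*
+ * The resulting bus id has the form 0.<ssid>.<devno>, so a device with
+ * ssid 0 and device number 0x0100 shows up in sysfs as
+ * /sys/bus/ccw/devices/0.0.0100.
+ */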
+
+static int match_dev_id(struct device *dev, void *data)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_dev_id *dev_id = data;
+
+	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
+}
+
+static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
+
+	return dev ? to_ccwdev(dev) : NULL;
+}
+
+static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
+{
+	int ret;
+
+	if (device_is_registered(&cdev->dev)) {
+		device_release_driver(&cdev->dev);
+		ret = device_attach(&cdev->dev);
+		WARN_ON(ret == -ENODEV);
+	}
+}
+
+static void
+ccw_device_release(struct device *dev)
+{
+	struct ccw_device *cdev;
+
+	cdev = to_ccwdev(dev);
+	/* Release reference of parent subchannel. */
+	put_device(cdev->dev.parent);
+	kfree(cdev->private);
+	kfree(cdev);
+}
+
+static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	cdev  = kzalloc(sizeof(*cdev), GFP_KERNEL);
+	if (cdev) {
+		cdev->private = kzalloc(sizeof(struct ccw_device_private),
+					GFP_KERNEL | GFP_DMA);
+		if (cdev->private)
+			return cdev;
+	}
+	kfree(cdev);
+	return ERR_PTR(-ENOMEM);
+}
+
+static void ccw_device_todo(struct work_struct *work);
+
+static int io_subchannel_initialize_dev(struct subchannel *sch,
+					struct ccw_device *cdev)
+{
+	cdev->private->cdev = cdev;
+	cdev->private->int_class = IOINT_CIO;
+	atomic_set(&cdev->private->onoff, 0);
+	cdev->dev.parent = &sch->dev;
+	cdev->dev.release = ccw_device_release;
+	INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
+	cdev->dev.groups = ccwdev_attr_groups;
+	/* Do first half of device_register. */
+	device_initialize(&cdev->dev);
+	if (!get_device(&sch->dev)) {
+		/* Release reference from device_initialize(). */
+		put_device(&cdev->dev);
+		return -ENODEV;
+	}
+	cdev->private->flags.initialized = 1;
+	return 0;
+}
+
+static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+	int ret;
+
+	cdev = io_subchannel_allocate_dev(sch);
+	if (!IS_ERR(cdev)) {
+		ret = io_subchannel_initialize_dev(sch, cdev);
+		if (ret)
+			cdev = ERR_PTR(ret);
+	}
+	return cdev;
+}
+
+static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
+
+static void sch_create_and_recog_new_device(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	/* Need to allocate a new ccw device. */
+	cdev = io_subchannel_create_ccwdev(sch);
+	if (IS_ERR(cdev)) {
+		/* OK, we did everything we could... */
+		css_sch_device_unregister(sch);
+		return;
+	}
+	/* Start recognition for the new ccw device. */
+	io_subchannel_recog(cdev, sch);
+}
+
+/*
+ * Register recognized device.
+ */
+static void io_subchannel_register(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+	int ret, adjust_init_count = 1;
+	unsigned long flags;
+
+	sch = to_subchannel(cdev->dev.parent);
+	/*
+	 * Check if subchannel is still registered. It may have become
+	 * unregistered if a machine check hit us after finishing
+	 * device recognition but before the register work could be
+	 * queued.
+	 */
+	if (!device_is_registered(&sch->dev))
+		goto out_err;
+	css_update_ssd_info(sch);
+	/*
+	 * io_subchannel_register() will also be called after device
+	 * recognition has been done for a boxed device (which will already
+	 * be registered). We need to reprobe since we may now have sense id
+	 * information.
+	 */
+	if (device_is_registered(&cdev->dev)) {
+		if (!cdev->drv) {
+			ret = device_reprobe(&cdev->dev);
+			if (ret)
+				/* We can't do much here. */
+				CIO_MSG_EVENT(0, "device_reprobe() returned"
+					      " %d for 0.%x.%04x\n", ret,
+					      cdev->private->dev_id.ssid,
+					      cdev->private->dev_id.devno);
+		}
+		adjust_init_count = 0;
+		goto out;
+	}
+	/*
+	 * Now we know this subchannel will stay, we can throw
+	 * our delayed uevent.
+	 */
+	dev_set_uevent_suppress(&sch->dev, 0);
+	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+	/* make it known to the system */
+	ret = ccw_device_register(cdev);
+	if (ret) {
+		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
+			      cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno, ret);
+		spin_lock_irqsave(sch->lock, flags);
+		sch_set_cdev(sch, NULL);
+		spin_unlock_irqrestore(sch->lock, flags);
+		/* Release initial device reference. */
+		put_device(&cdev->dev);
+		goto out_err;
+	}
+out:
+	cdev->private->flags.recog_done = 1;
+	wake_up(&cdev->private->wait_q);
+out_err:
+	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
+		wake_up(&ccw_device_init_wq);
+}
+
+static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+
+	/* Get subchannel reference for local processing. */
+	if (!get_device(cdev->dev.parent))
+		return;
+	sch = to_subchannel(cdev->dev.parent);
+	css_sch_device_unregister(sch);
+	/* Release subchannel reference for local processing. */
+	put_device(&sch->dev);
+}
+
+/*
+ * subchannel recognition done. Called from the state machine.
+ */
+void
+io_subchannel_recog_done(struct ccw_device *cdev)
+{
+	if (css_init_done == 0) {
+		cdev->private->flags.recog_done = 1;
+		return;
+	}
+	switch (cdev->private->state) {
+	case DEV_STATE_BOXED:
+		/* Device did not respond in time. */
+	case DEV_STATE_NOT_OPER:
+		cdev->private->flags.recog_done = 1;
+		/* Remove device found not operational. */
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+		if (atomic_dec_and_test(&ccw_device_init_count))
+			wake_up(&ccw_device_init_wq);
+		break;
+	case DEV_STATE_OFFLINE:
+		/*
+		 * We can't register the device in interrupt context so
+		 * we schedule a work item.
+		 */
+		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
+		break;
+	}
+}
+
+static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
+{
+	struct ccw_device_private *priv;
+
+	cdev->ccwlock = sch->lock;
+
+	/* Init private data. */
+	priv = cdev->private;
+	priv->dev_id.devno = sch->schib.pmcw.dev;
+	priv->dev_id.ssid = sch->schid.ssid;
+	priv->schid = sch->schid;
+	priv->state = DEV_STATE_NOT_OPER;
+	INIT_LIST_HEAD(&priv->cmb_list);
+	init_waitqueue_head(&priv->wait_q);
+	init_timer(&priv->timer);
+
+	/* Increase counter of devices currently in recognition. */
+	atomic_inc(&ccw_device_init_count);
+
+	/* Start async. device sensing. */
+	spin_lock_irq(sch->lock);
+	sch_set_cdev(sch, cdev);
+	ccw_device_recognition(cdev);
+	spin_unlock_irq(sch->lock);
+}
+
+static int ccw_device_move_to_sch(struct ccw_device *cdev,
+				  struct subchannel *sch)
+{
+	struct subchannel *old_sch;
+	int rc, old_enabled = 0;
+
+	old_sch = to_subchannel(cdev->dev.parent);
+	/* Obtain child reference for new parent. */
+	if (!get_device(&sch->dev))
+		return -ENODEV;
+
+	if (!sch_is_pseudo_sch(old_sch)) {
+		spin_lock_irq(old_sch->lock);
+		old_enabled = old_sch->schib.pmcw.ena;
+		rc = 0;
+		if (old_enabled)
+			rc = cio_disable_subchannel(old_sch);
+		spin_unlock_irq(old_sch->lock);
+		if (rc == -EBUSY) {
+			/* Release child reference for new parent. */
+			put_device(&sch->dev);
+			return rc;
+		}
+	}
+
+	mutex_lock(&sch->reg_mutex);
+	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
+	mutex_unlock(&sch->reg_mutex);
+	if (rc) {
+		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
+			      cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno, sch->schid.ssid,
+			      sch->schib.pmcw.dev, rc);
+		if (old_enabled) {
+			/* Try to reenable the old subchannel. */
+			spin_lock_irq(old_sch->lock);
+			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
+			spin_unlock_irq(old_sch->lock);
+		}
+		/* Release child reference for new parent. */
+		put_device(&sch->dev);
+		return rc;
+	}
+	/* Clean up old subchannel. */
+	if (!sch_is_pseudo_sch(old_sch)) {
+		spin_lock_irq(old_sch->lock);
+		sch_set_cdev(old_sch, NULL);
+		spin_unlock_irq(old_sch->lock);
+		css_schedule_eval(old_sch->schid);
+	}
+	/* Release child reference for old parent. */
+	put_device(&old_sch->dev);
+	/* Initialize new subchannel. */
+	spin_lock_irq(sch->lock);
+	cdev->private->schid = sch->schid;
+	cdev->ccwlock = sch->lock;
+	if (!sch_is_pseudo_sch(sch))
+		sch_set_cdev(sch, cdev);
+	spin_unlock_irq(sch->lock);
+	if (!sch_is_pseudo_sch(sch))
+		css_update_ssd_info(sch);
+	return 0;
+}
+
+static int ccw_device_move_to_orph(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct channel_subsystem *css = to_css(sch->dev.parent);
+
+	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
+}
+
+static void io_subchannel_irq(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	cdev = sch_get_cdev(sch);
+
+	CIO_TRACE_EVENT(6, "IRQ");
+	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
+	if (cdev)
+		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
+	else
+		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+}
+
+void io_subchannel_init_config(struct subchannel *sch)
+{
+	memset(&sch->config, 0, sizeof(sch->config));
+	sch->config.csense = 1;
+}
+
+static void io_subchannel_init_fields(struct subchannel *sch)
+{
+	if (cio_is_console(sch->schid))
+		sch->opm = 0xff;
+	else
+		sch->opm = chp_get_sch_opm(sch);
+	sch->lpm = sch->schib.pmcw.pam & sch->opm;
+	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
+
+	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
+		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
+		      sch->schib.pmcw.dev, sch->schid.ssid,
+		      sch->schid.sch_no, sch->schib.pmcw.pim,
+		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);
+
+	io_subchannel_init_config(sch);
+}
+
+/*
+ * Note: We always return 0 so that we bind to the device even on error.
+ * This is needed so that our remove function is called on unregister.
+ */
+static int io_subchannel_probe(struct subchannel *sch)
+{
+	struct io_subchannel_private *io_priv;
+	struct ccw_device *cdev;
+	int rc;
+
+	if (cio_is_console(sch->schid)) {
+		rc = sysfs_create_group(&sch->dev.kobj,
+					&io_subchannel_attr_group);
+		if (rc)
+			CIO_MSG_EVENT(0, "Failed to create io subchannel "
+				      "attributes for subchannel "
+				      "0.%x.%04x (rc=%d)\n",
+				      sch->schid.ssid, sch->schid.sch_no, rc);
+		/*
+		 * The console subchannel already has an associated ccw_device.
+		 * Throw the delayed uevent for the subchannel, register
+		 * the ccw_device and exit.
+		 */
+		dev_set_uevent_suppress(&sch->dev, 0);
+		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+		cdev = sch_get_cdev(sch);
+		cdev->dev.groups = ccwdev_attr_groups;
+		device_initialize(&cdev->dev);
+		cdev->private->flags.initialized = 1;
+		ccw_device_register(cdev);
+		/*
+		 * Check if the device is already online. If it is
+		 * the reference count needs to be corrected since we
+		 * didn't obtain a reference in ccw_device_set_online.
+		 */
+		if (cdev->private->state != DEV_STATE_NOT_OPER &&
+		    cdev->private->state != DEV_STATE_OFFLINE &&
+		    cdev->private->state != DEV_STATE_BOXED)
+			get_device(&cdev->dev);
+		return 0;
+	}
+	io_subchannel_init_fields(sch);
+	rc = cio_commit_config(sch);
+	if (rc)
+		goto out_schedule;
+	rc = sysfs_create_group(&sch->dev.kobj,
+				&io_subchannel_attr_group);
+	if (rc)
+		goto out_schedule;
+	/* Allocate I/O subchannel private data. */
+	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+	if (!io_priv)
+		goto out_schedule;
+
+	set_io_private(sch, io_priv);
+	css_schedule_eval(sch->schid);
+	return 0;
+
+out_schedule:
+	spin_lock_irq(sch->lock);
+	css_sched_sch_todo(sch, SCH_TODO_UNREG);
+	spin_unlock_irq(sch->lock);
+	return 0;
+}
+
+static int
+io_subchannel_remove (struct subchannel *sch)
+{
+	struct io_subchannel_private *io_priv = to_io_private(sch);
+	struct ccw_device *cdev;
+
+	cdev = sch_get_cdev(sch);
+	if (!cdev)
+		goto out_free;
+	io_subchannel_quiesce(sch);
+	/* Set ccw device to not operational and drop reference. */
+	spin_lock_irq(cdev->ccwlock);
+	sch_set_cdev(sch, NULL);
+	set_io_private(sch, NULL);
+	cdev->private->state = DEV_STATE_NOT_OPER;
+	spin_unlock_irq(cdev->ccwlock);
+	ccw_device_unregister(cdev);
+out_free:
+	kfree(io_priv);
+	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
+	return 0;
+}
+
+static void io_subchannel_verify(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	cdev = sch_get_cdev(sch);
+	if (cdev)
+		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+}
+
+static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
+{
+	struct ccw_device *cdev;
+
+	cdev = sch_get_cdev(sch);
+	if (!cdev)
+		return;
+	if (cio_update_schib(sch))
+		goto err;
+	/* Check for I/O on path. */
+	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
+		goto out;
+	if (cdev->private->state == DEV_STATE_ONLINE) {
+		ccw_device_kill_io(cdev);
+		goto out;
+	}
+	if (cio_clear(sch))
+		goto err;
+out:
+	/* Trigger path verification. */
+	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+	return;
+
+err:
+	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+}
+
+static int io_subchannel_chp_event(struct subchannel *sch,
+				   struct chp_link *link, int event)
+{
+	struct ccw_device *cdev = sch_get_cdev(sch);
+	int mask;
+
+	mask = chp_ssd_get_mask(&sch->ssd_info, link);
+	if (!mask)
+		return 0;
+	switch (event) {
+	case CHP_VARY_OFF:
+		sch->opm &= ~mask;
+		sch->lpm &= ~mask;
+		if (cdev)
+			cdev->private->path_gone_mask |= mask;
+		io_subchannel_terminate_path(sch, mask);
+		break;
+	case CHP_VARY_ON:
+		sch->opm |= mask;
+		sch->lpm |= mask;
+		if (cdev)
+			cdev->private->path_new_mask |= mask;
+		io_subchannel_verify(sch);
+		break;
+	case CHP_OFFLINE:
+		if (cio_update_schib(sch))
+			return -ENODEV;
+		if (cdev)
+			cdev->private->path_gone_mask |= mask;
+		io_subchannel_terminate_path(sch, mask);
+		break;
+	case CHP_ONLINE:
+		if (cio_update_schib(sch))
+			return -ENODEV;
+		sch->lpm |= mask & sch->opm;
+		if (cdev)
+			cdev->private->path_new_mask |= mask;
+		io_subchannel_verify(sch);
+		break;
+	}
+	return 0;
+}
+
+static void io_subchannel_quiesce(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+	int ret;
+
+	spin_lock_irq(sch->lock);
+	cdev = sch_get_cdev(sch);
+	if (cio_is_console(sch->schid))
+		goto out_unlock;
+	if (!sch->schib.pmcw.ena)
+		goto out_unlock;
+	ret = cio_disable_subchannel(sch);
+	if (ret != -EBUSY)
+		goto out_unlock;
+	if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
+	while (ret == -EBUSY) {
+		cdev->private->state = DEV_STATE_QUIESCE;
+		cdev->private->iretry = 255;
+		ret = ccw_device_cancel_halt_clear(cdev);
+		if (ret == -EBUSY) {
+			ccw_device_set_timeout(cdev, HZ/10);
+			spin_unlock_irq(sch->lock);
+			wait_event(cdev->private->wait_q,
+				   cdev->private->state != DEV_STATE_QUIESCE);
+			spin_lock_irq(sch->lock);
+		}
+		ret = cio_disable_subchannel(sch);
+	}
+out_unlock:
+	spin_unlock_irq(sch->lock);
+}
+
+static void io_subchannel_shutdown(struct subchannel *sch)
+{
+	io_subchannel_quiesce(sch);
+}
+
+static int device_is_disconnected(struct ccw_device *cdev)
+{
+	if (!cdev)
+		return 0;
+	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
+		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
+}
+
+static int recovery_check(struct device *dev, void *data)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	int *redo = data;
+
+	spin_lock_irq(cdev->ccwlock);
+	switch (cdev->private->state) {
+	case DEV_STATE_DISCONNECTED:
+		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
+			      cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno);
+		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+		*redo = 1;
+		break;
+	case DEV_STATE_DISCONNECTED_SENSE_ID:
+		*redo = 1;
+		break;
+	}
+	spin_unlock_irq(cdev->ccwlock);
+
+	return 0;
+}
+
+static void recovery_work_func(struct work_struct *unused)
+{
+	int redo = 0;
+
+	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
+	if (redo) {
+		spin_lock_irq(&recovery_lock);
+		if (!timer_pending(&recovery_timer)) {
+			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
+				recovery_phase++;
+			mod_timer(&recovery_timer, jiffies +
+				  recovery_delay[recovery_phase] * HZ);
+		}
+		spin_unlock_irq(&recovery_lock);
+	} else
+		CIO_MSG_EVENT(4, "recovery: end\n");
+}
+
+static DECLARE_WORK(recovery_work, recovery_work_func);
+
+static void recovery_func(unsigned long data)
+{
+	/*
+	 * We can't do our recovery in softirq context and it's not
+	 * performance critical, so we schedule it.
+	 */
+	schedule_work(&recovery_work);
+}
+
+static void ccw_device_schedule_recovery(void)
+{
+	unsigned long flags;
+
+	CIO_MSG_EVENT(4, "recovery: schedule\n");
+	spin_lock_irqsave(&recovery_lock, flags);
+	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
+		recovery_phase = 0;
+		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
+	}
+	spin_unlock_irqrestore(&recovery_lock, flags);
+}
+
+static int purge_fn(struct device *dev, void *data)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+
+	spin_lock_irq(cdev->ccwlock);
+	if (is_blacklisted(id->ssid, id->devno) &&
+	    (cdev->private->state == DEV_STATE_OFFLINE) &&
+	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
+		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
+			      id->devno);
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+		atomic_set(&cdev->private->onoff, 0);
+	}
+	spin_unlock_irq(cdev->ccwlock);
+	/* Abort loop in case of pending signal. */
+	if (signal_pending(current))
+		return -EINTR;
+
+	return 0;
+}
+
+/**
+ * ccw_purge_blacklisted - purge unused, blacklisted devices
+ *
+ * Unregister all ccw devices that are offline and on the blacklist.
+ */
+int ccw_purge_blacklisted(void)
+{
+	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
+	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
+	return 0;
+}
+
+void ccw_device_set_disconnected(struct ccw_device *cdev)
+{
+	if (!cdev)
+		return;
+	ccw_device_set_timeout(cdev, 0);
+	cdev->private->flags.fake_irb = 0;
+	cdev->private->state = DEV_STATE_DISCONNECTED;
+	if (cdev->online)
+		ccw_device_schedule_recovery();
+}
+
+void ccw_device_set_notoper(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	CIO_TRACE_EVENT(2, "notoper");
+	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+	ccw_device_set_timeout(cdev, 0);
+	cio_disable_subchannel(sch);
+	cdev->private->state = DEV_STATE_NOT_OPER;
+}
+
+enum io_sch_action {
+	IO_SCH_UNREG,
+	IO_SCH_ORPH_UNREG,
+	IO_SCH_ATTACH,
+	IO_SCH_UNREG_ATTACH,
+	IO_SCH_ORPH_ATTACH,
+	IO_SCH_REPROBE,
+	IO_SCH_VERIFY,
+	IO_SCH_DISC,
+	IO_SCH_NOP,
+};
+
+static enum io_sch_action sch_get_action(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	cdev = sch_get_cdev(sch);
+	if (cio_update_schib(sch)) {
+		/* Not operational. */
+		if (!cdev)
+			return IO_SCH_UNREG;
+		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+			return IO_SCH_UNREG;
+		return IO_SCH_ORPH_UNREG;
+	}
+	/* Operational. */
+	if (!cdev)
+		return IO_SCH_ATTACH;
+	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+			return IO_SCH_UNREG_ATTACH;
+		return IO_SCH_ORPH_ATTACH;
+	}
+	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
+		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
+			return IO_SCH_UNREG;
+		return IO_SCH_DISC;
+	}
+	if (device_is_disconnected(cdev))
+		return IO_SCH_REPROBE;
+	if (cdev->online)
+		return IO_SCH_VERIFY;
+	return IO_SCH_NOP;
+}
+
+/**
+ * io_subchannel_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel and device. Return
+ * zero when the event has been handled sufficiently or -EAGAIN when this
+ * function should be called again in process context.
+ */
+static int io_subchannel_sch_event(struct subchannel *sch, int process)
+{
+	unsigned long flags;
+	struct ccw_device *cdev;
+	struct ccw_dev_id dev_id;
+	enum io_sch_action action;
+	int rc = -EAGAIN;
+
+	spin_lock_irqsave(sch->lock, flags);
+	if (!device_is_registered(&sch->dev))
+		goto out_unlock;
+	if (work_pending(&sch->todo_work))
+		goto out_unlock;
+	cdev = sch_get_cdev(sch);
+	if (cdev && work_pending(&cdev->private->todo_work))
+		goto out_unlock;
+	action = sch_get_action(sch);
+	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
+		      sch->schid.ssid, sch->schid.sch_no, process,
+		      action);
+	/* Perform immediate actions while holding the lock. */
+	switch (action) {
+	case IO_SCH_REPROBE:
+		/* Trigger device recognition. */
+		ccw_device_trigger_reprobe(cdev);
+		rc = 0;
+		goto out_unlock;
+	case IO_SCH_VERIFY:
+		if (cdev->private->flags.resuming == 1) {
+			if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) {
+				ccw_device_set_notoper(cdev);
+				break;
+			}
+		}
+		/* Trigger path verification. */
+		io_subchannel_verify(sch);
+		rc = 0;
+		goto out_unlock;
+	case IO_SCH_DISC:
+		ccw_device_set_disconnected(cdev);
+		rc = 0;
+		goto out_unlock;
+	case IO_SCH_ORPH_UNREG:
+	case IO_SCH_ORPH_ATTACH:
+		ccw_device_set_disconnected(cdev);
+		break;
+	case IO_SCH_UNREG_ATTACH:
+	case IO_SCH_UNREG:
+		if (!cdev)
+			break;
+		if (cdev->private->state == DEV_STATE_SENSE_ID) {
+			/*
+			 * Note: delayed work triggered by this event
+			 * and repeated calls to sch_event are synchronized
+			 * by the above check for work_pending(cdev).
+			 */
+			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+		} else
+			ccw_device_set_notoper(cdev);
+		break;
+	case IO_SCH_NOP:
+		rc = 0;
+		goto out_unlock;
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(sch->lock, flags);
+	/* All other actions require process context. */
+	if (!process)
+		goto out;
+	/* Handle attached ccw device. */
+	switch (action) {
+	case IO_SCH_ORPH_UNREG:
+	case IO_SCH_ORPH_ATTACH:
+		/* Move ccw device to orphanage. */
+		rc = ccw_device_move_to_orph(cdev);
+		if (rc)
+			goto out;
+		break;
+	case IO_SCH_UNREG_ATTACH:
+		if (cdev->private->flags.resuming) {
+			/* Device will be handled later. */
+			rc = 0;
+			goto out;
+		}
+		/* Unregister ccw device. */
+		ccw_device_unregister(cdev);
+		break;
+	default:
+		break;
+	}
+	/* Handle subchannel. */
+	switch (action) {
+	case IO_SCH_ORPH_UNREG:
+	case IO_SCH_UNREG:
+		if (!cdev || !cdev->private->flags.resuming)
+			css_sch_device_unregister(sch);
+		break;
+	case IO_SCH_ORPH_ATTACH:
+	case IO_SCH_UNREG_ATTACH:
+	case IO_SCH_ATTACH:
+		dev_id.ssid = sch->schid.ssid;
+		dev_id.devno = sch->schib.pmcw.dev;
+		cdev = get_ccwdev_by_dev_id(&dev_id);
+		if (!cdev) {
+			sch_create_and_recog_new_device(sch);
+			break;
+		}
+		rc = ccw_device_move_to_sch(cdev, sch);
+		if (rc) {
+			/* Release reference from get_ccwdev_by_dev_id() */
+			put_device(&cdev->dev);
+			goto out;
+		}
+		spin_lock_irqsave(sch->lock, flags);
+		ccw_device_trigger_reprobe(cdev);
+		spin_unlock_irqrestore(sch->lock, flags);
+		/* Release reference from get_ccwdev_by_dev_id() */
+		put_device(&cdev->dev);
+		break;
+	default:
+		break;
+	}
+	return 0;
+
+out_unlock:
+	spin_unlock_irqrestore(sch->lock, flags);
+out:
+	return rc;
+}
+
+#ifdef CONFIG_CCW_CONSOLE
+static struct ccw_device console_cdev;
+static struct ccw_device_private console_private;
+static int console_cdev_in_use;
+
+static DEFINE_SPINLOCK(ccw_console_lock);
+
+spinlock_t * cio_get_console_lock(void)
+{
+	return &ccw_console_lock;
+}
+
+static int ccw_device_console_enable(struct ccw_device *cdev,
+				     struct subchannel *sch)
+{
+	struct io_subchannel_private *io_priv = cio_get_console_priv();
+	int rc;
+
+	/* Attach subchannel private data. */
+	memset(io_priv, 0, sizeof(*io_priv));
+	set_io_private(sch, io_priv);
+	io_subchannel_init_fields(sch);
+	rc = cio_commit_config(sch);
+	if (rc)
+		return rc;
+	sch->driver = &io_subchannel_driver;
+	/* Initialize the ccw_device structure. */
+	cdev->dev.parent = &sch->dev;
+	sch_set_cdev(sch, cdev);
+	io_subchannel_recog(cdev, sch);
+	/* Now wait for the async. recognition to come to an end. */
+	spin_lock_irq(cdev->ccwlock);
+	while (!dev_fsm_final_state(cdev))
+		wait_cons_dev();
+	rc = -EIO;
+	if (cdev->private->state != DEV_STATE_OFFLINE)
+		goto out_unlock;
+	ccw_device_online(cdev);
+	while (!dev_fsm_final_state(cdev))
+		wait_cons_dev();
+	if (cdev->private->state != DEV_STATE_ONLINE)
+		goto out_unlock;
+	rc = 0;
+out_unlock:
+	spin_unlock_irq(cdev->ccwlock);
+	return rc;
+}
+
+struct ccw_device *
+ccw_device_probe_console(void)
+{
+	struct subchannel *sch;
+	int ret;
+
+	if (xchg(&console_cdev_in_use, 1) != 0)
+		return ERR_PTR(-EBUSY);
+	sch = cio_probe_console();
+	if (IS_ERR(sch)) {
+		console_cdev_in_use = 0;
+		return (void *) sch;
+	}
+	memset(&console_cdev, 0, sizeof(struct ccw_device));
+	memset(&console_private, 0, sizeof(struct ccw_device_private));
+	console_cdev.private = &console_private;
+	console_private.cdev = &console_cdev;
+	console_private.int_class = IOINT_CIO;
+	ret = ccw_device_console_enable(&console_cdev, sch);
+	if (ret) {
+		cio_release_console();
+		console_cdev_in_use = 0;
+		return ERR_PTR(ret);
+	}
+	console_cdev.online = 1;
+	return &console_cdev;
+}
+
+static int ccw_device_pm_restore(struct device *dev);
+
+int ccw_device_force_console(void)
+{
+	if (!console_cdev_in_use)
+		return -ENODEV;
+	return ccw_device_pm_restore(&console_cdev.dev);
+}
+EXPORT_SYMBOL_GPL(ccw_device_force_console);
+#endif
+
+/*
+ * get ccw_device matching the busid, but only if owned by cdrv
+ */
+static int
+__ccwdev_check_busid(struct device *dev, void *id)
+{
+	char *bus_id;
+
+	bus_id = id;
+
+	return (strcmp(bus_id, dev_name(dev)) == 0);
+}
+
+
+/**
+ * get_ccwdev_by_busid() - obtain device from a bus id
+ * @cdrv: driver the device is owned by
+ * @bus_id: bus id of the device to be searched
+ *
+ * This function searches all devices owned by @cdrv for a device with a bus
+ * id matching @bus_id.
+ * Returns:
+ *  If a match is found, the reference count of the found device is increased
+ *  and the device is returned; else %NULL is returned.
+ */
+struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
+				       const char *bus_id)
+{
+	struct device *dev;
+
+	dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id,
+				 __ccwdev_check_busid);
+
+	return dev ? to_ccwdev(dev) : NULL;
+}
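+
+/*
+ * A sketch (hypothetical helper) of how a driver might combine this
+ * lookup with ccw_device_set_online().  The lookup returns a reference
+ * which the caller must drop again:
+ *
+ *	static int example_bring_online(struct ccw_driver *drv, const char *busid)
+ *	{
+ *		struct ccw_device *cdev;
+ *		int rc;
+ *
+ *		cdev = get_ccwdev_by_busid(drv, busid);
+ *		if (!cdev)
+ *			return -ENODEV;
+ *		rc = ccw_device_set_online(cdev);
+ *		put_device(&cdev->dev);
+ *		return rc;
+ *	}
+ */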
+
+/************************** device driver handling ************************/
+
+/* This is the implementation of the ccw_driver class. The probe, remove
+ * and release methods are initially very similar to the device_driver
+ * implementations, with the difference that they have ccw_device
+ * arguments.
+ *
+ * A ccw driver also contains the information that is needed for
+ * device matching.
+ */
+static int
+ccw_device_probe (struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
+	int ret;
+
+	cdev->drv = cdrv; /* to let the driver call _set_online */
+	/* Note: we interpret class 0 in this context as an uninitialized
+	 * field since it translates to a non-I/O interrupt class. */
+	if (cdrv->int_class != 0)
+		cdev->private->int_class = cdrv->int_class;
+	else
+		cdev->private->int_class = IOINT_CIO;
+
+	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
+
+	if (ret) {
+		cdev->drv = NULL;
+		cdev->private->int_class = IOINT_CIO;
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+ccw_device_remove (struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_driver *cdrv = cdev->drv;
+	int ret;
+
+	if (cdrv->remove)
+		cdrv->remove(cdev);
+	if (cdev->online) {
+		cdev->online = 0;
+		spin_lock_irq(cdev->ccwlock);
+		ret = ccw_device_offline(cdev);
+		spin_unlock_irq(cdev->ccwlock);
+		if (ret == 0)
+			wait_event(cdev->private->wait_q,
+				   dev_fsm_final_state(cdev));
+		else
+			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
+				      "device 0.%x.%04x\n",
+				      ret, cdev->private->dev_id.ssid,
+				      cdev->private->dev_id.devno);
+		/* Give up reference obtained in ccw_device_set_online(). */
+		put_device(&cdev->dev);
+	}
+	ccw_device_set_timeout(cdev, 0);
+	cdev->drv = NULL;
+	cdev->private->int_class = IOINT_CIO;
+	return 0;
+}
+
+static void ccw_device_shutdown(struct device *dev)
+{
+	struct ccw_device *cdev;
+
+	cdev = to_ccwdev(dev);
+	if (cdev->drv && cdev->drv->shutdown)
+		cdev->drv->shutdown(cdev);
+	disable_cmf(cdev);
+}
+
+static int ccw_device_pm_prepare(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+
+	if (work_pending(&cdev->private->todo_work))
+		return -EAGAIN;
+	/* Fail while device is being set online/offline. */
+	if (atomic_read(&cdev->private->onoff))
+		return -EAGAIN;
+
+	if (cdev->online && cdev->drv && cdev->drv->prepare)
+		return cdev->drv->prepare(cdev);
+
+	return 0;
+}
+
+static void ccw_device_pm_complete(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+
+	if (cdev->online && cdev->drv && cdev->drv->complete)
+		cdev->drv->complete(cdev);
+}
+
+static int ccw_device_pm_freeze(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	int ret, cm_enabled;
+
+	/* Fail suspend while the device is in a transitional state. */
+	if (!dev_fsm_final_state(cdev))
+		return -EAGAIN;
+	if (!cdev->online)
+		return 0;
+	if (cdev->drv && cdev->drv->freeze) {
+		ret = cdev->drv->freeze(cdev);
+		if (ret)
+			return ret;
+	}
+
+	spin_lock_irq(sch->lock);
+	cm_enabled = cdev->private->cmb != NULL;
+	spin_unlock_irq(sch->lock);
+	if (cm_enabled) {
+		/* Stop the channel subsystem from writing measurement data to memory. */
+		ret = ccw_set_cmf(cdev, 0);
+		if (ret)
+			return ret;
+	}
+	/* From here on, disallow device driver I/O. */
+	spin_lock_irq(sch->lock);
+	ret = cio_disable_subchannel(sch);
+	spin_unlock_irq(sch->lock);
+
+	return ret;
+}
+
+static int ccw_device_pm_thaw(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	int ret, cm_enabled;
+
+	if (!cdev->online)
+		return 0;
+
+	spin_lock_irq(sch->lock);
+	/* Allow device driver I/O again. */
+	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
+	cm_enabled = cdev->private->cmb != NULL;
+	spin_unlock_irq(sch->lock);
+	if (ret)
+		return ret;
+
+	if (cm_enabled) {
+		ret = ccw_set_cmf(cdev, 1);
+		if (ret)
+			return ret;
+	}
+
+	if (cdev->drv && cdev->drv->thaw)
+		ret = cdev->drv->thaw(cdev);
+
+	return ret;
+}
+
+static void __ccw_device_pm_restore(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	spin_lock_irq(sch->lock);
+	if (cio_is_console(sch->schid)) {
+		cio_enable_subchannel(sch, (u32)(addr_t)sch);
+		goto out_unlock;
+	}
+	/*
+	 * While we were sleeping, devices may have gone or become
+	 * available again. Kick re-detection.
+	 */
+	cdev->private->flags.resuming = 1;
+	cdev->private->path_new_mask = LPM_ANYPATH;
+	css_sched_sch_todo(sch, SCH_TODO_EVAL);
+	spin_unlock_irq(sch->lock);
+	css_wait_for_slow_path();
+
+	/* cdev may have been moved to a different subchannel. */
+	sch = to_subchannel(cdev->dev.parent);
+	spin_lock_irq(sch->lock);
+	if (cdev->private->state != DEV_STATE_ONLINE &&
+	    cdev->private->state != DEV_STATE_OFFLINE)
+		goto out_unlock;
+
+	ccw_device_recognition(cdev);
+	spin_unlock_irq(sch->lock);
+	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
+		   cdev->private->state == DEV_STATE_DISCONNECTED);
+	spin_lock_irq(sch->lock);
+
+out_unlock:
+	cdev->private->flags.resuming = 0;
+	spin_unlock_irq(sch->lock);
+}
+
+static int resume_handle_boxed(struct ccw_device *cdev)
+{
+	cdev->private->state = DEV_STATE_BOXED;
+	if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
+		return 0;
+	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+	return -ENODEV;
+}
+
+static int resume_handle_disc(struct ccw_device *cdev)
+{
+	cdev->private->state = DEV_STATE_DISCONNECTED;
+	if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
+		return 0;
+	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+	return -ENODEV;
+}
+
+static int ccw_device_pm_restore(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct subchannel *sch;
+	int ret = 0;
+
+	__ccw_device_pm_restore(cdev);
+	sch = to_subchannel(cdev->dev.parent);
+	spin_lock_irq(sch->lock);
+	if (cio_is_console(sch->schid))
+		goto out_restore;
+
+	/* check recognition results */
+	switch (cdev->private->state) {
+	case DEV_STATE_OFFLINE:
+	case DEV_STATE_ONLINE:
+		cdev->private->flags.donotify = 0;
+		break;
+	case DEV_STATE_BOXED:
+		ret = resume_handle_boxed(cdev);
+		if (ret)
+			goto out_unlock;
+		goto out_restore;
+	default:
+		ret = resume_handle_disc(cdev);
+		if (ret)
+			goto out_unlock;
+		goto out_restore;
+	}
+	/* check if the device type has changed */
+	if (!ccw_device_test_sense_data(cdev)) {
+		ccw_device_update_sense_data(cdev);
+		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+	if (!cdev->online)
+		goto out_unlock;
+
+	if (ccw_device_online(cdev)) {
+		ret = resume_handle_disc(cdev);
+		if (ret)
+			goto out_unlock;
+		goto out_restore;
+	}
+	spin_unlock_irq(sch->lock);
+	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+	spin_lock_irq(sch->lock);
+
+	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+
+	/* reenable cmf, if needed */
+	if (cdev->private->cmb) {
+		spin_unlock_irq(sch->lock);
+		ret = ccw_set_cmf(cdev, 1);
+		spin_lock_irq(sch->lock);
+		if (ret) {
+			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
+				      "(rc=%d)\n", cdev->private->dev_id.ssid,
+				      cdev->private->dev_id.devno, ret);
+			ret = 0;
+		}
+	}
+
+out_restore:
+	spin_unlock_irq(sch->lock);
+	if (cdev->online && cdev->drv && cdev->drv->restore)
+		ret = cdev->drv->restore(cdev);
+	return ret;
+
+out_unlock:
+	spin_unlock_irq(sch->lock);
+	return ret;
+}
+
+static const struct dev_pm_ops ccw_pm_ops = {
+	.prepare = ccw_device_pm_prepare,
+	.complete = ccw_device_pm_complete,
+	.freeze = ccw_device_pm_freeze,
+	.thaw = ccw_device_pm_thaw,
+	.restore = ccw_device_pm_restore,
+};
+
+static struct bus_type ccw_bus_type = {
+	.name   = "ccw",
+	.match  = ccw_bus_match,
+	.uevent = ccw_uevent,
+	.probe  = ccw_device_probe,
+	.remove = ccw_device_remove,
+	.shutdown = ccw_device_shutdown,
+	.pm = &ccw_pm_ops,
+};
+
+/**
+ * ccw_driver_register() - register a ccw driver
+ * @cdriver: driver to be registered
+ *
+ * This function is mainly a wrapper around driver_register().
+ * Returns:
+ *   %0 on success and a negative error value on failure.
+ */
+int ccw_driver_register(struct ccw_driver *cdriver)
+{
+	struct device_driver *drv = &cdriver->driver;
+
+	drv->bus = &ccw_bus_type;
+
+	return driver_register(drv);
+}
+
+/**
+ * ccw_driver_unregister() - deregister a ccw driver
+ * @cdriver: driver to be deregistered
+ *
+ * This function is mainly a wrapper around driver_unregister().
+ */
+void ccw_driver_unregister(struct ccw_driver *cdriver)
+{
+	driver_unregister(&cdriver->driver);
+}
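+
+/*
+ * Usage sketch (illustrative only, not code from this file): a minimal ccw
+ * driver module built around ccw_driver_register()/ccw_driver_unregister().
+ * All foo_* names and the 0x3990/0x3390 device IDs are hypothetical choices
+ * made for the example.
+ *
+ *	static struct ccw_device_id foo_ids[] = {
+ *		{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0) },
+ *		{ },
+ *	};
+ *	MODULE_DEVICE_TABLE(ccw, foo_ids);
+ *
+ *	static int foo_probe(struct ccw_device *cdev)
+ *	{
+ *		dev_info(&cdev->dev, "foo: device attached\n");
+ *		return 0;
+ *	}
+ *
+ *	static void foo_remove(struct ccw_device *cdev)
+ *	{
+ *		dev_info(&cdev->dev, "foo: device detached\n");
+ *	}
+ *
+ *	static struct ccw_driver foo_driver = {
+ *		.driver = {
+ *			.name	= "foo",
+ *			.owner	= THIS_MODULE,
+ *		},
+ *		.ids	= foo_ids,
+ *		.probe	= foo_probe,
+ *		.remove	= foo_remove,
+ *	};
+ *
+ *	static int __init foo_init(void)
+ *	{
+ *		return ccw_driver_register(&foo_driver);
+ *	}
+ *	module_init(foo_init);
+ *
+ *	static void __exit foo_exit(void)
+ *	{
+ *		ccw_driver_unregister(&foo_driver);
+ *	}
+ *	module_exit(foo_exit);
+ */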
+
+/* Helper func for qdio. */
+struct subchannel_id
+ccw_device_get_subchannel_id(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	return sch->schid;
+}
+
+static void ccw_device_todo(struct work_struct *work)
+{
+	struct ccw_device_private *priv;
+	struct ccw_device *cdev;
+	struct subchannel *sch;
+	enum cdev_todo todo;
+
+	priv = container_of(work, struct ccw_device_private, todo_work);
+	cdev = priv->cdev;
+	sch = to_subchannel(cdev->dev.parent);
+	/* Find out todo. */
+	spin_lock_irq(cdev->ccwlock);
+	todo = priv->todo;
+	priv->todo = CDEV_TODO_NOTHING;
+	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
+		      priv->dev_id.ssid, priv->dev_id.devno, todo);
+	spin_unlock_irq(cdev->ccwlock);
+	/* Perform todo. */
+	switch (todo) {
+	case CDEV_TODO_ENABLE_CMF:
+		cmf_reenable(cdev);
+		break;
+	case CDEV_TODO_REBIND:
+		ccw_device_do_unbind_bind(cdev);
+		break;
+	case CDEV_TODO_REGISTER:
+		io_subchannel_register(cdev);
+		break;
+	case CDEV_TODO_UNREG_EVAL:
+		if (!sch_is_pseudo_sch(sch))
+			css_schedule_eval(sch->schid);
+		/* fall-through */
+	case CDEV_TODO_UNREG:
+		if (sch_is_pseudo_sch(sch))
+			ccw_device_unregister(cdev);
+		else
+			ccw_device_call_sch_unregister(cdev);
+		break;
+	default:
+		break;
+	}
+	/* Release workqueue ref. */
+	put_device(&cdev->dev);
+}
+
+/**
+ * ccw_device_sched_todo - schedule ccw device operation
+ * @cdev: ccw device
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if an operation of equal or higher priority is
+ * already scheduled. Needs to be called with the ccwdev lock held.
+ */
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
+{
+	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
+		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
+		      todo);
+	if (cdev->private->todo >= todo)
+		return;
+	cdev->private->todo = todo;
+	/* Get workqueue ref. */
+	if (!get_device(&cdev->dev))
+		return;
+	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
+		/* Already queued, release workqueue ref. */
+		put_device(&cdev->dev);
+	}
+}
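+
+/*
+ * Illustrative call pattern (an assumption for documentation purposes, not
+ * taken from a caller in this file): the todo must be scheduled while the
+ * ccwdev lock is held, e.g.
+ *
+ *	spin_lock_irqsave(cdev->ccwlock, flags);
+ *	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ *	spin_unlock_irqrestore(cdev->ccwlock, flags);
+ */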
+
+/**
+ * ccw_device_siosl() - initiate logging
+ * @cdev: ccw device
+ *
+ * This function is used to invoke model-dependent logging within the channel
+ * subsystem.
+ */
+int ccw_device_siosl(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	return chsc_siosl(sch->schid);
+}
+EXPORT_SYMBOL_GPL(ccw_device_siosl);
+
+MODULE_LICENSE("GPL");
+EXPORT_SYMBOL(ccw_device_set_online);
+EXPORT_SYMBOL(ccw_device_set_offline);
+EXPORT_SYMBOL(ccw_driver_register);
+EXPORT_SYMBOL(ccw_driver_unregister);
+EXPORT_SYMBOL(get_ccwdev_by_busid);
+EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/device.h b/ap/os/linux/linux-3.4.x/drivers/s390/cio/device.h
new file mode 100644
index 0000000..179824b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/device.h
@@ -0,0 +1,154 @@
+#ifndef S390_DEVICE_H
+#define S390_DEVICE_H
+
+#include <asm/ccwdev.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/notifier.h>
+#include <linux/kernel_stat.h>
+#include "io_sch.h"
+
+/*
+ * states of the device statemachine
+ */
+enum dev_state {
+	DEV_STATE_NOT_OPER,
+	DEV_STATE_SENSE_PGID,
+	DEV_STATE_SENSE_ID,
+	DEV_STATE_OFFLINE,
+	DEV_STATE_VERIFY,
+	DEV_STATE_ONLINE,
+	DEV_STATE_W4SENSE,
+	DEV_STATE_DISBAND_PGID,
+	DEV_STATE_BOXED,
+	/* states to wait for i/o completion before doing something */
+	DEV_STATE_TIMEOUT_KILL,
+	DEV_STATE_QUIESCE,
+	/* special states for devices gone not operational */
+	DEV_STATE_DISCONNECTED,
+	DEV_STATE_DISCONNECTED_SENSE_ID,
+	DEV_STATE_CMFCHANGE,
+	DEV_STATE_CMFUPDATE,
+	DEV_STATE_STEAL_LOCK,
+	/* last element! */
+	NR_DEV_STATES
+};
+
+/*
+ * asynchronous events of the device statemachine
+ */
+enum dev_event {
+	DEV_EVENT_NOTOPER,
+	DEV_EVENT_INTERRUPT,
+	DEV_EVENT_TIMEOUT,
+	DEV_EVENT_VERIFY,
+	/* last element! */
+	NR_DEV_EVENTS
+};
+
+struct ccw_device;
+
+/*
+ * action called through jumptable
+ */
+typedef void (fsm_func_t)(struct ccw_device *, enum dev_event);
+extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];
+
+static inline void
+dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	int state = cdev->private->state;
+
+	if (dev_event == DEV_EVENT_INTERRUPT) {
+		if (state == DEV_STATE_ONLINE)
+			kstat_cpu(smp_processor_id()).
+				irqs[cdev->private->int_class]++;
+		else if (state != DEV_STATE_CMFCHANGE &&
+			 state != DEV_STATE_CMFUPDATE)
+			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+	}
+	dev_jumptable[state][dev_event](cdev, dev_event);
+}
+
+/*
+ * Returns 1 if the device state is final.
+ */
+static inline int
+dev_fsm_final_state(struct ccw_device *cdev)
+{
+	return (cdev->private->state == DEV_STATE_NOT_OPER ||
+		cdev->private->state == DEV_STATE_OFFLINE ||
+		cdev->private->state == DEV_STATE_ONLINE ||
+		cdev->private->state == DEV_STATE_BOXED);
+}
+
+extern wait_queue_head_t ccw_device_init_wq;
+extern atomic_t ccw_device_init_count;
+int __init io_subchannel_init(void);
+
+void io_subchannel_recog_done(struct ccw_device *cdev);
+void io_subchannel_init_config(struct subchannel *sch);
+
+int ccw_device_cancel_halt_clear(struct ccw_device *);
+
+int ccw_device_is_orphan(struct ccw_device *);
+
+void ccw_device_recognition(struct ccw_device *);
+int ccw_device_online(struct ccw_device *);
+int ccw_device_offline(struct ccw_device *);
+void ccw_device_update_sense_data(struct ccw_device *);
+int ccw_device_test_sense_data(struct ccw_device *);
+void ccw_device_schedule_sch_unregister(struct ccw_device *);
+int ccw_purge_blacklisted(void);
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
+
+/* Function prototypes for device status and basic sense stuff. */
+void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
+void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
+int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
+int ccw_device_do_sense(struct ccw_device *, struct irb *);
+
+/* Function prototype for internal request handling. */
+int lpm_adjust(int lpm, int mask);
+void ccw_request_start(struct ccw_device *);
+int ccw_request_cancel(struct ccw_device *cdev);
+void ccw_request_handler(struct ccw_device *cdev);
+void ccw_request_timeout(struct ccw_device *cdev);
+void ccw_request_notoper(struct ccw_device *cdev);
+
+/* Function prototypes for sense id stuff. */
+void ccw_device_sense_id_start(struct ccw_device *);
+void ccw_device_sense_id_done(struct ccw_device *, int);
+
+/* Function prototypes for path grouping stuff. */
+void ccw_device_verify_start(struct ccw_device *);
+void ccw_device_verify_done(struct ccw_device *, int);
+
+void ccw_device_disband_start(struct ccw_device *);
+void ccw_device_disband_done(struct ccw_device *, int);
+
+void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *);
+void ccw_device_stlck_done(struct ccw_device *, void *, int);
+
+int ccw_device_call_handler(struct ccw_device *);
+
+int ccw_device_stlck(struct ccw_device *);
+
+/* Helper function for machine check handling. */
+void ccw_device_trigger_reprobe(struct ccw_device *);
+void ccw_device_kill_io(struct ccw_device *);
+int ccw_device_notify(struct ccw_device *, int);
+void ccw_device_set_disconnected(struct ccw_device *cdev);
+void ccw_device_set_notoper(struct ccw_device *cdev);
+
+/* qdio needs this. */
+void ccw_device_set_timeout(struct ccw_device *, int);
+extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
+
+/* Channel measurement facility related */
+void retry_set_schib(struct ccw_device *cdev);
+void cmf_retry_copy_block(struct ccw_device *);
+int cmf_reenable(struct ccw_device *);
+int ccw_set_cmf(struct ccw_device *cdev, int enable);
+extern struct device_attribute dev_attr_cmb_enable;
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/device_fsm.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/device_fsm.c
new file mode 100644
index 0000000..1b85351
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/device_fsm.c
@@ -0,0 +1,1118 @@
+/*
+ * drivers/s390/cio/device_fsm.c
+ * finite state machine for device handling
+ *
+ *    Copyright IBM Corp. 2002,2008
+ *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
+ *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/chpid.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+#include "chsc.h"
+#include "ioasm.h"
+#include "chp.h"
+
+static int timeout_log_enabled;
+
+static int __init ccw_timeout_log_setup(char *unused)
+{
+	timeout_log_enabled = 1;
+	return 1;
+}
+
+__setup("ccw_timeout_log", ccw_timeout_log_setup);
+
+static void ccw_timeout_log(struct ccw_device *cdev)
+{
+	struct schib schib;
+	struct subchannel *sch;
+	struct io_subchannel_private *private;
+	union orb *orb;
+	int cc;
+
+	sch = to_subchannel(cdev->dev.parent);
+	private = to_io_private(sch);
+	orb = &private->orb;
+	cc = stsch_err(sch->schid, &schib);
+
+	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
+	       "device information:\n", get_clock());
+	printk(KERN_WARNING "cio: orb:\n");
+	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
+		       orb, sizeof(*orb), 0);
+	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
+	       dev_name(&cdev->dev));
+	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
+	       dev_name(&sch->dev));
+	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
+	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
+
+	if (orb->tm.b) {
+		printk(KERN_WARNING "cio: orb indicates transport mode\n");
+		printk(KERN_WARNING "cio: last tcw:\n");
+		print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
+			       (void *)(addr_t)orb->tm.tcw,
+			       sizeof(struct tcw), 0);
+	} else {
+		printk(KERN_WARNING "cio: orb indicates command mode\n");
+		if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
+		    (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
+			printk(KERN_WARNING "cio: last channel program "
+			       "(intern):\n");
+		else
+			printk(KERN_WARNING "cio: last channel program:\n");
+
+		print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
+			       (void *)(addr_t)orb->cmd.cpa,
+			       sizeof(struct ccw1), 0);
+	}
+	printk(KERN_WARNING "cio: ccw device state: %d\n",
+	       cdev->private->state);
+	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
+	printk(KERN_WARNING "cio: schib:\n");
+	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
+		       &schib, sizeof(schib), 0);
+	printk(KERN_WARNING "cio: ccw device flags:\n");
+	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
+		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
+}
+
+/*
+ * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
+ */
+static void
+ccw_device_timeout(unsigned long data)
+{
+	struct ccw_device *cdev;
+
+	cdev = (struct ccw_device *) data;
+	spin_lock_irq(cdev->ccwlock);
+	if (timeout_log_enabled)
+		ccw_timeout_log(cdev);
+	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
+	spin_unlock_irq(cdev->ccwlock);
+}
+
+/*
+ * Set timeout
+ */
+void
+ccw_device_set_timeout(struct ccw_device *cdev, int expires)
+{
+	if (expires == 0) {
+		del_timer(&cdev->private->timer);
+		return;
+	}
+	if (timer_pending(&cdev->private->timer)) {
+		if (mod_timer(&cdev->private->timer, jiffies + expires))
+			return;
+	}
+	cdev->private->timer.function = ccw_device_timeout;
+	cdev->private->timer.data = (unsigned long) cdev;
+	cdev->private->timer.expires = jiffies + expires;
+	add_timer(&cdev->private->timer);
+}
+
+/*
+ * Cancel running i/o. This is called repeatedly since halt/clear are
+ * asynchronous operations. We do one try with cio_cancel, up to three tries
+ * with cio_halt and up to 255 tries with cio_clear. If everything fails,
+ * give up and report an error.
+ * Returns 0 if device now idle, -ENODEV for device not operational and
+ * -EBUSY if an interrupt is expected (either from halt/clear or from a
+ * status pending).
+ */
+int
+ccw_device_cancel_halt_clear(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+	int ret;
+
+	sch = to_subchannel(cdev->dev.parent);
+	if (cio_update_schib(sch))
+		return -ENODEV;
+	if (!sch->schib.pmcw.ena)
+		/* Not operational -> done. */
+		return 0;
+	/* Stage 1: cancel io. */
+	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
+	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
+		if (!scsw_is_tm(&sch->schib.scsw)) {
+			ret = cio_cancel(sch);
+			if (ret != -EINVAL)
+				return ret;
+		}
+		/* cancel io unsuccessful or not applicable (transport mode).
+		 * Continue with asynchronous instructions. */
+		cdev->private->iretry = 3;	/* 3 halt retries. */
+	}
+	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
+		/* Stage 2: halt io. */
+		if (cdev->private->iretry) {
+			cdev->private->iretry--;
+			ret = cio_halt(sch);
+			if (ret != -EBUSY)
+				return (ret == 0) ? -EBUSY : ret;
+		}
+		/* halt io unsuccessful. */
+		cdev->private->iretry = 255;	/* 255 clear retries. */
+	}
+	/* Stage 3: clear io. */
+	if (cdev->private->iretry) {
+		cdev->private->iretry--;
+		ret = cio_clear (sch);
+		return (ret == 0) ? -EBUSY : ret;
+	}
+	/* Function was unsuccessful */
+	CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
+		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno);
+	return -EIO;
+}
+
+void ccw_device_update_sense_data(struct ccw_device *cdev)
+{
+	memset(&cdev->id, 0, sizeof(cdev->id));
+	cdev->id.cu_type   = cdev->private->senseid.cu_type;
+	cdev->id.cu_model  = cdev->private->senseid.cu_model;
+	cdev->id.dev_type  = cdev->private->senseid.dev_type;
+	cdev->id.dev_model = cdev->private->senseid.dev_model;
+}
+
+int ccw_device_test_sense_data(struct ccw_device *cdev)
+{
+	return cdev->id.cu_type == cdev->private->senseid.cu_type &&
+		cdev->id.cu_model == cdev->private->senseid.cu_model &&
+		cdev->id.dev_type == cdev->private->senseid.dev_type &&
+		cdev->id.dev_model == cdev->private->senseid.dev_model;
+}
+
+/*
+ * The machine won't give us any notification by machine check if a chpid has
+ * been varied online on the SE, so we have to find out by magic (i.e. driving
+ * the channel subsystem to device selection and updating our path masks).
+ */
+static void
+__recover_lost_chpids(struct subchannel *sch, int old_lpm)
+{
+	int mask, i;
+	struct chp_id chpid;
+
+	chp_id_init(&chpid);
+	for (i = 0; i<8; i++) {
+		mask = 0x80 >> i;
+		if (!(sch->lpm & mask))
+			continue;
+		if (old_lpm & mask)
+			continue;
+		chpid.id = sch->schib.pmcw.chpid[i];
+		if (!chp_is_registered(chpid))
+			css_schedule_eval_all();
+	}
+}
+
+/*
+ * Stop device recognition.
+ */
+static void
+ccw_device_recog_done(struct ccw_device *cdev, int state)
+{
+	struct subchannel *sch;
+	int old_lpm;
+
+	sch = to_subchannel(cdev->dev.parent);
+
+	if (cio_disable_subchannel(sch))
+		state = DEV_STATE_NOT_OPER;
+	/*
+	 * Now that we tried recognition, we have performed device selection
+	 * through ssch() and the path information is up to date.
+	 */
+	old_lpm = sch->lpm;
+
+	/* Check since device may again have become not operational. */
+	if (cio_update_schib(sch))
+		state = DEV_STATE_NOT_OPER;
+	else
+		sch->lpm = sch->schib.pmcw.pam & sch->opm;
+
+	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
+		/* Force reprobe on all chpids. */
+		old_lpm = 0;
+	if (sch->lpm != old_lpm)
+		__recover_lost_chpids(sch, old_lpm);
+	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
+	    (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
+		cdev->private->flags.recog_done = 1;
+		cdev->private->state = DEV_STATE_DISCONNECTED;
+		wake_up(&cdev->private->wait_q);
+		return;
+	}
+	if (cdev->private->flags.resuming) {
+		cdev->private->state = state;
+		cdev->private->flags.recog_done = 1;
+		wake_up(&cdev->private->wait_q);
+		return;
+	}
+	switch (state) {
+	case DEV_STATE_NOT_OPER:
+		break;
+	case DEV_STATE_OFFLINE:
+		if (!cdev->online) {
+			ccw_device_update_sense_data(cdev);
+			break;
+		}
+		cdev->private->state = DEV_STATE_OFFLINE;
+		cdev->private->flags.recog_done = 1;
+		if (ccw_device_test_sense_data(cdev)) {
+			cdev->private->flags.donotify = 1;
+			ccw_device_online(cdev);
+			wake_up(&cdev->private->wait_q);
+		} else {
+			ccw_device_update_sense_data(cdev);
+			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
+		}
+		return;
+	case DEV_STATE_BOXED:
+		if (cdev->id.cu_type != 0) { /* device was recognized before */
+			cdev->private->flags.recog_done = 1;
+			cdev->private->state = DEV_STATE_BOXED;
+			wake_up(&cdev->private->wait_q);
+			return;
+		}
+		break;
+	}
+	cdev->private->state = state;
+	io_subchannel_recog_done(cdev);
+	wake_up(&cdev->private->wait_q);
+}
+
+/*
+ * Function called from device_id.c after sense id has completed.
+ */
+void
+ccw_device_sense_id_done(struct ccw_device *cdev, int err)
+{
+	switch (err) {
+	case 0:
+		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
+		break;
+	case -ETIME:		/* Sense id stopped by timeout. */
+		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
+		break;
+	default:
+		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+		break;
+	}
+}
+
+/**
+ * ccw_device_notify() - inform the device's driver about an event
+ * @cdev: device for which an event occurred
+ * @event: event that occurred
+ *
+ * Returns:
+ *   -%EINVAL if the device is offline or has no driver.
+ *   -%EOPNOTSUPP if the device's driver has no notifier registered.
+ *   %NOTIFY_OK if the driver wants to keep the device.
+ *   %NOTIFY_BAD if the driver doesn't want to keep the device.
+ */
+int ccw_device_notify(struct ccw_device *cdev, int event)
+{
+	int ret = -EINVAL;
+
+	if (!cdev->drv)
+		goto out;
+	if (!cdev->online)
+		goto out;
+	CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
+		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
+		      event);
+	if (!cdev->drv->notify) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+	if (cdev->drv->notify(cdev, event))
+		ret = NOTIFY_OK;
+	else
+		ret = NOTIFY_BAD;
+out:
+	return ret;
+}
+
+static void ccw_device_oper_notify(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
+		/* Reenable channel measurements, if needed. */
+		ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
+		/* Save indication for new paths. */
+		cdev->private->path_new_mask = sch->vpm;
+		return;
+	}
+	/* Driver doesn't want device back. */
+	ccw_device_set_notoper(cdev);
+	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
+}
+
+/*
+ * Finished with online/offline processing.
+ */
+static void
+ccw_device_done(struct ccw_device *cdev, int state)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+
+	ccw_device_set_timeout(cdev, 0);
+
+	if (state != DEV_STATE_ONLINE)
+		cio_disable_subchannel(sch);
+
+	/* Reset device status. */
+	memset(&cdev->private->irb, 0, sizeof(struct irb));
+
+	cdev->private->state = state;
+
+	switch (state) {
+	case DEV_STATE_BOXED:
+		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
+			      cdev->private->dev_id.devno, sch->schid.sch_no);
+		if (cdev->online &&
+		    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
+			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+		cdev->private->flags.donotify = 0;
+		break;
+	case DEV_STATE_NOT_OPER:
+		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
+			      cdev->private->dev_id.devno, sch->schid.sch_no);
+		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+		else
+			ccw_device_set_disconnected(cdev);
+		cdev->private->flags.donotify = 0;
+		break;
+	case DEV_STATE_DISCONNECTED:
+		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
+			      "%04x\n", cdev->private->dev_id.devno,
+			      sch->schid.sch_no);
+		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
+			cdev->private->state = DEV_STATE_NOT_OPER;
+			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+		} else
+			ccw_device_set_disconnected(cdev);
+		cdev->private->flags.donotify = 0;
+		break;
+	default:
+		break;
+	}
+
+	if (cdev->private->flags.donotify) {
+		cdev->private->flags.donotify = 0;
+		ccw_device_oper_notify(cdev);
+	}
+	wake_up(&cdev->private->wait_q);
+}
+
+/*
+ * Start device recognition.
+ */
+void ccw_device_recognition(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	/*
+	 * We used to start here with a sense pgid to find out whether a device
+	 * is locked by someone else. Unfortunately, the sense pgid command
+	 * code has other meanings on devices predating the path grouping
+	 * algorithm, so we start with sense id and box the device after an
+	 * timeout (or if sense pgid during path verification detects the device
+	 * is locked, as may happen on newer devices).
+	 */
+	cdev->private->flags.recog_done = 0;
+	cdev->private->state = DEV_STATE_SENSE_ID;
+	if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
+		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+		return;
+	}
+	ccw_device_sense_id_start(cdev);
+}
+
+/*
+ * Handle events for states that use the ccw request infrastructure.
+ */
+static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
+{
+	switch (e) {
+	case DEV_EVENT_NOTOPER:
+		ccw_request_notoper(cdev);
+		break;
+	case DEV_EVENT_INTERRUPT:
+		ccw_request_handler(cdev);
+		break;
+	case DEV_EVENT_TIMEOUT:
+		ccw_request_timeout(cdev);
+		break;
+	default:
+		break;
+	}
+}
+
+static void ccw_device_report_path_events(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	int path_event[8];
+	int chp, mask;
+
+	for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
+		path_event[chp] = PE_NONE;
+		if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
+			path_event[chp] |= PE_PATH_GONE;
+		if (mask & cdev->private->path_new_mask & sch->vpm)
+			path_event[chp] |= PE_PATH_AVAILABLE;
+		if (mask & cdev->private->pgid_reset_mask & sch->vpm)
+			path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
+	}
+	if (cdev->online && cdev->drv->path_event)
+		cdev->drv->path_event(cdev, path_event);
+}
+
+static void ccw_device_reset_path_events(struct ccw_device *cdev)
+{
+	cdev->private->path_gone_mask = 0;
+	cdev->private->path_new_mask = 0;
+	cdev->private->pgid_reset_mask = 0;
+}
+
+static void create_fake_irb(struct irb *irb, int type)
+{
+	memset(irb, 0, sizeof(*irb));
+	if (type == FAKE_CMD_IRB) {
+		struct cmd_scsw *scsw = &irb->scsw.cmd;
+		scsw->cc = 1;
+		scsw->fctl = SCSW_FCTL_START_FUNC;
+		scsw->actl = SCSW_ACTL_START_PEND;
+		scsw->stctl = SCSW_STCTL_STATUS_PEND;
+	} else if (type == FAKE_TM_IRB) {
+		struct tm_scsw *scsw = &irb->scsw.tm;
+		scsw->x = 1;
+		scsw->cc = 1;
+		scsw->fctl = SCSW_FCTL_START_FUNC;
+		scsw->actl = SCSW_ACTL_START_PEND;
+		scsw->stctl = SCSW_STCTL_STATUS_PEND;
+	}
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	/* Update schib - pom may have changed. */
+	if (cio_update_schib(sch)) {
+		err = -ENODEV;
+		goto callback;
+	}
+	/* Update lpm with verified path mask. */
+	sch->lpm = sch->vpm;
+	/* Repeat path verification? */
+	if (cdev->private->flags.doverify) {
+		ccw_device_verify_start(cdev);
+		return;
+	}
+callback:
+	switch (err) {
+	case 0:
+		ccw_device_done(cdev, DEV_STATE_ONLINE);
+		/* Deliver fake irb to device driver, if needed. */
+		if (cdev->private->flags.fake_irb) {
+			create_fake_irb(&cdev->private->irb,
+					cdev->private->flags.fake_irb);
+			cdev->private->flags.fake_irb = 0;
+			if (cdev->handler)
+				cdev->handler(cdev, cdev->private->intparm,
+					      &cdev->private->irb);
+			memset(&cdev->private->irb, 0, sizeof(struct irb));
+		}
+		ccw_device_report_path_events(cdev);
+		break;
+	case -ETIME:
+	case -EUSERS:
+		/* Reset oper notify indication after verify error. */
+		cdev->private->flags.donotify = 0;
+		ccw_device_done(cdev, DEV_STATE_BOXED);
+		break;
+	case -EACCES:
+		/* Reset oper notify indication after verify error. */
+		cdev->private->flags.donotify = 0;
+		ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
+		break;
+	default:
+		/* Reset oper notify indication after verify error. */
+		cdev->private->flags.donotify = 0;
+		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		break;
+	}
+	ccw_device_reset_path_events(cdev);
+}
+
+/*
+ * Get device online.
+ */
+int
+ccw_device_online(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+	int ret;
+
+	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
+	    (cdev->private->state != DEV_STATE_BOXED))
+		return -EINVAL;
+	sch = to_subchannel(cdev->dev.parent);
+	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
+	if (ret != 0) {
+		/* Couldn't enable the subchannel for i/o. Sick device. */
+		if (ret == -ENODEV)
+			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+		return ret;
+	}
+	/* Start initial path verification. */
+	cdev->private->state = DEV_STATE_VERIFY;
+	ccw_device_verify_start(cdev);
+	return 0;
+}
+
+void
+ccw_device_disband_done(struct ccw_device *cdev, int err)
+{
+	switch (err) {
+	case 0:
+		ccw_device_done(cdev, DEV_STATE_OFFLINE);
+		break;
+	case -ETIME:
+		ccw_device_done(cdev, DEV_STATE_BOXED);
+		break;
+	default:
+		cdev->private->flags.donotify = 0;
+		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		break;
+	}
+}
+
+/*
+ * Shutdown device.
+ */
+int
+ccw_device_offline(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+
+	/* Allow ccw_device_offline while disconnected. */
+	if (cdev->private->state == DEV_STATE_DISCONNECTED ||
+	    cdev->private->state == DEV_STATE_NOT_OPER) {
+		cdev->private->flags.donotify = 0;
+		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		return 0;
+	}
+	if (cdev->private->state == DEV_STATE_BOXED) {
+		ccw_device_done(cdev, DEV_STATE_BOXED);
+		return 0;
+	}
+	if (ccw_device_is_orphan(cdev)) {
+		ccw_device_done(cdev, DEV_STATE_OFFLINE);
+		return 0;
+	}
+	sch = to_subchannel(cdev->dev.parent);
+	if (cio_update_schib(sch))
+		return -ENODEV;
+	if (scsw_actl(&sch->schib.scsw) != 0)
+		return -EBUSY;
+	if (cdev->private->state != DEV_STATE_ONLINE)
+		return -EINVAL;
+	/* Are we doing path grouping? */
+	if (!cdev->private->flags.pgroup) {
+		/* No, set state offline immediately. */
+		ccw_device_done(cdev, DEV_STATE_OFFLINE);
+		return 0;
+	}
+	/* Start Set Path Group commands. */
+	cdev->private->state = DEV_STATE_DISBAND_PGID;
+	ccw_device_disband_start(cdev);
+	return 0;
+}
+
+/*
+ * Handle not operational event in non-special state.
+ */
+static void ccw_device_generic_notoper(struct ccw_device *cdev,
+				       enum dev_event dev_event)
+{
+	if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+	else
+		ccw_device_set_disconnected(cdev);
+}
+
+/*
+ * Handle path verification event in offline state.
+ */
+static void ccw_device_offline_verify(struct ccw_device *cdev,
+				      enum dev_event dev_event)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	css_schedule_eval(sch->schid);
+}
+
+/*
+ * Handle path verification event.
+ */
+static void
+ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct subchannel *sch;
+
+	if (cdev->private->state == DEV_STATE_W4SENSE) {
+		cdev->private->flags.doverify = 1;
+		return;
+	}
+	sch = to_subchannel(cdev->dev.parent);
+	/*
+	 * Since we might not just be coming from an interrupt from the
+	 * subchannel, we have to update the schib.
+	 */
+	if (cio_update_schib(sch)) {
+		ccw_device_verify_done(cdev, -ENODEV);
+		return;
+	}
+
+	if (scsw_actl(&sch->schib.scsw) != 0 ||
+	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
+	    (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
+		/*
+		 * No final status yet or final status not yet delivered
+		 * to the device driver. Can't do path verification now,
+		 * delay until final status was delivered.
+		 */
+		cdev->private->flags.doverify = 1;
+		return;
+	}
+	/* Device is idle, we can do the path verification. */
+	cdev->private->state = DEV_STATE_VERIFY;
+	ccw_device_verify_start(cdev);
+}
+
+/*
+ * Handle path verification event in boxed state.
+ */
+static void ccw_device_boxed_verify(struct ccw_device *cdev,
+				    enum dev_event dev_event)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	if (cdev->online) {
+		if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
+			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		else
+			ccw_device_online_verify(cdev, dev_event);
+	} else
+		css_schedule_eval(sch->schid);
+}
+
+/*
+ * Got an interrupt for a normal io (state online).
+ */
+static void
+ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct irb *irb;
+	int is_cmd;
+
+	irb = (struct irb *)&S390_lowcore.irb;
+	is_cmd = !scsw_is_tm(&irb->scsw);
+	/* Check for unsolicited interrupt. */
+	if (!scsw_is_solicited(&irb->scsw)) {
+		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+		    !irb->esw.esw0.erw.cons) {
+			/* Unit check but no sense data. Need basic sense. */
+			if (ccw_device_do_sense(cdev, irb) != 0)
+				goto call_handler_unsol;
+			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+			cdev->private->state = DEV_STATE_W4SENSE;
+			cdev->private->intparm = 0;
+			return;
+		}
+call_handler_unsol:
+		if (cdev->handler)
+			cdev->handler (cdev, 0, irb);
+		if (cdev->private->flags.doverify)
+			ccw_device_online_verify(cdev, 0);
+		return;
+	}
+	/* Accumulate status and find out if a basic sense is needed. */
+	ccw_device_accumulate_irb(cdev, irb);
+	if (is_cmd && cdev->private->flags.dosense) {
+		if (ccw_device_do_sense(cdev, irb) == 0) {
+			cdev->private->state = DEV_STATE_W4SENSE;
+		}
+		return;
+	}
+	/* Call the handler. */
+	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
+		/* Start delayed path verification. */
+		ccw_device_online_verify(cdev, 0);
+}
+
+/*
+ * Got a timeout in the online state.
+ */
+static void
+ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	int ret;
+
+	ccw_device_set_timeout(cdev, 0);
+	cdev->private->iretry = 255;
+	ret = ccw_device_cancel_halt_clear(cdev);
+	if (ret == -EBUSY) {
+		ccw_device_set_timeout(cdev, 3*HZ);
+		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
+		return;
+	}
+	if (ret)
+		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+	else if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm,
+			      ERR_PTR(-ETIMEDOUT));
+}
+
+/*
+ * Got an interrupt for a basic sense.
+ */
+static void
+ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct irb *irb;
+
+	irb = (struct irb *)&S390_lowcore.irb;
+	/* Check for unsolicited interrupt. */
+	if (scsw_stctl(&irb->scsw) ==
+	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
+		if (scsw_cc(&irb->scsw) == 1)
+			/* Basic sense hasn't started. Try again. */
+			ccw_device_do_sense(cdev, irb);
+		else {
+			CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
+				      "interrupt during w4sense...\n",
+				      cdev->private->dev_id.ssid,
+				      cdev->private->dev_id.devno);
+			if (cdev->handler)
+				cdev->handler (cdev, 0, irb);
+		}
+		return;
+	}
+	/*
+	 * Check if a halt or clear has been issued in the meanwhile. If yes,
+	 * only deliver the halt/clear interrupt to the device driver as if it
+	 * had killed the original request.
+	 */
+	if (scsw_fctl(&irb->scsw) &
+	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
+		cdev->private->flags.dosense = 0;
+		memset(&cdev->private->irb, 0, sizeof(struct irb));
+		ccw_device_accumulate_irb(cdev, irb);
+		goto call_handler;
+	}
+	/* Add basic sense info to irb. */
+	ccw_device_accumulate_basic_sense(cdev, irb);
+	if (cdev->private->flags.dosense) {
+		/* Another basic sense is needed. */
+		ccw_device_do_sense(cdev, irb);
+		return;
+	}
+call_handler:
+	cdev->private->state = DEV_STATE_ONLINE;
+	/* In case sensing interfered with setting the device online */
+	wake_up(&cdev->private->wait_q);
+	/* Call the handler. */
+	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
+		/* Start delayed path verification. */
+		ccw_device_online_verify(cdev, 0);
+}
+
+static void
+ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	ccw_device_set_timeout(cdev, 0);
+	/* Start delayed path verification. */
+	ccw_device_online_verify(cdev, 0);
+	/* OK, i/o is dead now. Call interrupt handler. */
+	if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm,
+			      ERR_PTR(-EIO));
+}
+
+static void
+ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	int ret;
+
+	ret = ccw_device_cancel_halt_clear(cdev);
+	if (ret == -EBUSY) {
+		ccw_device_set_timeout(cdev, 3*HZ);
+		return;
+	}
+	/* Start delayed path verification. */
+	ccw_device_online_verify(cdev, 0);
+	if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm,
+			      ERR_PTR(-EIO));
+}
+
+void ccw_device_kill_io(struct ccw_device *cdev)
+{
+	int ret;
+
+	cdev->private->iretry = 255;
+	ret = ccw_device_cancel_halt_clear(cdev);
+	if (ret == -EBUSY) {
+		ccw_device_set_timeout(cdev, 3*HZ);
+		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
+		return;
+	}
+	/* Start delayed path verification. */
+	ccw_device_online_verify(cdev, 0);
+	if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm,
+			      ERR_PTR(-EIO));
+}
+
+static void
+ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	/* Start verification after current task finished. */
+	cdev->private->flags.doverify = 1;
+}
+
+static void
+ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
+		/* Couldn't enable the subchannel for i/o. Sick device. */
+		return;
+	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
+	ccw_device_sense_id_start(cdev);
+}
+
+void ccw_device_trigger_reprobe(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+
+	if (cdev->private->state != DEV_STATE_DISCONNECTED)
+		return;
+
+	sch = to_subchannel(cdev->dev.parent);
+	/* Update some values. */
+	if (cio_update_schib(sch))
+		return;
+	/*
+	 * The pim, pam, pom values may not be accurate, but they are the best
+	 * we have before performing device selection :/
+	 */
+	sch->lpm = sch->schib.pmcw.pam & sch->opm;
+	/*
+	 * Use the initial configuration since we can't be sure that the old
+	 * paths are valid.
+	 */
+	io_subchannel_init_config(sch);
+	if (cio_commit_config(sch))
+		return;
+
+	/* We should also update ssd info, but this has to wait. */
+	/* Check if this is another device which appeared on the same sch. */
+	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
+		css_schedule_eval(sch->schid);
+	else
+		ccw_device_start_id(cdev, 0);
+}
+
+static void ccw_device_disabled_irq(struct ccw_device *cdev,
+				    enum dev_event dev_event)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	/*
+	 * An interrupt in a disabled state means a previous disable was not
+	 * successful - should not happen, but we try to disable again.
+	 */
+	cio_disable_subchannel(sch);
+}
+
+static void
+ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	retry_set_schib(cdev);
+	cdev->private->state = DEV_STATE_ONLINE;
+	dev_fsm_event(cdev, dev_event);
+}
+
+static void ccw_device_update_cmfblock(struct ccw_device *cdev,
+				       enum dev_event dev_event)
+{
+	cmf_retry_copy_block(cdev);
+	cdev->private->state = DEV_STATE_ONLINE;
+	dev_fsm_event(cdev, dev_event);
+}
+
+static void
+ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	ccw_device_set_timeout(cdev, 0);
+	cdev->private->state = DEV_STATE_NOT_OPER;
+	wake_up(&cdev->private->wait_q);
+}
+
+static void
+ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	int ret;
+
+	ret = ccw_device_cancel_halt_clear(cdev);
+	if (ret == -EBUSY) {
+		ccw_device_set_timeout(cdev, HZ/10);
+	} else {
+		cdev->private->state = DEV_STATE_NOT_OPER;
+		wake_up(&cdev->private->wait_q);
+	}
+}
+
+/*
+ * No operation action. This is used e.g. to ignore a timeout event in
+ * state offline.
+ */
+static void
+ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
+{
+}
+
+/*
+ * device statemachine
+ */
+fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
+	[DEV_STATE_NOT_OPER] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_SENSE_PGID] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_SENSE_ID] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_OFFLINE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_offline_verify,
+	},
+	[DEV_STATE_VERIFY] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
+		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
+	},
+	[DEV_STATE_ONLINE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
+		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
+	},
+	[DEV_STATE_W4SENSE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
+	},
+	[DEV_STATE_DISBAND_PGID] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_BOXED] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_nop,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_boxed_verify,
+	},
+	/* states to wait for i/o completion before doing something */
+	[DEV_STATE_TIMEOUT_KILL] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
+	},
+	[DEV_STATE_QUIESCE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	/* special states for devices gone not operational */
+	[DEV_STATE_DISCONNECTED] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
+	},
+	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_CMFCHANGE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
+		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
+	},
+	[DEV_STATE_CMFUPDATE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
+		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
+	},
+	[DEV_STATE_STEAL_LOCK] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+};
+
+EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/device_id.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/device_id.c
new file mode 100644
index 0000000..78a0b43
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/device_id.c
@@ -0,0 +1,222 @@
+/*
+ *  CCW device SENSE ID I/O handling.
+ *
+ *    Copyright IBM Corp. 2002,2009
+ *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/ccwdev.h>
+#include <asm/setup.h>
+#include <asm/cio.h>
+#include <asm/diag.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "device.h"
+#include "io_sch.h"
+
+#define SENSE_ID_RETRIES	256
+#define SENSE_ID_TIMEOUT	(10 * HZ)
+#define SENSE_ID_MIN_LEN	4
+#define SENSE_ID_BASIC_LEN	7
+
+/**
+ * diag210_to_senseid - convert diag 0x210 data to sense id information
+ * @senseid: sense id
+ * @diag: diag 0x210 data
+ *
+ * Return 0 on success, non-zero otherwise.
+ */
+static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
+{
+	static struct {
+		int class, type, cu_type;
+	} vm_devices[] = {
+		{ 0x08, 0x01, 0x3480 },
+		{ 0x08, 0x02, 0x3430 },
+		{ 0x08, 0x10, 0x3420 },
+		{ 0x08, 0x42, 0x3424 },
+		{ 0x08, 0x44, 0x9348 },
+		{ 0x08, 0x81, 0x3490 },
+		{ 0x08, 0x82, 0x3422 },
+		{ 0x10, 0x41, 0x1403 },
+		{ 0x10, 0x42, 0x3211 },
+		{ 0x10, 0x43, 0x3203 },
+		{ 0x10, 0x45, 0x3800 },
+		{ 0x10, 0x47, 0x3262 },
+		{ 0x10, 0x48, 0x3820 },
+		{ 0x10, 0x49, 0x3800 },
+		{ 0x10, 0x4a, 0x4245 },
+		{ 0x10, 0x4b, 0x4248 },
+		{ 0x10, 0x4d, 0x3800 },
+		{ 0x10, 0x4e, 0x3820 },
+		{ 0x10, 0x4f, 0x3820 },
+		{ 0x10, 0x82, 0x2540 },
+		{ 0x10, 0x84, 0x3525 },
+		{ 0x20, 0x81, 0x2501 },
+		{ 0x20, 0x82, 0x2540 },
+		{ 0x20, 0x84, 0x3505 },
+		{ 0x40, 0x01, 0x3278 },
+		{ 0x40, 0x04, 0x3277 },
+		{ 0x40, 0x80, 0x2250 },
+		{ 0x40, 0xc0, 0x5080 },
+		{ 0x80, 0x00, 0x3215 },
+	};
+	int i;
+
+	/* Special case for osa devices. */
+	if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) {
+		senseid->cu_type = 0x3088;
+		senseid->cu_model = 0x60;
+		senseid->reserved = 0xff;
+		return 0;
+	}
+	for (i = 0; i < ARRAY_SIZE(vm_devices); i++) {
+		if (diag->vrdcvcla == vm_devices[i].class &&
+		    diag->vrdcvtyp == vm_devices[i].type) {
+			senseid->cu_type = vm_devices[i].cu_type;
+			senseid->reserved = 0xff;
+			return 0;
+		}
+	}
+
+	return -ENODEV;
+}
+
+/**
+ * diag210_get_dev_info - retrieve device information via diag 0x210
+ * @cdev: ccw device
+ *
+ * Returns zero on success, non-zero otherwise.
+ */
+static int diag210_get_dev_info(struct ccw_device *cdev)
+{
+	struct ccw_dev_id *dev_id = &cdev->private->dev_id;
+	struct senseid *senseid = &cdev->private->senseid;
+	struct diag210 diag_data;
+	int rc;
+
+	if (dev_id->ssid != 0)
+		return -ENODEV;
+	memset(&diag_data, 0, sizeof(diag_data));
+	diag_data.vrdcdvno	= dev_id->devno;
+	diag_data.vrdclen	= sizeof(diag_data);
+	rc = diag210(&diag_data);
+	CIO_TRACE_EVENT(4, "diag210");
+	CIO_HEX_EVENT(4, &rc, sizeof(rc));
+	CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data));
+	if (rc != 0 && rc != 2)
+		goto err_failed;
+	if (diag210_to_senseid(senseid, &diag_data))
+		goto err_unknown;
+	return 0;
+
+err_unknown:
+	CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n",
+		      dev_id->ssid, dev_id->devno);
+	return -ENODEV;
+err_failed:
+	CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n",
+		      dev_id->ssid, dev_id->devno, rc);
+	return -ENODEV;
+}
+
+/*
+ * Initialize SENSE ID data.
+ */
+static void snsid_init(struct ccw_device *cdev)
+{
+	cdev->private->flags.esid = 0;
+	memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
+	cdev->private->senseid.cu_type = 0xffff;
+}
+
+/*
+ * Check for complete SENSE ID data.
+ */
+static int snsid_check(struct ccw_device *cdev, void *data)
+{
+	struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
+	int len = sizeof(struct senseid) - scsw->count;
+
+	/* Check for incomplete SENSE ID data. */
+	if (len < SENSE_ID_MIN_LEN)
+		goto out_restart;
+	if (cdev->private->senseid.cu_type == 0xffff)
+		goto out_restart;
+	/* Check for incompatible SENSE ID data. */
+	if (cdev->private->senseid.reserved != 0xff)
+		return -EOPNOTSUPP;
+	/* Check for extended-identification information. */
+	if (len > SENSE_ID_BASIC_LEN)
+		cdev->private->flags.esid = 1;
+	return 0;
+
+out_restart:
+	snsid_init(cdev);
+	return -EAGAIN;
+}
+
+/*
+ * Process SENSE ID request result.
+ */
+static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+	struct senseid *senseid = &cdev->private->senseid;
+	int vm = 0;
+
+	if (rc && MACHINE_IS_VM) {
+		/* Try diag 0x210 fallback on z/VM. */
+		snsid_init(cdev);
+		if (diag210_get_dev_info(cdev) == 0) {
+			rc = 0;
+			vm = 1;
+		}
+	}
+	CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x "
+		      "%04x/%02x%s\n", id->ssid, id->devno, rc,
+		      senseid->cu_type, senseid->cu_model, senseid->dev_type,
+		      senseid->dev_model, vm ? " (diag210)" : "");
+	ccw_device_sense_id_done(cdev, rc);
+}
+
+/**
+ * ccw_device_sense_id_start - perform SENSE ID
+ * @cdev: ccw device
+ *
+ * Execute a SENSE ID channel program on @cdev to update its sense id
+ * information. When finished, call ccw_device_sense_id_done with a
+ * return code specifying the result.
+ */
+void ccw_device_sense_id_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+
+	CIO_TRACE_EVENT(4, "snsid");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/* Data setup. */
+	snsid_init(cdev);
+	/* Channel program setup. */
+	cp->cmd_code	= CCW_CMD_SENSE_ID;
+	cp->cda		= (u32) (addr_t) &cdev->private->senseid;
+	cp->count	= sizeof(struct senseid);
+	cp->flags	= CCW_FLAG_SLI;
+	/* Request setup. */
+	memset(req, 0, sizeof(*req));
+	req->cp		= cp;
+	req->timeout	= SENSE_ID_TIMEOUT;
+	req->maxretries	= SENSE_ID_RETRIES;
+	req->lpm	= sch->schib.pmcw.pam & sch->opm;
+	req->check	= snsid_check;
+	req->callback	= snsid_callback;
+	ccw_request_start(cdev);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/device_ops.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/device_ops.c
new file mode 100644
index 0000000..ec7fb6d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/device_ops.c
@@ -0,0 +1,781 @@
+/*
+ * Copyright IBM Corp. 2002, 2009
+ *
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *	      Cornelia Huck (cornelia.huck@de.ibm.com)
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+
+#include <asm/ccwdev.h>
+#include <asm/idals.h>
+#include <asm/chpid.h>
+#include <asm/fcx.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "chsc.h"
+#include "device.h"
+#include "chp.h"
+
+/**
+ * ccw_device_set_options_mask() - set some options and unset the rest
+ * @cdev: device for which the options are to be set
+ * @flags: options to be set
+ *
+ * All flags specified in @flags are set, all flags not specified in @flags
+ * are cleared.
+ * Returns:
+ *   %0 on success, -%EINVAL on an invalid flag combination.
+ */
+int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
+{
+       /*
+	* These flags are mutually exclusive ...
+	*/
+	if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
+	    (flags & CCWDEV_REPORT_ALL))
+		return -EINVAL;
+	cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
+	cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
+	cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
+	cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
+	cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
+	return 0;
+}
+
+/**
+ * ccw_device_set_options() - set some options
+ * @cdev: device for which the options are to be set
+ * @flags: options to be set
+ *
+ * All flags specified in @flags are set, the remainder is left untouched.
+ * Returns:
+ *   %0 on success, -%EINVAL if an invalid flag combination would ensue.
+ */
+int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
+{
+       /*
+	* These flags are mutually exclusive ...
+	*/
+	if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
+	    (flags & CCWDEV_REPORT_ALL)) ||
+	    ((flags & CCWDEV_EARLY_NOTIFICATION) &&
+	     cdev->private->options.repall) ||
+	    ((flags & CCWDEV_REPORT_ALL) &&
+	     cdev->private->options.fast))
+		return -EINVAL;
+	cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
+	cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
+	cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
+	cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
+	cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
+	return 0;
+}
+
+/**
+ * ccw_device_clear_options() - clear some options
+ * @cdev: device for which the options are to be cleared
+ * @flags: options to be cleared
+ *
+ * All flags specified in @flags are cleared, the remainder is left untouched.
+ */
+void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
+{
+	cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
+	cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
+	cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
+	cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
+	cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
+}
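+
+/*
+ * Usage sketch (hypothetical driver code, not taken from this file): a ccw
+ * driver would typically pick its I/O options once, e.g. from its probe
+ * callback, before issuing channel programs:
+ *
+ *	int rc;
+ *
+ *	rc = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
+ *					  CCWDEV_DO_MULTIPATH);
+ *	if (rc)
+ *		return rc;
+ */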
+
+/**
+ * ccw_device_is_pathgroup - determine if paths to this device are grouped
+ * @cdev: ccw device
+ *
+ * Return non-zero if there is a path group, zero otherwise.
+ */
+int ccw_device_is_pathgroup(struct ccw_device *cdev)
+{
+	return cdev->private->flags.pgroup;
+}
+EXPORT_SYMBOL(ccw_device_is_pathgroup);
+
+/**
+ * ccw_device_is_multipath - determine if device is operating in multipath mode
+ * @cdev: ccw device
+ *
+ * Return non-zero if device is operating in multipath mode, zero otherwise.
+ */
+int ccw_device_is_multipath(struct ccw_device *cdev)
+{
+	return cdev->private->flags.mpath;
+}
+EXPORT_SYMBOL(ccw_device_is_multipath);
+
+/**
+ * ccw_device_clear() - terminate I/O request processing
+ * @cdev: target ccw device
+ * @intparm: interruption parameter; value is only used if no I/O is
+ *	     outstanding, otherwise the intparm associated with the I/O request
+ *	     is returned
+ *
+ * ccw_device_clear() calls csch on @cdev's subchannel.
+ * Returns:
+ *  %0 on success,
+ *  -%ENODEV on device not operational,
+ *  -%EINVAL on invalid device state.
+ * Context:
+ *  Interrupts disabled, ccw device lock held
+ */
+int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
+{
+	struct subchannel *sch;
+	int ret;
+
+	if (!cdev || !cdev->dev.parent)
+		return -ENODEV;
+	sch = to_subchannel(cdev->dev.parent);
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
+	if (cdev->private->state == DEV_STATE_NOT_OPER)
+		return -ENODEV;
+	if (cdev->private->state != DEV_STATE_ONLINE &&
+	    cdev->private->state != DEV_STATE_W4SENSE)
+		return -EINVAL;
+
+	ret = cio_clear(sch);
+	if (ret == 0)
+		cdev->private->intparm = intparm;
+	return ret;
+}
+
+/**
+ * ccw_device_start_key() - start a s390 channel program with key
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ *	     @cdev's interrupt handler. Allows a device driver to associate
+ *	     the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ *	 value of 0 will make cio use the opm.
+ * @key: storage key to be used for the I/O
+ * @flags: additional flags; defines the action to be performed for I/O
+ *	   processing.
+ *
+ * Start a S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * Returns:
+ *  %0, if the operation was successful;
+ *  -%EBUSY, if the device is busy, or status pending;
+ *  -%EACCES, if no path specified in @lpm is operational;
+ *  -%ENODEV, if the device is not operational.
+ * Context:
+ *  Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
+			 unsigned long intparm, __u8 lpm, __u8 key,
+			 unsigned long flags)
+{
+	struct subchannel *sch;
+	int ret;
+
+	if (!cdev || !cdev->dev.parent)
+		return -ENODEV;
+	sch = to_subchannel(cdev->dev.parent);
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
+	if (cdev->private->state == DEV_STATE_NOT_OPER)
+		return -ENODEV;
+	if (cdev->private->state == DEV_STATE_VERIFY) {
+		/* Remember to fake irb when finished. */
+		if (!cdev->private->flags.fake_irb) {
+			cdev->private->flags.fake_irb = FAKE_CMD_IRB;
+			cdev->private->intparm = intparm;
+			return 0;
+		} else
+			/* There's already a fake I/O around. */
+			return -EBUSY;
+	}
+	if (cdev->private->state != DEV_STATE_ONLINE ||
+	    ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
+	     !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
+	    cdev->private->flags.doverify)
+		return -EBUSY;
+	ret = cio_set_options (sch, flags);
+	if (ret)
+		return ret;
+	/* Adjust requested path mask to exclude unusable paths. */
+	if (lpm) {
+		lpm &= sch->lpm;
+		if (lpm == 0)
+			return -EACCES;
+	}
+	ret = cio_start_key (sch, cpa, lpm, key);
+	switch (ret) {
+	case 0:
+		cdev->private->intparm = intparm;
+		break;
+	case -EACCES:
+	case -ENODEV:
+		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ *	     @cdev's interrupt handler. Allows a device driver to associate
+ *	     the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ *	 value of 0 will make cio use the opm.
+ * @key: storage key to be used for the I/O
+ * @flags: additional flags; defines the action to be performed for I/O
+ *	   processing.
+ * @expires: timeout value in jiffies
+ *
+ * Start a S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * This function notifies the device driver if the channel program has not
+ * completed during the time specified by @expires. If a timeout occurs, the
+ * channel program is terminated via xsch, hsch or csch, and the device's
+ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * Returns:
+ *  %0, if the operation was successful;
+ *  -%EBUSY, if the device is busy, or status pending;
+ *  -%EACCES, if no path specified in @lpm is operational;
+ *  -%ENODEV, if the device is not operational.
+ * Context:
+ *  Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+				 unsigned long intparm, __u8 lpm, __u8 key,
+				 unsigned long flags, int expires)
+{
+	int ret;
+
+	if (!cdev)
+		return -ENODEV;
+	ccw_device_set_timeout(cdev, expires);
+	ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
+	if (ret != 0)
+		ccw_device_set_timeout(cdev, 0);
+	return ret;
+}
+
+/**
+ * ccw_device_start() - start a s390 channel program
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ *	     @cdev's interrupt handler. Allows a device driver to associate
+ *	     the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ *	 value of 0 will make cio use the opm.
+ * @flags: additional flags; defines the action to be performed for I/O
+ *	   processing.
+ *
+ * Start a S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * Returns:
+ *  %0, if the operation was successful;
+ *  -%EBUSY, if the device is busy, or status pending;
+ *  -%EACCES, if no path specified in @lpm is operational;
+ *  -%ENODEV, if the device is not operational.
+ * Context:
+ *  Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
+		     unsigned long intparm, __u8 lpm, unsigned long flags)
+{
+	return ccw_device_start_key(cdev, cpa, intparm, lpm,
+				    PAGE_DEFAULT_KEY, flags);
+}
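+
+/*
+ * Illustrative usage sketch (not part of this file): a driver typically
+ * starts a channel program from its request-start path while holding the
+ * ccw device lock. The names my_start_io and my_req below are hypothetical.
+ *
+ *	static int my_start_io(struct ccw_device *cdev, struct ccw1 *cpa,
+ *			       struct my_req *req)
+ *	{
+ *		unsigned long flags;
+ *		int rc;
+ *
+ *		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ *		rc = ccw_device_start(cdev, cpa, (unsigned long) req, 0, 0);
+ *		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ *		return rc;
+ *	}
+ *
+ * rc is 0 on success, or -EBUSY, -EACCES or -ENODEV as documented above.
+ */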
+
+/**
+ * ccw_device_start_timeout() - start a s390 channel program with timeout
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ *	     @cdev's interrupt handler. Allows a device driver to associate
+ *	     the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ *	 value of 0 will make cio use the opm.
+ * @flags: additional flags; defines the action to be performed for I/O
+ *	   processing.
+ * @expires: timeout value in jiffies
+ *
+ * Start a S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * This function notifies the device driver if the channel program has not
+ * completed during the time specified by @expires. If a timeout occurs, the
+ * channel program is terminated via xsch, hsch or csch, and the device's
+ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * Returns:
+ *  %0, if the operation was successful;
+ *  -%EBUSY, if the device is busy, or status pending;
+ *  -%EACCES, if no path specified in @lpm is operational;
+ *  -%ENODEV, if the device is not operational.
+ * Context:
+ *  Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
+			     unsigned long intparm, __u8 lpm,
+			     unsigned long flags, int expires)
+{
+	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
+					    PAGE_DEFAULT_KEY, flags,
+					    expires);
+}
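+
+/*
+ * Illustrative sketch (not part of this file): when a timeout armed by
+ * ccw_device_start_timeout() or its _key variant expires, the driver's
+ * interrupt handler is invoked with an irb error pointer, so a handler
+ * usually checks for that first. my_handler and my_handle_timeout are
+ * hypothetical names.
+ *
+ *	static void my_handler(struct ccw_device *cdev, unsigned long intparm,
+ *			       struct irb *irb)
+ *	{
+ *		if (IS_ERR(irb)) {
+ *			if (PTR_ERR(irb) == -ETIMEDOUT)
+ *				my_handle_timeout(cdev, intparm);
+ *			return;
+ *		}
+ *		...
+ *	}
+ */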
+
+
+/**
+ * ccw_device_halt() - halt I/O request processing
+ * @cdev: target ccw device
+ * @intparm: interruption parameter; value is only used if no I/O is
+ *	     outstanding, otherwise the intparm associated with the I/O request
+ *	     is returned
+ *
+ * ccw_device_halt() calls hsch on @cdev's subchannel.
+ * Returns:
+ *  %0 on success,
+ *  -%ENODEV on device not operational,
+ *  -%EINVAL on invalid device state,
+ *  -%EBUSY on device busy or interrupt pending.
+ * Context:
+ *  Interrupts disabled, ccw device lock held
+ */
+int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
+{
+	struct subchannel *sch;
+	int ret;
+
+	if (!cdev || !cdev->dev.parent)
+		return -ENODEV;
+	sch = to_subchannel(cdev->dev.parent);
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
+	if (cdev->private->state == DEV_STATE_NOT_OPER)
+		return -ENODEV;
+	if (cdev->private->state != DEV_STATE_ONLINE &&
+	    cdev->private->state != DEV_STATE_W4SENSE)
+		return -EINVAL;
+
+	ret = cio_halt(sch);
+	if (ret == 0)
+		cdev->private->intparm = intparm;
+	return ret;
+}
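+
+/*
+ * Illustrative sketch (not part of this file): one way a driver may terminate
+ * an outstanding request is to issue a halt under the ccw device lock and
+ * fall back to a clear if needed, then wait for the resulting interrupt.
+ * req is a hypothetical driver request object used as intparm.
+ *
+ *	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ *	rc = ccw_device_halt(cdev, (unsigned long) req);
+ *	if (rc == -EBUSY)
+ *		rc = ccw_device_clear(cdev, (unsigned long) req);
+ *	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ */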
+
+/**
+ * ccw_device_resume() - resume channel program execution
+ * @cdev: target ccw device
+ *
+ * ccw_device_resume() calls rsch on @cdev's subchannel.
+ * Returns:
+ *  %0 on success,
+ *  -%ENODEV on device not operational,
+ *  -%EINVAL on invalid device state,
+ *  -%EBUSY on device busy or interrupt pending.
+ * Context:
+ *  Interrupts disabled, ccw device lock held
+ */
+int ccw_device_resume(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+
+	if (!cdev || !cdev->dev.parent)
+		return -ENODEV;
+	sch = to_subchannel(cdev->dev.parent);
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
+	if (cdev->private->state == DEV_STATE_NOT_OPER)
+		return -ENODEV;
+	if (cdev->private->state != DEV_STATE_ONLINE ||
+	    !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
+		return -EINVAL;
+	return cio_resume(sch);
+}
+
+/*
+ * Pass interrupt to device driver.
+ */
+int
+ccw_device_call_handler(struct ccw_device *cdev)
+{
+	unsigned int stctl;
+	int ending_status;
+
+	/*
+	 * We call the device driver's interrupt handler if:
+	 *  - we received ending status
+	 *  - the action handler requested to see all interrupts
+	 *  - we received an intermediate status
+	 *  - fast notification was requested (primary status)
+	 *  - unsolicited interrupts
+	 */
+	stctl = scsw_stctl(&cdev->private->irb.scsw);
+	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
+		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
+		(stctl == SCSW_STCTL_STATUS_PEND);
+	if (!ending_status &&
+	    !cdev->private->options.repall &&
+	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
+	    !(cdev->private->options.fast &&
+	      (stctl & SCSW_STCTL_PRIM_STATUS)))
+		return 0;
+
+	/* Clear pending timers for device driver initiated I/O. */
+	if (ending_status)
+		ccw_device_set_timeout(cdev, 0);
+	/*
+	 * Now we are ready to call the device driver interrupt handler.
+	 */
+	if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm,
+			      &cdev->private->irb);
+
+	/*
+	 * Clear the old and now useless interrupt response block.
+	 */
+	memset(&cdev->private->irb, 0, sizeof(struct irb));
+
+	return 1;
+}
+
+/**
+ * ccw_device_get_ciw() - Search for CIW command in extended sense data.
+ * @cdev: ccw device to inspect
+ * @ct: command type to look for
+ *
+ * During SenseID, command information words (CIWs) describing special
+ * commands available to the device may have been stored in the extended
+ * sense data. This function searches for CIWs of a specified command
+ * type in the extended sense data.
+ * Returns:
+ *  %NULL if no extended sense data has been stored or if no CIW of the
+ *  specified command type could be found,
+ *  else a pointer to the CIW of the specified command type.
+ */
+struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
+{
+	int ciw_cnt;
+
+	if (cdev->private->flags.esid == 0)
+		return NULL;
+	for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
+		if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
+			return cdev->private->senseid.ciw + ciw_cnt;
+	return NULL;
+}
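+
+/*
+ * Illustrative sketch (not part of this file): a driver that wants to read
+ * configuration data can look up the matching CIW and use its command code
+ * and count when building the channel program (CIW_TYPE_RCD is defined in
+ * asm/cio.h; rcd_ccw is a hypothetical struct ccw1 pointer).
+ *
+ *	struct ciw *ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
+ *
+ *	if (ciw) {
+ *		rcd_ccw->cmd_code = ciw->cmd;
+ *		rcd_ccw->count = ciw->count;
+ *	}
+ */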
+
+/**
+ * ccw_device_get_path_mask() - get currently available paths
+ * @cdev: ccw device to be queried
+ * Returns:
+ *  %0 if no subchannel for the device is available,
+ *  else the mask of currently available paths for the ccw device's subchannel.
+ */
+__u8 ccw_device_get_path_mask(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+
+	if (!cdev->dev.parent)
+		return 0;
+
+	sch = to_subchannel(cdev->dev.parent);
+	return sch->lpm;
+}
+
+struct stlck_data {
+	struct completion done;
+	int rc;
+};
+
+void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc)
+{
+	struct stlck_data *sdata = data;
+
+	sdata->rc = rc;
+	complete(&sdata->done);
+}
+
+/*
+ * Perform unconditional reserve + release.
+ */
+int ccw_device_stlck(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct stlck_data data;
+	u8 *buffer;
+	int rc;
+
+	/* Check if steal lock operation is valid for this device. */
+	if (cdev->drv) {
+		if (!cdev->private->options.force)
+			return -EINVAL;
+	}
+	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+	init_completion(&data.done);
+	data.rc = -EIO;
+	spin_lock_irq(sch->lock);
+	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
+	if (rc)
+		goto out_unlock;
+	/* Perform operation. */
+	cdev->private->state = DEV_STATE_STEAL_LOCK;
+	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
+	spin_unlock_irq(sch->lock);
+	/* Wait for operation to finish. */
+	if (wait_for_completion_interruptible(&data.done)) {
+		/* Got a signal. */
+		spin_lock_irq(sch->lock);
+		ccw_request_cancel(cdev);
+		spin_unlock_irq(sch->lock);
+		wait_for_completion(&data.done);
+	}
+	rc = data.rc;
+	/* Check results. */
+	spin_lock_irq(sch->lock);
+	cio_disable_subchannel(sch);
+	cdev->private->state = DEV_STATE_BOXED;
+out_unlock:
+	spin_unlock_irq(sch->lock);
+	kfree(buffer);
+
+	return rc;
+}
+
+void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
+{
+	struct subchannel *sch;
+	struct chp_id chpid;
+
+	sch = to_subchannel(cdev->dev.parent);
+	chp_id_init(&chpid);
+	chpid.id = sch->schib.pmcw.chpid[chp_no];
+	return chp_get_chp_desc(chpid);
+}
+
+/**
+ * ccw_device_get_id - obtain a ccw device id
+ * @cdev: device to obtain the id for
+ * @dev_id: where to fill in the values
+ */
+void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
+{
+	*dev_id = cdev->private->dev_id;
+}
+EXPORT_SYMBOL(ccw_device_get_id);
+
+/**
+ * ccw_device_tm_start_key - perform start function
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ * @key: storage key to use for storage access
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
+			    unsigned long intparm, u8 lpm, u8 key)
+{
+	struct subchannel *sch;
+	int rc;
+
+	sch = to_subchannel(cdev->dev.parent);
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
+	if (cdev->private->state == DEV_STATE_VERIFY) {
+		/* Remember to fake irb when finished. */
+		if (!cdev->private->flags.fake_irb) {
+			cdev->private->flags.fake_irb = FAKE_TM_IRB;
+			cdev->private->intparm = intparm;
+			return 0;
+		} else
+			/* There's already a fake I/O around. */
+			return -EBUSY;
+	}
+	if (cdev->private->state != DEV_STATE_ONLINE)
+		return -EIO;
+	/* Adjust requested path mask to exclude unusable paths. */
+	if (lpm) {
+		lpm &= sch->lpm;
+		if (lpm == 0)
+			return -EACCES;
+	}
+	rc = cio_tm_start_key(sch, tcw, lpm, key);
+	if (rc == 0)
+		cdev->private->intparm = intparm;
+	return rc;
+}
+EXPORT_SYMBOL(ccw_device_tm_start_key);
+
+/**
+ * ccw_device_tm_start_timeout_key - perform start function
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ * @key: storage key to use for storage access
+ * @expires: time span in jiffies after which to abort request
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
+				    unsigned long intparm, u8 lpm, u8 key,
+				    int expires)
+{
+	int ret;
+
+	ccw_device_set_timeout(cdev, expires);
+	ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
+	if (ret != 0)
+		ccw_device_set_timeout(cdev, 0);
+	return ret;
+}
+EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
+
+/**
+ * ccw_device_tm_start - perform start function
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
+			unsigned long intparm, u8 lpm)
+{
+	return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
+				       PAGE_DEFAULT_KEY);
+}
+EXPORT_SYMBOL(ccw_device_tm_start);
+
+/**
+ * ccw_device_tm_start_timeout - perform start function
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ * @expires: time span in jiffies after which to abort request
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
+			       unsigned long intparm, u8 lpm, int expires)
+{
+	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
+					       PAGE_DEFAULT_KEY, expires);
+}
+EXPORT_SYMBOL(ccw_device_tm_start_timeout);
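+
+/*
+ * Illustrative sketch (not part of this file): once a transport-command word
+ * has been assembled (see the helpers in fcx.c), it is started much like a
+ * command-mode channel program, under the ccw device lock. req and the
+ * 10 * HZ timeout below are placeholders.
+ *
+ *	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ *	rc = ccw_device_tm_start_timeout(cdev, tcw, (unsigned long) req,
+ *					 0, 10 * HZ);
+ *	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ */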
+
+/**
+ * ccw_device_get_mdc - accumulate max data count
+ * @cdev: ccw device for which the max data count is accumulated
+ * @mask: mask of paths to use
+ *
+ * Return the number of 64K-byte blocks that all paths in @mask support
+ * for a single transport command. Return values <= 0 indicate failures.
+ */
+int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct channel_path_desc_fmt1 desc;
+	struct chp_id chpid;
+	int mdc = 0, ret, i;
+
+	/* Adjust requested path mask to exclude varied-off paths. */
+	if (mask)
+		mask &= sch->lpm;
+	else
+		mask = sch->lpm;
+
+	chp_id_init(&chpid);
+	for (i = 0; i < 8; i++) {
+		if (!(mask & (0x80 >> i)))
+			continue;
+		chpid.id = sch->schib.pmcw.chpid[i];
+		ret = chsc_determine_fmt1_channel_path_desc(chpid, &desc);
+		if (ret)
+			return ret;
+		if (!desc.f)
+			return 0;
+		if (!desc.r)
+			mdc = 1;
+		mdc = mdc ? min(mdc, (int)desc.mdc) : desc.mdc;
+	}
+
+	return mdc;
+}
+EXPORT_SYMBOL(ccw_device_get_mdc);
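+
+/*
+ * Illustrative sketch (not part of this file): callers typically use the
+ * result to cap the payload of a single transport command; the conversion
+ * to bytes below is an assumption based on the 64K block unit.
+ *
+ *	int mdc = ccw_device_get_mdc(cdev, 0);
+ *
+ *	if (mdc > 0)
+ *		max_bytes = mdc * 64 * 1024;
+ */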
+
+/**
+ * ccw_device_tm_intrg - perform interrogate function
+ * @cdev: ccw device on which to perform the interrogate function
+ *
+ * Perform an interrogate function on the given ccw device. Return zero on
+ * success, non-zero otherwise.
+ */
+int ccw_device_tm_intrg(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
+	if (cdev->private->state != DEV_STATE_ONLINE)
+		return -EIO;
+	if (!scsw_is_tm(&sch->schib.scsw) ||
+	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
+		return -EINVAL;
+	return cio_tm_intrg(sch);
+}
+EXPORT_SYMBOL(ccw_device_tm_intrg);
+
+// FIXME: these have to go:
+
+int
+_ccw_device_get_subchannel_number(struct ccw_device *cdev)
+{
+	return cdev->private->schid.sch_no;
+}
+
+
+MODULE_LICENSE("GPL");
+EXPORT_SYMBOL(ccw_device_set_options_mask);
+EXPORT_SYMBOL(ccw_device_set_options);
+EXPORT_SYMBOL(ccw_device_clear_options);
+EXPORT_SYMBOL(ccw_device_clear);
+EXPORT_SYMBOL(ccw_device_halt);
+EXPORT_SYMBOL(ccw_device_resume);
+EXPORT_SYMBOL(ccw_device_start_timeout);
+EXPORT_SYMBOL(ccw_device_start);
+EXPORT_SYMBOL(ccw_device_start_timeout_key);
+EXPORT_SYMBOL(ccw_device_start_key);
+EXPORT_SYMBOL(ccw_device_get_ciw);
+EXPORT_SYMBOL(ccw_device_get_path_mask);
+EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
+EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/device_pgid.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/device_pgid.c
new file mode 100644
index 0000000..daa6b90
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/device_pgid.c
@@ -0,0 +1,582 @@
+/*
+ *  CCW device PGID and path verification I/O handling.
+ *
+ *    Copyright IBM Corp. 2002,2009
+ *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "device.h"
+#include "io_sch.h"
+
+#define PGID_RETRIES	256
+#define PGID_TIMEOUT	(10 * HZ)
+
+/*
+ * Process path verification data and report result.
+ */
+static void verify_done(struct ccw_device *cdev, int rc)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+	int mpath = cdev->private->flags.mpath;
+	int pgroup = cdev->private->flags.pgroup;
+
+	if (rc)
+		goto out;
+	/* Ensure consistent multipathing state at device and channel. */
+	if (sch->config.mp != mpath) {
+		sch->config.mp = mpath;
+		rc = cio_commit_config(sch);
+	}
+out:
+	CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
+			 "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
+			 sch->vpm);
+	ccw_device_verify_done(cdev, rc);
+}
+
+/*
+ * Create channel program to perform a NOOP.
+ */
+static void nop_build_cp(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+
+	cp->cmd_code	= CCW_CMD_NOOP;
+	cp->cda		= 0;
+	cp->count	= 0;
+	cp->flags	= CCW_FLAG_SLI;
+	req->cp		= cp;
+}
+
+/*
+ * Perform NOOP on a single path.
+ */
+static void nop_do(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	/* Adjust lpm. */
+	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm);
+	if (!req->lpm)
+		goto out_nopath;
+	nop_build_cp(cdev);
+	ccw_request_start(cdev);
+	return;
+
+out_nopath:
+	verify_done(cdev, sch->vpm ? 0 : -EACCES);
+}
+
+/*
+ * Adjust NOOP I/O status.
+ */
+static enum io_status nop_filter(struct ccw_device *cdev, void *data,
+				 struct irb *irb, enum io_status status)
+{
+	/* Only subchannel status might indicate a path error. */
+	if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
+		return IO_DONE;
+	return status;
+}
+
+/*
+ * Process NOOP request result for a single path.
+ */
+static void nop_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	if (rc == 0)
+		sch->vpm |= req->lpm;
+	else if (rc != -EACCES)
+		goto err;
+	req->lpm >>= 1;
+	nop_do(cdev);
+	return;
+
+err:
+	verify_done(cdev, rc);
+}
+
+/*
+ * Create channel program to perform SET PGID on a single path.
+ */
+static void spid_build_cp(struct ccw_device *cdev, u8 fn)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+	int i = 8 - ffs(req->lpm);
+	struct pgid *pgid = &cdev->private->pgid[i];
+
+	pgid->inf.fc	= fn;
+	cp->cmd_code	= CCW_CMD_SET_PGID;
+	cp->cda		= (u32) (addr_t) pgid;
+	cp->count	= sizeof(*pgid);
+	cp->flags	= CCW_FLAG_SLI;
+	req->cp		= cp;
+}
+
+/*
+ * Perform establish/resign SET PGID on a single path.
+ */
+static void spid_do(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	u8 fn;
+
+	/* Use next available path that is not already in correct state. */
+	req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
+	if (!req->lpm)
+		goto out_nopath;
+	/* Channel program setup. */
+	if (req->lpm & sch->opm)
+		fn = SPID_FUNC_ESTABLISH;
+	else
+		fn = SPID_FUNC_RESIGN;
+	if (cdev->private->flags.mpath)
+		fn |= SPID_FUNC_MULTI_PATH;
+	spid_build_cp(cdev, fn);
+	ccw_request_start(cdev);
+	return;
+
+out_nopath:
+	verify_done(cdev, sch->vpm ? 0 : -EACCES);
+}
+
+static void verify_start(struct ccw_device *cdev);
+
+/*
+ * Process SET PGID request result for a single path.
+ */
+static void spid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	switch (rc) {
+	case 0:
+		sch->vpm |= req->lpm & sch->opm;
+		break;
+	case -EACCES:
+		break;
+	case -EOPNOTSUPP:
+		if (cdev->private->flags.mpath) {
+			/* Try without multipathing. */
+			cdev->private->flags.mpath = 0;
+			goto out_restart;
+		}
+		/* Try without pathgrouping. */
+		cdev->private->flags.pgroup = 0;
+		goto out_restart;
+	default:
+		goto err;
+	}
+	req->lpm >>= 1;
+	spid_do(cdev);
+	return;
+
+out_restart:
+	verify_start(cdev);
+	return;
+err:
+	verify_done(cdev, rc);
+}
+
+static void spid_start(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+
+	/* Initialize request data. */
+	memset(req, 0, sizeof(*req));
+	req->timeout	= PGID_TIMEOUT;
+	req->maxretries	= PGID_RETRIES;
+	req->lpm	= 0x80;
+	req->singlepath	= 1;
+	req->callback	= spid_callback;
+	spid_do(cdev);
+}
+
+static int pgid_is_reset(struct pgid *p)
+{
+	char *c;
+
+	for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
+		if (*c != 0)
+			return 0;
+	}
+	return 1;
+}
+
+static int pgid_cmp(struct pgid *p1, struct pgid *p2)
+{
+	return memcmp((char *) p1 + 1, (char *) p2 + 1,
+		      sizeof(struct pgid) - 1);
+}
+
+/*
+ * Determine pathgroup state from PGID data.
+ */
+static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
+			 int *mismatch, u8 *reserved, u8 *reset)
+{
+	struct pgid *pgid = &cdev->private->pgid[0];
+	struct pgid *first = NULL;
+	int lpm;
+	int i;
+
+	*mismatch = 0;
+	*reserved = 0;
+	*reset = 0;
+	for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
+		if ((cdev->private->pgid_valid_mask & lpm) == 0)
+			continue;
+		if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
+			*reserved |= lpm;
+		if (pgid_is_reset(pgid)) {
+			*reset |= lpm;
+			continue;
+		}
+		if (!first) {
+			first = pgid;
+			continue;
+		}
+		if (pgid_cmp(pgid, first) != 0)
+			*mismatch = 1;
+	}
+	if (!first)
+		first = &channel_subsystems[0]->global_pgid;
+	*p = first;
+}
+
+static u8 pgid_to_donepm(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct pgid *pgid;
+	int i;
+	int lpm;
+	u8 donepm = 0;
+
+	/* Set bits for paths which are already in the target state. */
+	for (i = 0; i < 8; i++) {
+		lpm = 0x80 >> i;
+		if ((cdev->private->pgid_valid_mask & lpm) == 0)
+			continue;
+		pgid = &cdev->private->pgid[i];
+		if (sch->opm & lpm) {
+			if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
+				continue;
+		} else {
+			if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
+				continue;
+		}
+		if (cdev->private->flags.mpath) {
+			if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
+				continue;
+		} else {
+			if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
+				continue;
+		}
+		donepm |= lpm;
+	}
+
+	return donepm;
+}
+
+static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
+{
+	int i;
+
+	for (i = 0; i < 8; i++)
+		memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
+}
+
+/*
+ * Process SENSE PGID data and report result.
+ */
+static void snid_done(struct ccw_device *cdev, int rc)
+{
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct pgid *pgid;
+	int mismatch = 0;
+	u8 reserved = 0;
+	u8 reset = 0;
+	u8 donepm;
+
+	if (rc)
+		goto out;
+	pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
+	if (reserved == cdev->private->pgid_valid_mask)
+		rc = -EUSERS;
+	else if (mismatch)
+		rc = -EOPNOTSUPP;
+	else {
+		donepm = pgid_to_donepm(cdev);
+		sch->vpm = donepm & sch->opm;
+		cdev->private->pgid_todo_mask &= ~donepm;
+		cdev->private->pgid_reset_mask |= reset;
+		pgid_fill(cdev, pgid);
+	}
+out:
+	CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
+		      "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
+		      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
+		      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
+	switch (rc) {
+	case 0:
+		/* Anything left to do? */
+		if (cdev->private->pgid_todo_mask == 0) {
+			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
+			return;
+		}
+		/* Perform path-grouping. */
+		spid_start(cdev);
+		break;
+	case -EOPNOTSUPP:
+		/* Path-grouping not supported. */
+		cdev->private->flags.pgroup = 0;
+		cdev->private->flags.mpath = 0;
+		verify_start(cdev);
+		break;
+	default:
+		verify_done(cdev, rc);
+	}
+}
+
+/*
+ * Create channel program to perform a SENSE PGID on a single path.
+ */
+static void snid_build_cp(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+	int i = 8 - ffs(req->lpm);
+
+	/* Channel program setup. */
+	cp->cmd_code	= CCW_CMD_SENSE_PGID;
+	cp->cda		= (u32) (addr_t) &cdev->private->pgid[i];
+	cp->count	= sizeof(struct pgid);
+	cp->flags	= CCW_FLAG_SLI;
+	req->cp		= cp;
+}
+
+/*
+ * Perform SENSE PGID on a single path.
+ */
+static void snid_do(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	/* Adjust lpm if paths are not set in pam. */
+	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam);
+	if (!req->lpm)
+		goto out_nopath;
+	snid_build_cp(cdev);
+	ccw_request_start(cdev);
+	return;
+
+out_nopath:
+	snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES);
+}
+
+/*
+ * Process SENSE PGID request result for single path.
+ */
+static void snid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	struct ccw_request *req = &cdev->private->req;
+
+	if (rc == 0)
+		cdev->private->pgid_valid_mask |= req->lpm;
+	else if (rc != -EACCES)
+		goto err;
+	req->lpm >>= 1;
+	snid_do(cdev);
+	return;
+
+err:
+	snid_done(cdev, rc);
+}
+
+/*
+ * Perform path verification.
+ */
+static void verify_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw_dev_id *devid = &cdev->private->dev_id;
+
+	sch->vpm = 0;
+	sch->lpm = sch->schib.pmcw.pam;
+	/* Initialize request data. */
+	memset(req, 0, sizeof(*req));
+	req->timeout	= PGID_TIMEOUT;
+	req->maxretries	= PGID_RETRIES;
+	req->lpm	= 0x80;
+	req->singlepath	= 1;
+	if (cdev->private->flags.pgroup) {
+		CIO_TRACE_EVENT(4, "snid");
+		CIO_HEX_EVENT(4, devid, sizeof(*devid));
+		req->callback	= snid_callback;
+		snid_do(cdev);
+	} else {
+		CIO_TRACE_EVENT(4, "nop");
+		CIO_HEX_EVENT(4, devid, sizeof(*devid));
+		req->filter	= nop_filter;
+		req->callback	= nop_callback;
+		nop_do(cdev);
+	}
+}
+
+/**
+ * ccw_device_verify_start - perform path verification
+ * @cdev: ccw device
+ *
+ * Perform an I/O on each available channel path to @cdev to determine which
+ * paths are operational. The resulting path mask is stored in sch->vpm.
+ * If device options specify pathgrouping, establish a pathgroup for the
+ * operational paths. When finished, call ccw_device_verify_done with a
+ * return code specifying the result.
+ */
+void ccw_device_verify_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	CIO_TRACE_EVENT(4, "vrfy");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/* Initialize PGID data. */
+	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+	cdev->private->pgid_valid_mask = 0;
+	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
+	/*
+	 * Initialize pathgroup and multipath state with target values.
+	 * They may change in the course of path verification.
+	 */
+	cdev->private->flags.pgroup = cdev->private->options.pgroup;
+	cdev->private->flags.mpath = cdev->private->options.mpath;
+	cdev->private->flags.doverify = 0;
+	verify_start(cdev);
+}
+
+/*
+ * Process disband SET PGID request result.
+ */
+static void disband_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+
+	if (rc)
+		goto out;
+	/* Ensure consistent multipathing state at device and channel. */
+	cdev->private->flags.mpath = 0;
+	if (sch->config.mp) {
+		sch->config.mp = 0;
+		rc = cio_commit_config(sch);
+	}
+out:
+	CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
+		      rc);
+	ccw_device_disband_done(cdev, rc);
+}
+
+/**
+ * ccw_device_disband_start - disband pathgroup
+ * @cdev: ccw device
+ *
+ * Execute a SET PGID channel program on @cdev to disband a previously
+ * established pathgroup. When finished, call ccw_device_disband_done with
+ * a return code specifying the result.
+ */
+void ccw_device_disband_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	u8 fn;
+
+	CIO_TRACE_EVENT(4, "disb");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/* Request setup. */
+	memset(req, 0, sizeof(*req));
+	req->timeout	= PGID_TIMEOUT;
+	req->maxretries	= PGID_RETRIES;
+	req->lpm	= sch->schib.pmcw.pam & sch->opm;
+	req->singlepath	= 1;
+	req->callback	= disband_callback;
+	fn = SPID_FUNC_DISBAND;
+	if (cdev->private->flags.mpath)
+		fn |= SPID_FUNC_MULTI_PATH;
+	spid_build_cp(cdev, fn);
+	ccw_request_start(cdev);
+}
+
+static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+
+	cp[0].cmd_code = CCW_CMD_STLCK;
+	cp[0].cda = (u32) (addr_t) buf1;
+	cp[0].count = 32;
+	cp[0].flags = CCW_FLAG_CC;
+	cp[1].cmd_code = CCW_CMD_RELEASE;
+	cp[1].cda = (u32) (addr_t) buf2;
+	cp[1].count = 32;
+	cp[1].flags = 0;
+	req->cp = cp;
+}
+
+static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	ccw_device_stlck_done(cdev, data, rc);
+}
+
+/**
+ * ccw_device_stlck_start - perform unconditional release
+ * @cdev: ccw device
+ * @data: data pointer to be passed to ccw_device_stlck_done
+ * @buf1: data pointer used in channel program
+ * @buf2: data pointer used in channel program
+ *
+ * Execute a channel program on @cdev to release an existing PGID reservation.
+ * When finished, call ccw_device_stlck_done with a return code specifying the
+ * result.
+ */
+void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1,
+			    void *buf2)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	CIO_TRACE_EVENT(4, "stlck");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/* Request setup. */
+	memset(req, 0, sizeof(*req));
+	req->timeout	= PGID_TIMEOUT;
+	req->maxretries	= PGID_RETRIES;
+	req->lpm	= sch->schib.pmcw.pam & sch->opm;
+	req->data	= data;
+	req->callback	= stlck_callback;
+	stlck_build_cp(cdev, buf1, buf2);
+	ccw_request_start(cdev);
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/device_status.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/device_status.c
new file mode 100644
index 0000000..66d8066
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/device_status.c
@@ -0,0 +1,400 @@
+/*
+ * drivers/s390/cio/device_status.c
+ *
+ *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
+ *			 IBM Corporation
+ *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
+ *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Status accumulation and basic sense functions.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "io_sch.h"
+
+/*
+ * Check for any kind of channel or interface control check but don't
+ * issue the message for the console device
+ */
+static void
+ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
+{
+	char dbf_text[15];
+
+	if (!scsw_is_valid_cstat(&irb->scsw) ||
+	    !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
+	      SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
+		return;
+	CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
+		      "received"
+		      " ... device %04x on subchannel 0.%x.%04x, dev_stat "
+		      ": %02X sch_stat : %02X\n",
+		      cdev->private->dev_id.devno, cdev->private->schid.ssid,
+		      cdev->private->schid.sch_no,
+		      scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
+	sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
+	CIO_TRACE_EVENT(0, dbf_text);
+	CIO_HEX_EVENT(0, irb, sizeof(struct irb));
+}
+
+/*
+ * Some paths became not operational (pno bit in scsw is set).
+ */
+static void
+ccw_device_path_notoper(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	if (cio_update_schib(sch))
+		goto doverify;
+
+	CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
+		      "not operational \n", __func__,
+		      sch->schid.ssid, sch->schid.sch_no,
+		      sch->schib.pmcw.pnom);
+
+	sch->lpm &= ~sch->schib.pmcw.pnom;
+doverify:
+	cdev->private->flags.doverify = 1;
+}
+
+/*
+ * Copy valid bits from the extended control word to device irb.
+ */
+static void
+ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
+{
+	/*
+	 * Copy extended control bit if it is valid... yes there
+	 * are conditions that have to be met for the extended control
+	 * bit to have meaning. Sick.
+	 */
+	cdev->private->irb.scsw.cmd.ectl = 0;
+	if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
+	    !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
+		cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
+	/* Check if extended control word is valid. */
+	if (!cdev->private->irb.scsw.cmd.ectl)
+		return;
+	/* Copy concurrent sense / model dependent information. */
+	memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
+}
+
+/*
+ * Check if extended status word is valid.
+ */
+static int
+ccw_device_accumulate_esw_valid(struct irb *irb)
+{
+	if (!irb->scsw.cmd.eswf &&
+	    (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
+		return 0;
+	if (irb->scsw.cmd.stctl ==
+			(SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
+	    !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
+		return 0;
+	return 1;
+}
+
+/*
+ * Copy valid bits from the extended status word to device irb.
+ */
+static void
+ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
+{
+	struct irb *cdev_irb;
+	struct sublog *cdev_sublog, *sublog;
+
+	if (!ccw_device_accumulate_esw_valid(irb))
+		return;
+
+	cdev_irb = &cdev->private->irb;
+
+	/* Copy last path used mask. */
+	cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
+
+	/* Copy subchannel logout information if esw is of format 0. */
+	if (irb->scsw.cmd.eswf) {
+		cdev_sublog = &cdev_irb->esw.esw0.sublog;
+		sublog = &irb->esw.esw0.sublog;
+		/* Copy extended status flags. */
+		cdev_sublog->esf = sublog->esf;
+		/*
+		 * Copy fields that have a meaning for channel data check
+		 * channel control check and interface control check.
+		 */
+		if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
+				       SCHN_STAT_CHN_CTRL_CHK |
+				       SCHN_STAT_INTF_CTRL_CHK)) {
+			/* Copy ancillary report bit. */
+			cdev_sublog->arep = sublog->arep;
+			/* Copy field-validity-flags. */
+			cdev_sublog->fvf = sublog->fvf;
+			/* Copy storage access code. */
+			cdev_sublog->sacc = sublog->sacc;
+			/* Copy termination code. */
+			cdev_sublog->termc = sublog->termc;
+			/* Copy sequence code. */
+			cdev_sublog->seqc = sublog->seqc;
+		}
+		/* Copy device status check. */
+		cdev_sublog->devsc = sublog->devsc;
+		/* Copy secondary error. */
+		cdev_sublog->serr = sublog->serr;
+		/* Copy i/o-error alert. */
+		cdev_sublog->ioerr = sublog->ioerr;
+		/* Copy channel path timeout bit. */
+		if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
+			cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
+		/* Copy failing storage address validity flag. */
+		cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
+		if (cdev_irb->esw.esw0.erw.fsavf) {
+			/* ... and copy the failing storage address. */
+			memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
+			       sizeof (irb->esw.esw0.faddr));
+			/* ... and copy the failing storage address format. */
+			cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
+		}
+		/* Copy secondary ccw address validity bit. */
+		cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
+		if (irb->esw.esw0.erw.scavf)
+			/* ... and copy the secondary ccw address. */
+			cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;
+
+	}
+	/* FIXME: DCTI for format 2? */
+
+	/* Copy authorization bit. */
+	cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
+	/* Copy path verification required flag. */
+	cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
+	if (irb->esw.esw0.erw.pvrf)
+		cdev->private->flags.doverify = 1;
+	/* Copy concurrent sense bit. */
+	cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
+	if (irb->esw.esw0.erw.cons)
+		cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
+}
+
+/*
+ * Accumulate status from irb to devstat.
+ */
+void
+ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
+{
+	struct irb *cdev_irb;
+
+	/*
+	 * Check if the status pending bit is set in stctl.
+	 * If not, the remaining bits have no meaning and we must ignore them.
+	 * The esw is not meaningful either...
+	 */
+	if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
+		return;
+
+	/* Check for channel checks and interface control checks. */
+	ccw_device_msg_control_check(cdev, irb);
+
+	/* Check for path not operational. */
+	if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
+		ccw_device_path_notoper(cdev);
+	/* No irb accumulation for transport mode irbs. */
+	if (scsw_is_tm(&irb->scsw)) {
+		memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+		return;
+	}
+	/*
+	 * Don't accumulate unsolicited interrupts.
+	 */
+	if (!scsw_is_solicited(&irb->scsw))
+		return;
+
+	cdev_irb = &cdev->private->irb;
+
+	/*
+	 * If the clear function had been performed, all formerly pending
+	 * status at the subchannel has been cleared and we must not pass
+	 * intermediate accumulated status to the device driver.
+	 */
+	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
+		memset(&cdev->private->irb, 0, sizeof(struct irb));
+
+	/* Copy bits which are valid only for the start function. */
+	if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
+		/* Copy key. */
+		cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
+		/* Copy suspend control bit. */
+		cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
+		/* Accumulate deferred condition code. */
+		cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
+		/* Copy ccw format bit. */
+		cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
+		/* Copy prefetch bit. */
+		cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
+		/* Copy initial-status-interruption-control. */
+		cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
+		/* Copy address limit checking control. */
+		cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
+		/* Copy suppress suspend bit. */
+		cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
+	}
+
+	/* Take care of the extended control bit and extended control word. */
+	ccw_device_accumulate_ecw(cdev, irb);
+
+	/* Accumulate function control. */
+	cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
+	/* Copy activity control. */
+	cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
+	/* Accumulate status control. */
+	cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
+	/*
+	 * Copy ccw address if it is valid. This is a bit simplified
+	 * but should be close enough for all practical purposes.
+	 */
+	if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
+	    ((irb->scsw.cmd.stctl ==
+	      (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
+	     (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
+	     (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
+	    (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
+		cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
+	/* Accumulate device status, but not the device busy flag. */
+	cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
+	/* dstat is not always valid. */
+	if (irb->scsw.cmd.stctl &
+	    (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
+	     | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
+		cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
+	/* Accumulate subchannel status. */
+	cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
+	/* Copy residual count if it is valid. */
+	if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
+	    (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
+	     == 0)
+		cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;
+
+	/* Take care of bits in the extended status word. */
+	ccw_device_accumulate_esw(cdev, irb);
+
+	/*
+	 * Check whether we must issue a SENSE CCW ourselves if there is no
+	 * concurrent sense facility installed for the subchannel.
+	 * No sense is required if no delayed sense is pending
+	 * and we did not get a unit check without sense information.
+	 *
+	 * Note: We should check for ioinfo[irq]->flags.consns but VM
+	 *	 violates the ESA/390 architecture and doesn't present an
+	 *	 operand exception for virtual devices without concurrent
+	 *	 sense facility available/supported when enabling the
+	 *	 concurrent sense facility.
+	 */
+	if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+	    !(cdev_irb->esw.esw0.erw.cons))
+		cdev->private->flags.dosense = 1;
+}
+
+/*
+ * Do a basic sense.
+ */
+int
+ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
+{
+	struct subchannel *sch;
+	struct ccw1 *sense_ccw;
+	int rc;
+
+	sch = to_subchannel(cdev->dev.parent);
+
+	/* A sense is required, can we do it now ? */
+	if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
+		/*
+		 * we received a Unit Check but we have no final
+		 *  status yet, therefore we must delay the SENSE
+		 *  processing. We must not report this intermediate
+		 *  status to the device interrupt handler.
+		 */
+		return -EBUSY;
+
+	/*
+	 * We have ending status but no sense information. Do a basic sense.
+	 */
+	sense_ccw = &to_io_private(sch)->sense_ccw;
+	sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
+	sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
+	sense_ccw->count = SENSE_MAX_COUNT;
+	sense_ccw->flags = CCW_FLAG_SLI;
+
+	rc = cio_start(sch, sense_ccw, 0xff);
+	if (rc == -ENODEV || rc == -EACCES)
+		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+	return rc;
+}
+
+/*
+ * Add information from basic sense to devstat.
+ */
+void
+ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
+{
+	/*
+	 * Check if the status pending bit is set in stctl.
+	 * If not, the remaining bits have no meaning and we must ignore them.
+	 * The esw is not meaningful either...
+	 */
+	if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
+		return;
+
+	/* Check for channel checks and interface control checks. */
+	ccw_device_msg_control_check(cdev, irb);
+
+	/* Check for path not operational. */
+	if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
+		ccw_device_path_notoper(cdev);
+
+	if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+	    (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
+		cdev->private->irb.esw.esw0.erw.cons = 1;
+		cdev->private->flags.dosense = 0;
+	}
+	/* Check if path verification is required. */
+	if (ccw_device_accumulate_esw_valid(irb) &&
+	    irb->esw.esw0.erw.pvrf)
+		cdev->private->flags.doverify = 1;
+}
+
+/*
+ * This function accumulates the status into the private devstat and
+ * starts a basic sense if one is needed.
+ */
+int
+ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
+{
+	ccw_device_accumulate_irb(cdev, irb);
+	if ((irb->scsw.cmd.actl  & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
+		return -EBUSY;
+	/* Check for basic sense. */
+	if (cdev->private->flags.dosense &&
+	    !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
+		cdev->private->irb.esw.esw0.erw.cons = 1;
+		cdev->private->flags.dosense = 0;
+		return 0;
+	}
+	if (cdev->private->flags.dosense) {
+		ccw_device_do_sense(cdev, irb);
+		return -EBUSY;
+	}
+	return 0;
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/fcx.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/fcx.c
new file mode 100644
index 0000000..ca5e9bb
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/fcx.c
@@ -0,0 +1,350 @@
+/*
+ *  Functions for assembling fcx enabled I/O control blocks.
+ *
+ *    Copyright IBM Corp. 2008
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <asm/fcx.h>
+#include "cio.h"
+
+/**
+ * tcw_get_intrg - return pointer to associated interrogate tcw
+ * @tcw: pointer to the original tcw
+ *
+ * Return a pointer to the interrogate tcw associated with the specified tcw
+ * or %NULL if there is no associated interrogate tcw.
+ */
+struct tcw *tcw_get_intrg(struct tcw *tcw)
+{
+	return (struct tcw *) ((addr_t) tcw->intrg);
+}
+EXPORT_SYMBOL(tcw_get_intrg);
+
+/**
+ * tcw_get_data - return pointer to input/output data associated with tcw
+ * @tcw: pointer to the tcw
+ *
+ * Return the input or output data address specified in the tcw depending
+ * on whether the r-bit or the w-bit is set. If neither bit is set, return
+ * %NULL.
+ */
+void *tcw_get_data(struct tcw *tcw)
+{
+	if (tcw->r)
+		return (void *) ((addr_t) tcw->input);
+	if (tcw->w)
+		return (void *) ((addr_t) tcw->output);
+	return NULL;
+}
+EXPORT_SYMBOL(tcw_get_data);
+
+/**
+ * tcw_get_tccb - return pointer to tccb associated with tcw
+ * @tcw: pointer to the tcw
+ *
+ * Return pointer to the tccb associated with this tcw.
+ */
+struct tccb *tcw_get_tccb(struct tcw *tcw)
+{
+	return (struct tccb *) ((addr_t) tcw->tccb);
+}
+EXPORT_SYMBOL(tcw_get_tccb);
+
+/**
+ * tcw_get_tsb - return pointer to tsb associated with tcw
+ * @tcw: pointer to the tcw
+ *
+ * Return pointer to the tsb associated with this tcw.
+ */
+struct tsb *tcw_get_tsb(struct tcw *tcw)
+{
+	return (struct tsb *) ((addr_t) tcw->tsb);
+}
+EXPORT_SYMBOL(tcw_get_tsb);
+
+/**
+ * tcw_init - initialize tcw data structure
+ * @tcw: pointer to the tcw to be initialized
+ * @r: initial value of the r-bit
+ * @w: initial value of the w-bit
+ *
+ * Initialize all fields of the specified tcw data structure with zero and
+ * fill in the format, flags, r and w fields.
+ */
+void tcw_init(struct tcw *tcw, int r, int w)
+{
+	memset(tcw, 0, sizeof(struct tcw));
+	tcw->format = TCW_FORMAT_DEFAULT;
+	tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT);
+	if (r)
+		tcw->r = 1;
+	if (w)
+		tcw->w = 1;
+}
+EXPORT_SYMBOL(tcw_init);
+
+static inline size_t tca_size(struct tccb *tccb)
+{
+	return tccb->tcah.tcal - 12;
+}
+
+static u32 calc_dcw_count(struct tccb *tccb)
+{
+	int offset;
+	struct dcw *dcw;
+	u32 count = 0;
+	size_t size;
+
+	size = tca_size(tccb);
+	for (offset = 0; offset < size;) {
+		dcw = (struct dcw *) &tccb->tca[offset];
+		count += dcw->count;
+		if (!(dcw->flags & DCW_FLAGS_CC))
+			break;
+		offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4);
+	}
+	return count;
+}
+
+static u32 calc_cbc_size(struct tidaw *tidaw, int num)
+{
+	int i;
+	u32 cbc_data;
+	u32 cbc_count = 0;
+	u64 data_count = 0;
+
+	for (i = 0; i < num; i++) {
+		if (tidaw[i].flags & TIDAW_FLAGS_LAST)
+			break;
+		/* TODO: find out if padding applies to the total data
+		 * transferred or to the data transferred by this tidaw.
+		 * Assumption: it applies to the total. */
+		data_count += tidaw[i].count;
+		if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) {
+			cbc_data = 4 + ALIGN(data_count, 4) - data_count;
+			cbc_count += cbc_data;
+			data_count += cbc_data;
+		}
+	}
+	return cbc_count;
+}
+
+/**
+ * tcw_finalize - finalize tcw length fields and tidaw list
+ * @tcw: pointer to the tcw
+ * @num_tidaws: the number of tidaws used to address input/output data or zero
+ * if no tida is used
+ *
+ * Calculate the input-/output-count and tccbl fields in the tcw, add a
+ * tcat to the tccb and terminate the data tidaw list if one is used.
+ *
+ * Note: in case input- or output-tida is used, the tidaw-list must be stored
+ * in contiguous storage (no ttic). The tcal field in the tccb must be
+ * up-to-date.
+ */
+void tcw_finalize(struct tcw *tcw, int num_tidaws)
+{
+	struct tidaw *tidaw;
+	struct tccb *tccb;
+	struct tccb_tcat *tcat;
+	u32 count;
+
+	/* Terminate tidaw list. */
+	tidaw = tcw_get_data(tcw);
+	if (num_tidaws > 0)
+		tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST;
+	/* Add tcat to tccb. */
+	tccb = tcw_get_tccb(tcw);
+	tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
+	memset(tcat, 0, sizeof(*tcat));
+	/* Calculate tcw input/output count and tcat transport count. */
+	count = calc_dcw_count(tccb);
+	if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
+		count += calc_cbc_size(tidaw, num_tidaws);
+	if (tcw->r)
+		tcw->input_count = count;
+	else if (tcw->w)
+		tcw->output_count = count;
+	tcat->count = ALIGN(count, 4) + 4;
+	/* Calculate tccbl. */
+	tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) +
+		      sizeof(struct tccb_tcat) - 20) >> 2;
+}
+EXPORT_SYMBOL(tcw_finalize);
+
+/**
+ * tcw_set_intrg - set the interrogate tcw address of a tcw
+ * @tcw: the tcw address
+ * @intrg_tcw: the address of the interrogate tcw
+ *
+ * Set the address of the interrogate tcw in the specified tcw.
+ */
+void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
+{
+	tcw->intrg = (u32) ((addr_t) intrg_tcw);
+}
+EXPORT_SYMBOL(tcw_set_intrg);
+
+/**
+ * tcw_set_data - set data address and tida flag of a tcw
+ * @tcw: the tcw address
+ * @data: the data address
+ * @use_tidal: zero if the data address specifies a contiguous block of data,
+ * non-zero if it specifies a list of tidaws.
+ *
+ * Set the input/output data address of a tcw (depending on the value of the
+ * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
+ * is set as well.
+ */
+void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
+{
+	if (tcw->r) {
+		tcw->input = (u64) ((addr_t) data);
+		if (use_tidal)
+			tcw->flags |= TCW_FLAGS_INPUT_TIDA;
+	} else if (tcw->w) {
+		tcw->output = (u64) ((addr_t) data);
+		if (use_tidal)
+			tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
+	}
+}
+EXPORT_SYMBOL(tcw_set_data);
+
+/**
+ * tcw_set_tccb - set tccb address of a tcw
+ * @tcw: the tcw address
+ * @tccb: the tccb address
+ *
+ * Set the address of the tccb in the specified tcw.
+ */
+void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
+{
+	tcw->tccb = (u64) ((addr_t) tccb);
+}
+EXPORT_SYMBOL(tcw_set_tccb);
+
+/**
+ * tcw_set_tsb - set tsb address of a tcw
+ * @tcw: the tcw address
+ * @tsb: the tsb address
+ *
+ * Set the address of the tsb in the specified tcw.
+ */
+void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
+{
+	tcw->tsb = (u64) ((addr_t) tsb);
+}
+EXPORT_SYMBOL(tcw_set_tsb);
+
+/**
+ * tccb_init - initialize tccb
+ * @tccb: the tccb address
+ * @size: the maximum size of the tccb
+ * @sac: the service-action-code to be used
+ *
+ * Initialize the header of the specified tccb by resetting all values to zero
+ * and filling in defaults for format, sac and initial tcal fields.
+ */
+void tccb_init(struct tccb *tccb, size_t size, u32 sac)
+{
+	memset(tccb, 0, size);
+	tccb->tcah.format = TCCB_FORMAT_DEFAULT;
+	tccb->tcah.sac = sac;
+	tccb->tcah.tcal = 12;
+}
+EXPORT_SYMBOL(tccb_init);
+
+/**
+ * tsb_init - initialize tsb
+ * @tsb: the tsb address
+ *
+ * Initialize the specified tsb by resetting all values to zero.
+ */
+void tsb_init(struct tsb *tsb)
+{
+	memset(tsb, 0, sizeof(*tsb));
+}
+EXPORT_SYMBOL(tsb_init);
+
+/**
+ * tccb_add_dcw - add a dcw to the tccb
+ * @tccb: the tccb address
+ * @tccb_size: the maximum tccb size
+ * @cmd: the dcw command
+ * @flags: flags for the dcw
+ * @cd: pointer to control data for this dcw or NULL if none is required
+ * @cd_count: number of control data bytes for this dcw
+ * @count: number of data bytes for this dcw
+ *
+ * Add a new dcw to the specified tccb by writing the dcw information specified
+ * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
+ * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
+ * would exceed the available space as defined by @tccb_size.
+ *
+ * Note: the tcal field of the tccb header will be updated to reflect the
+ * added content.
+ */
+struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
+			 void *cd, u8 cd_count, u32 count)
+{
+	struct dcw *dcw;
+	int size;
+	int tca_offset;
+
+	/* Check for space. */
+	tca_offset = tca_size(tccb);
+	size = ALIGN(sizeof(struct dcw) + cd_count, 4);
+	if (sizeof(struct tccb_tcah) + tca_offset + size +
+	    sizeof(struct tccb_tcat) > tccb_size)
+		return ERR_PTR(-ENOSPC);
+	/* Add dcw to tca. */
+	dcw = (struct dcw *) &tccb->tca[tca_offset];
+	memset(dcw, 0, size);
+	dcw->cmd = cmd;
+	dcw->flags = flags;
+	dcw->count = count;
+	dcw->cd_count = cd_count;
+	if (cd)
+		memcpy(&dcw->cd[0], cd, cd_count);
+	tccb->tcah.tcal += size;
+	return dcw;
+}
+EXPORT_SYMBOL(tccb_add_dcw);
+
+/**
+ * tcw_add_tidaw - add a tidaw to a tcw
+ * @tcw: the tcw address
+ * @num_tidaws: the current number of tidaws
+ * @flags: flags for the new tidaw
+ * @addr: address value for the new tidaw
+ * @count: count value for the new tidaw
+ *
+ * Add a new tidaw to the input/output data tidaw-list of the specified tcw
+ * (depending on the value of the r-flag and w-flag) and return a pointer to
+ * the new tidaw.
+ *
+ * Note: the tidaw-list is assumed to be contiguous with no ttics. The caller
+ * must ensure that there is enough space for the new tidaw. The last-tidaw
+ * flag for the last tidaw in the list will be set by tcw_finalize.
+ */
+struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
+			    void *addr, u32 count)
+{
+	struct tidaw *tidaw;
+
+	/* Add tidaw to tidaw-list. */
+	tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws;
+	memset(tidaw, 0, sizeof(struct tidaw));
+	tidaw->flags = flags;
+	tidaw->count = count;
+	tidaw->addr = (u64) ((addr_t) addr);
+	return tidaw;
+}
+EXPORT_SYMBOL(tcw_add_tidaw);
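+
+/*
+ * Illustrative sketch (not part of this file): a caller assembles a simple
+ * read-type transport command roughly as follows. cmd, sac, buf and len are
+ * placeholders; tcw, tccb, tidaws and tsb point to suitably aligned buffers;
+ * error handling is omitted.
+ *
+ *	tcw_init(tcw, 1, 0);
+ *	tccb_init(tccb, TCCB_MAX_SIZE, sac);
+ *	tccb_add_dcw(tccb, TCCB_MAX_SIZE, cmd, 0, NULL, 0, len);
+ *	tcw_set_tccb(tcw, tccb);
+ *	tcw_set_data(tcw, tidaws, 1);
+ *	tcw_add_tidaw(tcw, 0, 0, buf, len);
+ *	tsb_init(tsb);
+ *	tcw_set_tsb(tcw, tsb);
+ *	tcw_finalize(tcw, 1);
+ */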
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/idset.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/idset.c
new file mode 100644
index 0000000..4d10981
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/idset.c
@@ -0,0 +1,132 @@
+/*
+ *  drivers/s390/cio/idset.c
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/bitops.h>
+#include "idset.h"
+#include "css.h"
+
+struct idset {
+	int num_ssid;
+	int num_id;
+	unsigned long bitmap[0];
+};
+
+static inline unsigned long bitmap_size(int num_ssid, int num_id)
+{
+	return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
+}
+
+static struct idset *idset_new(int num_ssid, int num_id)
+{
+	struct idset *set;
+
+	set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id));
+	if (set) {
+		set->num_ssid = num_ssid;
+		set->num_id = num_id;
+		memset(set->bitmap, 0, bitmap_size(num_ssid, num_id));
+	}
+	return set;
+}
+
+void idset_free(struct idset *set)
+{
+	vfree(set);
+}
+
+void idset_clear(struct idset *set)
+{
+	memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id));
+}
+
+void idset_fill(struct idset *set)
+{
+	memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
+}
+
+static inline void idset_add(struct idset *set, int ssid, int id)
+{
+	set_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline void idset_del(struct idset *set, int ssid, int id)
+{
+	clear_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline int idset_contains(struct idset *set, int ssid, int id)
+{
+	return test_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline int idset_get_first(struct idset *set, int *ssid, int *id)
+{
+	int bitnum;
+
+	bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
+	if (bitnum >= set->num_ssid * set->num_id)
+		return 0;
+	*ssid = bitnum / set->num_id;
+	*id = bitnum % set->num_id;
+	return 1;
+}
+
+struct idset *idset_sch_new(void)
+{
+	return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1);
+}
+
+void idset_sch_add(struct idset *set, struct subchannel_id schid)
+{
+	idset_add(set, schid.ssid, schid.sch_no);
+}
+
+void idset_sch_del(struct idset *set, struct subchannel_id schid)
+{
+	idset_del(set, schid.ssid, schid.sch_no);
+}
+
+int idset_sch_contains(struct idset *set, struct subchannel_id schid)
+{
+	return idset_contains(set, schid.ssid, schid.sch_no);
+}
+
+int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
+{
+	int ssid = 0;
+	int id = 0;
+	int rc;
+
+	rc = idset_get_first(set, &ssid, &id);
+	if (rc) {
+		init_subchannel_id(schid);
+		schid->ssid = ssid;
+		schid->sch_no = id;
+	}
+	return rc;
+}
+
+int idset_is_empty(struct idset *set)
+{
+	int bitnum;
+
+	bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
+	if (bitnum >= set->num_ssid * set->num_id)
+		return 1;
+	return 0;
+}
+
+void idset_add_set(struct idset *to, struct idset *from)
+{
+	unsigned long i, len;
+
+	len = min(__BITOPS_WORDS(to->num_ssid * to->num_id),
+		  __BITOPS_WORDS(from->num_ssid * from->num_id));
+	for (i = 0; i < len ; i++)
+		to->bitmap[i] |= from->bitmap[i];
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/idset.h b/ap/os/linux/linux-3.4.x/drivers/s390/cio/idset.h
new file mode 100644
index 0000000..7543da4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/idset.h
@@ -0,0 +1,27 @@
+/*
+ *  drivers/s390/cio/idset.h
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef S390_IDSET_H
+#define S390_IDSET_H S390_IDSET_H
+
+#include <asm/schid.h>
+
+struct idset;
+
+void idset_free(struct idset *set);
+void idset_clear(struct idset *set);
+void idset_fill(struct idset *set);
+
+struct idset *idset_sch_new(void);
+void idset_sch_add(struct idset *set, struct subchannel_id id);
+void idset_sch_del(struct idset *set, struct subchannel_id id);
+int idset_sch_contains(struct idset *set, struct subchannel_id id);
+int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
+int idset_is_empty(struct idset *set);
+void idset_add_set(struct idset *to, struct idset *from);
+
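+/*
+ * A minimal usage sketch (error handling shortened); idset_sch_new()
+ * returns NULL if the bitmap cannot be allocated:
+ *
+ *	struct idset *set = idset_sch_new();
+ *
+ *	if (!set)
+ *		return -ENOMEM;
+ *	idset_sch_add(set, schid);
+ *	if (idset_sch_contains(set, schid))
+ *		handle_schid(schid);
+ *	idset_free(set);
+ *
+ * handle_schid() and schid are placeholders for caller-side code.
+ */
+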
+#endif /* S390_IDSET_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/io_sch.h b/ap/os/linux/linux-3.4.x/drivers/s390/cio/io_sch.h
new file mode 100644
index 0000000..76253df
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/io_sch.h
@@ -0,0 +1,212 @@
+#ifndef S390_IO_SCH_H
+#define S390_IO_SCH_H
+
+#include <linux/types.h>
+#include <asm/schid.h>
+#include <asm/ccwdev.h>
+#include <asm/irq.h>
+#include "css.h"
+#include "orb.h"
+
+struct io_subchannel_private {
+	union orb orb;		/* operation request block */
+	struct ccw1 sense_ccw;	/* static ccw for sense command */
+	struct ccw_device *cdev;/* pointer to the child ccw device */
+	struct {
+		unsigned int suspend:1;	/* allow suspend */
+		unsigned int prefetch:1;/* deny prefetch */
+		unsigned int inter:1;	/* suppress intermediate interrupts */
+	} __packed options;
+} __aligned(8);
+
+#define to_io_private(n) ((struct io_subchannel_private *) \
+			  dev_get_drvdata(&(n)->dev))
+#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p))
+
+static inline struct ccw_device *sch_get_cdev(struct subchannel *sch)
+{
+	struct io_subchannel_private *priv = to_io_private(sch);
+	return priv ? priv->cdev : NULL;
+}
+
+static inline void sch_set_cdev(struct subchannel *sch,
+				struct ccw_device *cdev)
+{
+	struct io_subchannel_private *priv = to_io_private(sch);
+	if (priv)
+		priv->cdev = cdev;
+}
+
+#define MAX_CIWS 8
+
+/*
+ * Possible status values for a CCW request's I/O.
+ */
+enum io_status {
+	IO_DONE,
+	IO_RUNNING,
+	IO_STATUS_ERROR,
+	IO_PATH_ERROR,
+	IO_REJECTED,
+	IO_KILLED
+};
+
+/**
+ * ccw_request - Internal CCW request.
+ * @cp: channel program to start
+ * @timeout: maximum allowable time in jiffies between start I/O and interrupt
+ * @maxretries: number of retries per I/O operation and path
+ * @lpm: mask of paths to use
+ * @check: optional callback that determines if results are final
+ * @filter: optional callback to adjust request status based on IRB data
+ * @callback: final callback
+ * @data: user-defined pointer passed to all callbacks
+ * @singlepath: if set, use only one path from @lpm per start I/O
+ * @cancel: non-zero if request was cancelled
+ * @done: non-zero if request was finished
+ * @mask: current path mask
+ * @retries: current number of retries
+ * @drc: delayed return code
+ */
+struct ccw_request {
+	struct ccw1 *cp;
+	unsigned long timeout;
+	u16 maxretries;
+	u8 lpm;
+	int (*check)(struct ccw_device *, void *);
+	enum io_status (*filter)(struct ccw_device *, void *, struct irb *,
+				 enum io_status);
+	void (*callback)(struct ccw_device *, void *, int);
+	void *data;
+	unsigned int singlepath:1;
+	/* These fields are used internally. */
+	unsigned int cancel:1;
+	unsigned int done:1;
+	u16 mask;
+	u16 retries;
+	int drc;
+} __attribute__((packed));
+
+/*
+ * sense-id response buffer layout
+ */
+struct senseid {
+	/* common part */
+	u8  reserved;	/* always 0x'FF' */
+	u16 cu_type;	/* control unit type */
+	u8  cu_model;	/* control unit model */
+	u16 dev_type;	/* device type */
+	u8  dev_model;	/* device model */
+	u8  unused;	/* padding byte */
+	/* extended part */
+	struct ciw ciw[MAX_CIWS];	/* variable # of CIWs */
+}  __attribute__ ((packed, aligned(4)));
+
+enum cdev_todo {
+	CDEV_TODO_NOTHING,
+	CDEV_TODO_ENABLE_CMF,
+	CDEV_TODO_REBIND,
+	CDEV_TODO_REGISTER,
+	CDEV_TODO_UNREG,
+	CDEV_TODO_UNREG_EVAL,
+};
+
+#define FAKE_CMD_IRB	1
+#define FAKE_TM_IRB	2
+
+struct ccw_device_private {
+	struct ccw_device *cdev;
+	struct subchannel *sch;
+	int state;		/* device state */
+	atomic_t onoff;
+	struct ccw_dev_id dev_id;	/* device id */
+	struct subchannel_id schid;	/* subchannel number */
+	struct ccw_request req;		/* internal I/O request */
+	int iretry;
+	u8 pgid_valid_mask;	/* mask of valid PGIDs */
+	u8 pgid_todo_mask;	/* mask of PGIDs to be adjusted */
+	u8 pgid_reset_mask;	/* mask of PGIDs which were reset */
+	u8 path_gone_mask;	/* mask of paths that became unavailable */
+	u8 path_new_mask;	/* mask of paths that became available */
+	struct {
+		unsigned int fast:1;	/* post with "channel end" */
+		unsigned int repall:1;	/* report every interrupt status */
+		unsigned int pgroup:1;	/* do path grouping */
+		unsigned int force:1;	/* allow forced online */
+		unsigned int mpath:1;	/* do multipathing */
+	} __attribute__ ((packed)) options;
+	struct {
+		unsigned int esid:1;	    /* Ext. SenseID supported by HW */
+		unsigned int dosense:1;	    /* delayed SENSE required */
+		unsigned int doverify:1;    /* delayed path verification */
+		unsigned int donotify:1;    /* call notify function */
+		unsigned int recog_done:1;  /* dev. recog. complete */
+		unsigned int fake_irb:2;    /* deliver faked irb */
+		unsigned int resuming:1;    /* recognition while resume */
+		unsigned int pgroup:1;	    /* pathgroup is set up */
+		unsigned int mpath:1;	    /* multipathing is set up */
+		unsigned int initialized:1; /* set if initial reference held */
+	} __attribute__((packed)) flags;
+	unsigned long intparm;	/* user interruption parameter */
+	struct qdio_irq *qdio_data;
+	struct irb irb;		/* device status */
+	struct senseid senseid;	/* SenseID info */
+	struct pgid pgid[8];	/* path group IDs per chpid*/
+	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
+	struct work_struct todo_work;
+	enum cdev_todo todo;
+	wait_queue_head_t wait_q;
+	struct timer_list timer;
+	void *cmb;			/* measurement information */
+	struct list_head cmb_list;	/* list of measured devices */
+	u64 cmb_start_time;		/* clock value of cmb reset */
+	void *cmb_wait;			/* deferred cmb enable/disable */
+	enum interruption_class int_class;
+};
+
+static inline int rsch(struct subchannel_id schid)
+{
+	register struct subchannel_id reg1 asm("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	rsch\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode)
+		: "d" (reg1)
+		: "cc", "memory");
+	return ccode;
+}
+
+static inline int hsch(struct subchannel_id schid)
+{
+	register struct subchannel_id reg1 asm("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	hsch\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode)
+		: "d" (reg1)
+		: "cc");
+	return ccode;
+}
+
+static inline int xsch(struct subchannel_id schid)
+{
+	register struct subchannel_id reg1 asm("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	.insn	rre,0xb2760000,%1,0\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode)
+		: "d" (reg1)
+		: "cc");
+	return ccode;
+}
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/ioasm.h b/ap/os/linux/linux-3.4.x/drivers/s390/cio/ioasm.h
new file mode 100644
index 0000000..4d80fc6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/ioasm.h
@@ -0,0 +1,167 @@
+#ifndef S390_CIO_IOASM_H
+#define S390_CIO_IOASM_H
+
+#include <asm/chpid.h>
+#include <asm/schid.h>
+#include "orb.h"
+#include "cio.h"
+
+/*
+ * TPI info structure
+ */
+struct tpi_info {
+	struct subchannel_id schid;
+	__u32 intparm;		 /* interruption parameter */
+	__u32 adapter_IO : 1;
+	__u32 reserved2	 : 1;
+	__u32 isc	 : 3;
+	__u32 reserved3	 : 12;
+	__u32 int_type	 : 3;
+	__u32 reserved4	 : 12;
+} __attribute__ ((packed));
+
+
+/*
+ * Some S390 specific IO instructions as inline
+ */
+
+static inline int stsch_err(struct subchannel_id schid, struct schib *addr)
+{
+	register struct subchannel_id reg1 asm ("1") = schid;
+	int ccode = -EIO;
+
+	asm volatile(
+		"	stsch	0(%3)\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "+d" (ccode), "=m" (*addr)
+		: "d" (reg1), "a" (addr)
+		: "cc");
+	return ccode;
+}
+
+static inline int msch(struct subchannel_id schid, struct schib *addr)
+{
+	register struct subchannel_id reg1 asm ("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	msch	0(%2)\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode)
+		: "d" (reg1), "a" (addr), "m" (*addr)
+		: "cc");
+	return ccode;
+}
+
+static inline int msch_err(struct subchannel_id schid, struct schib *addr)
+{
+	register struct subchannel_id reg1 asm ("1") = schid;
+	int ccode = -EIO;
+
+	asm volatile(
+		"	msch	0(%2)\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "+d" (ccode)
+		: "d" (reg1), "a" (addr), "m" (*addr)
+		: "cc");
+	return ccode;
+}
+
+static inline int tsch(struct subchannel_id schid, struct irb *addr)
+{
+	register struct subchannel_id reg1 asm ("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	tsch	0(%3)\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode), "=m" (*addr)
+		: "d" (reg1), "a" (addr)
+		: "cc");
+	return ccode;
+}
+
+static inline int ssch(struct subchannel_id schid, union orb *addr)
+{
+	register struct subchannel_id reg1 asm("1") = schid;
+	int ccode = -EIO;
+
+	asm volatile(
+		"	ssch	0(%2)\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (ccode)
+		: "d" (reg1), "a" (addr), "m" (*addr)
+		: "cc", "memory");
+	return ccode;
+}
+
+static inline int csch(struct subchannel_id schid)
+{
+	register struct subchannel_id reg1 asm("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	csch\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode)
+		: "d" (reg1)
+		: "cc");
+	return ccode;
+}
+
+static inline int tpi(struct tpi_info *addr)
+{
+	int ccode;
+
+	asm volatile(
+		"	tpi	0(%2)\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode), "=m" (*addr)
+		: "a" (addr)
+		: "cc");
+	return ccode;
+}
+
+static inline int chsc(void *chsc_area)
+{
+	typedef struct { char _[4096]; } addr_type;
+	int cc;
+
+	asm volatile(
+		"	.insn	rre,0xb25f0000,%2,0\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (cc), "=m" (*(addr_type *) chsc_area)
+		: "d" (chsc_area), "m" (*(addr_type *) chsc_area)
+		: "cc");
+	return cc;
+}
+
+static inline int rchp(struct chp_id chpid)
+{
+	register struct chp_id reg1 asm ("1") = chpid;
+	int ccode;
+
+	asm volatile(
+		"	lr	1,%1\n"
+		"	rchp\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode) : "d" (reg1) : "cc");
+	return ccode;
+}
+
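+/*
+ * Most wrappers above return the instruction's condition code (0-3) as
+ * extracted via ipm/srl; stsch_err, msch_err and ssch additionally carry an
+ * exception table and return -EIO if the instruction faults. A minimal
+ * usage sketch (schib is a hypothetical local buffer):
+ *
+ *	struct schib schib;
+ *
+ *	if (stsch_err(schid, &schib) != 0)
+ *		return -ENODEV;
+ */
+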
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/isc.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/isc.c
new file mode 100644
index 0000000..c592087
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/isc.c
@@ -0,0 +1,68 @@
+/*
+ * Functions for registration of I/O interruption subclasses on s390.
+ *
+ * Copyright IBM Corp. 2008
+ * Authors: Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <asm/isc.h>
+
+static unsigned int isc_refs[MAX_ISC + 1];
+static DEFINE_SPINLOCK(isc_ref_lock);
+
+
+/**
+ * isc_register - register an I/O interruption subclass.
+ * @isc: I/O interruption subclass to register
+ *
+ * The number of users for @isc is increased. If this is the first user to
+ * register @isc, the corresponding I/O interruption subclass mask is enabled.
+ *
+ * Context:
+ *   This function must not be called in interrupt context.
+ */
+void isc_register(unsigned int isc)
+{
+	if (isc > MAX_ISC) {
+		WARN_ON(1);
+		return;
+	}
+
+	spin_lock(&isc_ref_lock);
+	if (isc_refs[isc] == 0)
+		ctl_set_bit(6, 31 - isc);
+	isc_refs[isc]++;
+	spin_unlock(&isc_ref_lock);
+}
+EXPORT_SYMBOL_GPL(isc_register);
+
+/**
+ * isc_unregister - unregister an I/O interruption subclass.
+ * @isc: I/O interruption subclass to unregister
+ *
+ * The number of users for @isc is decreased. If this is the last user to
+ * unregister @isc, the corresponding I/O interruption subclass mask is
+ * disabled.
+ * Note: This function must not be called if isc_register() hasn't been called
+ * before by the driver for @isc.
+ *
+ * Context:
+ *   This function must not be called in interrupt context.
+ */
+void isc_unregister(unsigned int isc)
+{
+	spin_lock(&isc_ref_lock);
+	/* check for misuse */
+	if (isc > MAX_ISC || isc_refs[isc] == 0) {
+		WARN_ON(1);
+		goto out_unlock;
+	}
+	if (isc_refs[isc] == 1)
+		ctl_clear_bit(6, 31 - isc);
+	isc_refs[isc]--;
+out_unlock:
+	spin_unlock(&isc_ref_lock);
+}
+EXPORT_SYMBOL_GPL(isc_unregister);
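+
+/*
+ * A minimal usage sketch for a driver that owns one interruption subclass;
+ * MY_ISC is a hypothetical constant in the range 0..MAX_ISC:
+ *
+ *	isc_register(MY_ISC);
+ *	... set up subchannels and perform I/O on MY_ISC ...
+ *	isc_unregister(MY_ISC);
+ *
+ * Calls must be balanced: the subclass mask is only disabled again once the
+ * last user has unregistered.
+ */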
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/itcw.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/itcw.c
new file mode 100644
index 0000000..358ee16
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/itcw.c
@@ -0,0 +1,369 @@
+/*
+ *  Functions for incremental construction of fcx enabled I/O control blocks.
+ *
+ *    Copyright IBM Corp. 2008
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <asm/fcx.h>
+#include <asm/itcw.h>
+
+/**
+ * struct itcw - incremental tcw helper data type
+ *
+ * This structure serves as a handle for the incremental construction of a
+ * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate
+ * tcw and associated data. The data structures are contained inside a single
+ * contiguous buffer provided by the user.
+ *
+ * The itcw construction functions take care of overall data integrity:
+ * - reset unused fields to zero
+ * - fill in required pointers
+ * - ensure required alignment for data structures
+ * - prevent data structures to cross 4k-byte boundary where required
+ * - calculate tccb-related length fields
+ * - optionally provide ready-made interrogate tcw and associated structures
+ *
+ * Restrictions apply to the itcws created with these construction functions:
+ * - tida only supported for data address, not for tccb
+ * - only contiguous tidaw-lists (no ttic)
+ * - total number of bytes required per itcw may not exceed 4k bytes
+ * - either read or write operation (may not work with r=0 and w=0)
+ *
+ * Example:
+ * struct itcw *itcw;
+ * void *buffer;
+ * size_t size;
+ *
+ * size = itcw_calc_size(1, 2, 0);
+ * buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
+ * if (!buffer)
+ *	return -ENOMEM;
+ * itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0);
+ * if (IS_ERR(itcw))
+ *	return PTR_ERR(itcw);
+ * itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72);
+ * itcw_add_tidaw(itcw, 0, 0x30000, 20);
+ * itcw_add_tidaw(itcw, 0, 0x40000, 52);
+ * itcw_finalize(itcw);
+ *
+ */
+struct itcw {
+	struct tcw *tcw;
+	struct tcw *intrg_tcw;
+	int num_tidaws;
+	int max_tidaws;
+	int intrg_num_tidaws;
+	int intrg_max_tidaws;
+};
+
+/**
+ * itcw_get_tcw - return pointer to tcw associated with the itcw
+ * @itcw: address of the itcw
+ *
+ * Return pointer to the tcw associated with the itcw.
+ */
+struct tcw *itcw_get_tcw(struct itcw *itcw)
+{
+	return itcw->tcw;
+}
+EXPORT_SYMBOL(itcw_get_tcw);
+
+/**
+ * itcw_calc_size - return the size of an itcw with the given parameters
+ * @intrg: if non-zero, add an interrogate tcw
+ * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
+ * if no tida is to be used.
+ * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
+ * by the interrogate tcw, if specified
+ *
+ * Calculate and return the number of bytes required to hold an itcw with the
+ * given parameters and assuming tccbs with maximum size.
+ *
+ * Note that the resulting size also contains bytes needed for alignment
+ * padding as well as padding to ensure that data structures don't cross a
+ * 4k-boundary where required.
+ */
+size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
+{
+	size_t len;
+	int cross_count;
+
+	/* Main data. */
+	len = sizeof(struct itcw);
+	len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
+	       /* TSB */ sizeof(struct tsb) +
+	       /* TIDAL */ max_tidaws * sizeof(struct tidaw);
+	/* Interrogate data. */
+	if (intrg) {
+		len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
+		       /* TSB */ sizeof(struct tsb) +
+		       /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
+	}
+
+	/* Maximum required alignment padding. */
+	len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
+
+	/* TIDAW lists may not cross a 4k boundary. To cross a
+	 * boundary we need to add a TTIC TIDAW. We need to reserve
+	 * one additional TIDAW for a TTIC that we may need to add due
+	 * to the placement of the data chunk in memory, and a further
+	 * TIDAW for each page boundary that the TIDAW list may cross
+	 * due to its own size.
+	 */
+	if (max_tidaws) {
+		cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+				   >> PAGE_SHIFT);
+		len += cross_count * sizeof(struct tidaw);
+	}
+	if (intrg_max_tidaws) {
+		cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+				   >> PAGE_SHIFT);
+		len += cross_count * sizeof(struct tidaw);
+	}
+	return len;
+}
+EXPORT_SYMBOL(itcw_calc_size);
+
+#define CROSS4K(x, l)	(((x) & ~4095) != (((x) + (l)) & ~4095))
+
+static inline void *fit_chunk(addr_t *start, addr_t end, size_t len,
+			      int align, int check_4k)
+{
+	addr_t addr;
+
+	addr = ALIGN(*start, align);
+	if (check_4k && CROSS4K(addr, len)) {
+		addr = ALIGN(addr, 4096);
+		addr = ALIGN(addr, align);
+	}
+	if (addr + len > end)
+		return ERR_PTR(-ENOSPC);
+	*start = addr + len;
+	return (void *) addr;
+}
+
+/**
+ * itcw_init - initialize incremental tcw data structure
+ * @buffer: address of buffer to use for data structures
+ * @size: number of bytes in buffer
+ * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write
+ * operation tcw
+ * @intrg: if non-zero, add and initialize an interrogate tcw
+ * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
+ * if no tida is to be used.
+ * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
+ * by the interrogate tcw, if specified
+ *
+ * Prepare the specified buffer to be used as an incremental tcw, i.e. a
+ * helper data structure that can be used to construct a valid tcw by
+ * successive calls to other helper functions. Note: the buffer needs to be
+ * located below the 2G address limit. The resulting tcw has the following
+ * restrictions:
+ *  - no tccb tidal
+ *  - input/output tidal is contiguous (no ttic)
+ *  - total data should not exceed 4k
+ *  - tcw specifies either read or write operation
+ *
+ * On success, return pointer to the resulting incremental tcw data structure,
+ * ERR_PTR otherwise.
+ */
+struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
+		       int max_tidaws, int intrg_max_tidaws)
+{
+	struct itcw *itcw;
+	void *chunk;
+	addr_t start;
+	addr_t end;
+	int cross_count;
+
+	/* Check for 2G limit. */
+	start = (addr_t) buffer;
+	end = start + size;
+	if (end > (1 << 31))
+		return ERR_PTR(-EINVAL);
+	memset(buffer, 0, size);
+	/* ITCW. */
+	chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
+	if (IS_ERR(chunk))
+		return chunk;
+	itcw = chunk;
+	/* allow for TTIC tidaws that may be needed to cross a page boundary */
+	cross_count = 0;
+	if (max_tidaws)
+		cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+				   >> PAGE_SHIFT);
+	itcw->max_tidaws = max_tidaws + cross_count;
+	cross_count = 0;
+	if (intrg_max_tidaws)
+		cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+				   >> PAGE_SHIFT);
+	itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count;
+	/* Main TCW. */
+	chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
+	if (IS_ERR(chunk))
+		return chunk;
+	itcw->tcw = chunk;
+	tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0,
+		 (op == ITCW_OP_WRITE) ? 1 : 0);
+	/* Interrogate TCW. */
+	if (intrg) {
+		chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
+		if (IS_ERR(chunk))
+			return chunk;
+		itcw->intrg_tcw = chunk;
+		tcw_init(itcw->intrg_tcw, 1, 0);
+		tcw_set_intrg(itcw->tcw, itcw->intrg_tcw);
+	}
+	/* Data TIDAL. */
+	if (max_tidaws > 0) {
+		chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
+				  itcw->max_tidaws, 16, 0);
+		if (IS_ERR(chunk))
+			return chunk;
+		tcw_set_data(itcw->tcw, chunk, 1);
+	}
+	/* Interrogate data TIDAL. */
+	if (intrg && (intrg_max_tidaws > 0)) {
+		chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
+				  itcw->intrg_max_tidaws, 16, 0);
+		if (IS_ERR(chunk))
+			return chunk;
+		tcw_set_data(itcw->intrg_tcw, chunk, 1);
+	}
+	/* TSB. */
+	chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
+	if (IS_ERR(chunk))
+		return chunk;
+	tsb_init(chunk);
+	tcw_set_tsb(itcw->tcw, chunk);
+	/* Interrogate TSB. */
+	if (intrg) {
+		chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
+		if (IS_ERR(chunk))
+			return chunk;
+		tsb_init(chunk);
+		tcw_set_tsb(itcw->intrg_tcw, chunk);
+	}
+	/* TCCB. */
+	chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
+	if (IS_ERR(chunk))
+		return chunk;
+	tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
+	tcw_set_tccb(itcw->tcw, chunk);
+	/* Interrogate TCCB. */
+	if (intrg) {
+		chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
+		if (IS_ERR(chunk))
+			return chunk;
+		tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG);
+		tcw_set_tccb(itcw->intrg_tcw, chunk);
+		tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL,
+			     sizeof(struct dcw_intrg_data), 0);
+		tcw_finalize(itcw->intrg_tcw, 0);
+	}
+	return itcw;
+}
+EXPORT_SYMBOL(itcw_init);
+
+/**
+ * itcw_add_dcw - add a dcw to the itcw
+ * @itcw: address of the itcw
+ * @cmd: the dcw command
+ * @flags: flags for the dcw
+ * @cd: address of control data for this dcw or NULL if none is required
+ * @cd_count: number of control data bytes for this dcw
+ * @count: number of data bytes for this dcw
+ *
+ * Add a new dcw to the specified itcw by writing the dcw information specified
+ * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
+ * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
+ * would exceed the available space.
+ *
+ * Note: the tcal field of the tccb header will be updated to reflect added
+ * content.
+ */
+struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd,
+			 u8 cd_count, u32 count)
+{
+	return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd,
+			    flags, cd, cd_count, count);
+}
+EXPORT_SYMBOL(itcw_add_dcw);
+
+/**
+ * itcw_add_tidaw - add a tidaw to the itcw
+ * @itcw: address of the itcw
+ * @flags: flags for the new tidaw
+ * @addr: address value for the new tidaw
+ * @count: count value for the new tidaw
+ *
+ * Add a new tidaw to the input/output data tidaw-list of the specified itcw
+ * (depending on the value of the r-flag and w-flag). Return a pointer to
+ * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
+ * available space.
+ *
+ * Note: TTIC tidaws are automatically added when needed, so explicitly calling
+ * this interface with the TTIC flag is not supported. The last-tidaw flag
+ * for the last tidaw in the list will be set by itcw_finalize.
+ */
+struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
+{
+	struct tidaw *following;
+
+	if (itcw->num_tidaws >= itcw->max_tidaws)
+		return ERR_PTR(-ENOSPC);
+	/*
+	 * If the tidaw which follows the one we are about to fill is on the
+	 * next page, we first have to insert a TTIC tidaw that points to the
+	 * tidaw on the new page.
+	 */
+	following = ((struct tidaw *) tcw_get_data(itcw->tcw))
+		+ itcw->num_tidaws + 1;
+	if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) {
+		tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++,
+			      TIDAW_FLAGS_TTIC, following, 0);
+		if (itcw->num_tidaws >= itcw->max_tidaws)
+			return ERR_PTR(-ENOSPC);
+	}
+	return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
+}
+EXPORT_SYMBOL(itcw_add_tidaw);
+
+/**
+ * itcw_set_data - set data address and tida flag of the itcw
+ * @itcw: address of the itcw
+ * @addr: the data address
+ * @use_tidal: zero if the data address specifies a contiguous block of data,
+ * non-zero if it specifies a list of tidaws.
+ *
+ * Set the input/output data address of the itcw (depending on the value of the
+ * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
+ * is set as well.
+ */
+void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal)
+{
+	tcw_set_data(itcw->tcw, addr, use_tidal);
+}
+EXPORT_SYMBOL(itcw_set_data);
+
+/**
+ * itcw_finalize - calculate length and count fields of the itcw
+ * @itcw: address of the itcw
+ *
+ * Calculate the tcw input-/output-count and tccbl fields and add a tcat to
+ * the tccb.
+ * In case input- or output-tida is used, the tidaw-list must be stored in
+ * contiguous storage (no ttic). The tcal field in the tccb must be
+ * up-to-date.
+ */
+void itcw_finalize(struct itcw *itcw)
+{
+	tcw_finalize(itcw->tcw, itcw->num_tidaws);
+}
+EXPORT_SYMBOL(itcw_finalize);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/orb.h b/ap/os/linux/linux-3.4.x/drivers/s390/cio/orb.h
new file mode 100644
index 0000000..45a9865
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/orb.h
@@ -0,0 +1,67 @@
+/*
+ * Orb related data structures.
+ *
+ * Copyright IBM Corp. 2007, 2011
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ *	      Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#ifndef S390_ORB_H
+#define S390_ORB_H
+
+/*
+ * Command-mode operation request block
+ */
+struct cmd_orb {
+	u32 intparm;	/* interruption parameter */
+	u32 key:4;	/* flags, like key, suspend control, etc. */
+	u32 spnd:1;	/* suspend control */
+	u32 res1:1;	/* reserved */
+	u32 mod:1;	/* modification control */
+	u32 sync:1;	/* synchronize control */
+	u32 fmt:1;	/* format control */
+	u32 pfch:1;	/* prefetch control */
+	u32 isic:1;	/* initial-status-interruption control */
+	u32 alcc:1;	/* address-limit-checking control */
+	u32 ssic:1;	/* suppress-suspended-interr. control */
+	u32 res2:1;	/* reserved */
+	u32 c64:1;	/* IDAW/QDIO 64 bit control  */
+	u32 i2k:1;	/* IDAW 2/4kB block size control */
+	u32 lpm:8;	/* logical path mask */
+	u32 ils:1;	/* incorrect length */
+	u32 zero:6;	/* reserved zeros */
+	u32 orbx:1;	/* ORB extension control */
+	u32 cpa;	/* channel program address */
+}  __packed __aligned(4);
+
+/*
+ * Transport-mode operation request block
+ */
+struct tm_orb {
+	u32 intparm;
+	u32 key:4;
+	u32:9;
+	u32 b:1;
+	u32:2;
+	u32 lpm:8;
+	u32:7;
+	u32 x:1;
+	u32 tcw;
+	u32 prio:8;
+	u32:8;
+	u32 rsvpgm:8;
+	u32:8;
+	u32:32;
+	u32:32;
+	u32:32;
+	u32:32;
+}  __packed __aligned(4);
+
+union orb {
+	struct cmd_orb cmd;
+	struct tm_orb tm;
+}  __packed __aligned(4);
+
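+/*
+ * A hypothetical sketch of setting up a command-mode orb before passing it
+ * to ssch(); intparm, lpm and the format-1 channel-program address cpa come
+ * from the caller and must be addressable by the channel subsystem:
+ *
+ *	union orb orb;
+ *
+ *	memset(&orb, 0, sizeof(orb));
+ *	orb.cmd.intparm = intparm;
+ *	orb.cmd.fmt = 1;
+ *	orb.cmd.lpm = lpm;
+ *	orb.cmd.cpa = cpa;
+ */
+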
+#endif /* S390_ORB_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio.h b/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio.h
new file mode 100644
index 0000000..b962ffb
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio.h
@@ -0,0 +1,463 @@
+/*
+ * linux/drivers/s390/cio/qdio.h
+ *
+ * Copyright 2000,2009 IBM Corp.
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ *	      Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#ifndef _CIO_QDIO_H
+#define _CIO_QDIO_H
+
+#include <asm/page.h>
+#include <asm/schid.h>
+#include <asm/debug.h>
+#include "chsc.h"
+
+#define QDIO_BUSY_BIT_PATIENCE		(100 << 12)	/* 100 microseconds */
+#define QDIO_BUSY_BIT_RETRY_DELAY	10		/* 10 milliseconds */
+#define QDIO_BUSY_BIT_RETRIES		1000		/* = 10s retry time */
+#define QDIO_INPUT_THRESHOLD		(500 << 12)	/* 500 microseconds */
+
+enum qdio_irq_states {
+	QDIO_IRQ_STATE_INACTIVE,
+	QDIO_IRQ_STATE_ESTABLISHED,
+	QDIO_IRQ_STATE_ACTIVE,
+	QDIO_IRQ_STATE_STOPPED,
+	QDIO_IRQ_STATE_CLEANUP,
+	QDIO_IRQ_STATE_ERR,
+	NR_QDIO_IRQ_STATES,
+};
+
+/* used as intparm in do_IO */
+#define QDIO_DOING_ESTABLISH	1
+#define QDIO_DOING_ACTIVATE	2
+#define QDIO_DOING_CLEANUP	3
+
+#define SLSB_STATE_NOT_INIT	0x0
+#define SLSB_STATE_EMPTY	0x1
+#define SLSB_STATE_PRIMED	0x2
+#define SLSB_STATE_PENDING	0x3
+#define SLSB_STATE_HALTED	0xe
+#define SLSB_STATE_ERROR	0xf
+#define SLSB_TYPE_INPUT		0x0
+#define SLSB_TYPE_OUTPUT	0x20
+#define SLSB_OWNER_PROG		0x80
+#define SLSB_OWNER_CU		0x40
+
+#define SLSB_P_INPUT_NOT_INIT	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT)  /* 0x80 */
+#define SLSB_P_INPUT_ACK	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY)	   /* 0x81 */
+#define SLSB_CU_INPUT_EMPTY	\
+	(SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY)	   /* 0x41 */
+#define SLSB_P_INPUT_PRIMED	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED)	   /* 0x82 */
+#define SLSB_P_INPUT_HALTED	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED)	   /* 0x8e */
+#define SLSB_P_INPUT_ERROR	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR)	   /* 0x8f */
+#define SLSB_P_OUTPUT_NOT_INIT	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
+#define SLSB_P_OUTPUT_EMPTY	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY)	   /* 0xa1 */
+#define SLSB_P_OUTPUT_PENDING \
+	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_PENDING)  /* 0xa3 */
+#define SLSB_CU_OUTPUT_PRIMED	\
+	(SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED)	   /* 0x62 */
+#define SLSB_P_OUTPUT_HALTED	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED)   /* 0xae */
+#define SLSB_P_OUTPUT_ERROR	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR)	   /* 0xaf */
+
+#define SLSB_ERROR_DURING_LOOKUP  0xff
+
+/* additional CIWs returned by extended Sense-ID */
+#define CIW_TYPE_EQUEUE			0x3 /* establish QDIO queues */
+#define CIW_TYPE_AQUEUE			0x4 /* activate QDIO queues */
+
+/* flags for st qdio sch data */
+#define CHSC_FLAG_QDIO_CAPABILITY	0x80
+#define CHSC_FLAG_VALIDITY		0x40
+
+/* SIGA flags */
+#define QDIO_SIGA_WRITE		0x00
+#define QDIO_SIGA_READ		0x01
+#define QDIO_SIGA_SYNC		0x02
+#define QDIO_SIGA_WRITEQ	0x04
+#define QDIO_SIGA_QEBSM_FLAG	0x80
+
+#ifdef CONFIG_64BIT
+static inline int do_sqbs(u64 token, unsigned char state, int queue,
+			  int *start, int *count)
+{
+	register unsigned long _ccq asm ("0") = *count;
+	register unsigned long _token asm ("1") = token;
+	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+
+	asm volatile(
+		"	.insn	rsy,0xeb000000008A,%1,0,0(%2)"
+		: "+d" (_ccq), "+d" (_queuestart)
+		: "d" ((unsigned long)state), "d" (_token)
+		: "memory", "cc");
+	*count = _ccq & 0xff;
+	*start = _queuestart & 0xff;
+
+	return (_ccq >> 32) & 0xff;
+}
+
+static inline int do_eqbs(u64 token, unsigned char *state, int queue,
+			  int *start, int *count, int ack)
+{
+	register unsigned long _ccq asm ("0") = *count;
+	register unsigned long _token asm ("1") = token;
+	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+	unsigned long _state = (unsigned long)ack << 63;
+
+	asm volatile(
+		"	.insn	rrf,0xB99c0000,%1,%2,0,0"
+		: "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
+		: "d" (_token)
+		: "memory", "cc");
+	*count = _ccq & 0xff;
+	*start = _queuestart & 0xff;
+	*state = _state & 0xff;
+
+	return (_ccq >> 32) & 0xff;
+}
+#else
+static inline int do_sqbs(u64 token, unsigned char state, int queue,
+			  int *start, int *count) { return 0; }
+static inline int do_eqbs(u64 token, unsigned char *state, int queue,
+			  int *start, int *count, int ack) { return 0; }
+#endif /* CONFIG_64BIT */
+
+struct qdio_irq;
+
+struct siga_flag {
+	u8 input:1;
+	u8 output:1;
+	u8 sync:1;
+	u8 sync_after_ai:1;
+	u8 sync_out_after_pci:1;
+	u8:3;
+} __attribute__ ((packed));
+
+struct chsc_ssqd_area {
+	struct chsc_header request;
+	u16:10;
+	u8 ssid:2;
+	u8 fmt:4;
+	u16 first_sch;
+	u16:16;
+	u16 last_sch;
+	u32:32;
+	struct chsc_header response;
+	u32:32;
+	struct qdio_ssqd_desc qdio_ssqd;
+} __attribute__ ((packed));
+
+struct scssc_area {
+	struct chsc_header request;
+	u16 operation_code;
+	u16:16;
+	u32:32;
+	u32:32;
+	u64 summary_indicator_addr;
+	u64 subchannel_indicator_addr;
+	u32 ks:4;
+	u32 kc:4;
+	u32:21;
+	u32 isc:3;
+	u32 word_with_d_bit;
+	u32:32;
+	struct subchannel_id schid;
+	u32 reserved[1004];
+	struct chsc_header response;
+	u32:32;
+} __attribute__ ((packed));
+
+struct qdio_dev_perf_stat {
+	unsigned int adapter_int;
+	unsigned int qdio_int;
+	unsigned int pci_request_int;
+
+	unsigned int tasklet_inbound;
+	unsigned int tasklet_inbound_resched;
+	unsigned int tasklet_inbound_resched2;
+	unsigned int tasklet_outbound;
+
+	unsigned int siga_read;
+	unsigned int siga_write;
+	unsigned int siga_sync;
+
+	unsigned int inbound_call;
+	unsigned int inbound_handler;
+	unsigned int stop_polling;
+	unsigned int inbound_queue_full;
+	unsigned int outbound_call;
+	unsigned int outbound_handler;
+	unsigned int outbound_queue_full;
+	unsigned int fast_requeue;
+	unsigned int target_full;
+	unsigned int eqbs;
+	unsigned int eqbs_partial;
+	unsigned int sqbs;
+	unsigned int sqbs_partial;
+	unsigned int int_discarded;
+} ____cacheline_aligned;
+
+struct qdio_queue_perf_stat {
+	/*
+	 * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
+	 * Since at most 127 SBALs are scanned, the entry for 128 is reused
+	 * for "queue full", i.e. 127 SBALs found.
+	 */
+	unsigned int nr_sbals[8];
+	unsigned int nr_sbal_error;
+	unsigned int nr_sbal_nop;
+	unsigned int nr_sbal_total;
+};
+
+enum qdio_queue_irq_states {
+	QDIO_QUEUE_IRQS_DISABLED,
+};
+
+struct qdio_input_q {
+	/* input buffer acknowledgement flag */
+	int polling;
+	/* first ACK'ed buffer */
+	int ack_start;
+	/* how many SBALs are acknowledged with qebsm */
+	int ack_count;
+	/* last time of noticing incoming data */
+	u64 timestamp;
+	/* upper-layer polling flag */
+	unsigned long queue_irq_state;
+	/* callback to start upper-layer polling */
+	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
+};
+
+struct qdio_output_q {
+	/* PCIs are enabled for the queue */
+	int pci_out_enabled;
+	/* cq: use asynchronous output buffers */
+	int use_cq;
+	/* cq: aobs used for a particular SBAL */
+	struct qaob **aobs;
+	/* cq: sbal state related to asynchronous operation */
+	struct qdio_outbuf_state *sbal_state;
+	/* timer to check for more outbound work */
+	struct timer_list timer;
+	/* used SBALs before tasklet schedule */
+	int scan_threshold;
+};
+
+/*
+ * Note on cache alignment: slsb and the write-mostly data are grouped at the
+ * beginning; sbal[] is read-only, starts on a new cacheline and is followed
+ * by the read-mostly data.
+ */
+struct qdio_q {
+	struct slsb slsb;
+
+	union {
+		struct qdio_input_q in;
+		struct qdio_output_q out;
+	} u;
+
+	/*
+	 * inbound: next buffer the program should check for
+	 * outbound: next buffer to check if adapter processed it
+	 */
+	int first_to_check;
+
+	/* first_to_check of the last time */
+	int last_move;
+
+	/* beginning position for calling the program */
+	int first_to_kick;
+
+	/* number of buffers in use by the adapter */
+	atomic_t nr_buf_used;
+
+	/* error condition during a data transfer */
+	unsigned int qdio_error;
+
+	/* last scan of the queue */
+	u64 timestamp;
+
+	struct tasklet_struct tasklet;
+	struct qdio_queue_perf_stat q_stats;
+
+	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;
+
+	/* queue number */
+	int nr;
+
+	/* bitmask of queue number */
+	int mask;
+
+	/* input or output queue */
+	int is_input_q;
+
+	/* list of thinint input queues */
+	struct list_head entry;
+
+	/* upper-layer program handler */
+	qdio_handler_t (*handler);
+
+	struct dentry *debugfs_q;
+	struct qdio_irq *irq_ptr;
+	struct sl *sl;
+	/*
+	 * A page is allocated under this pointer and used for slib and sl.
+	 * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
+	 */
+	struct slib *slib;
+} __attribute__ ((aligned(256)));
+
+struct qdio_irq {
+	struct qib qib;
+	u32 *dsci;		/* address of device state change indicator */
+	struct ccw_device *cdev;
+	struct dentry *debugfs_dev;
+	struct dentry *debugfs_perf;
+
+	unsigned long int_parm;
+	struct subchannel_id schid;
+	unsigned long sch_token;	/* QEBSM facility */
+
+	enum qdio_irq_states state;
+
+	struct siga_flag siga_flag;	/* siga sync information from qdioac */
+
+	int nr_input_qs;
+	int nr_output_qs;
+
+	struct ccw1 ccw;
+	struct ciw equeue;
+	struct ciw aqueue;
+
+	struct qdio_ssqd_desc ssqd_desc;
+	void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
+
+	int perf_stat_enabled;
+
+	struct qdr *qdr;
+	unsigned long chsc_page;
+
+	struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
+	struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
+
+	debug_info_t *debug_area;
+	struct mutex setup_mutex;
+	struct qdio_dev_perf_stat perf_stat;
+};
+
+/* helper functions */
+#define queue_type(q)	q->irq_ptr->qib.qfmt
+#define SCH_NO(q)	(q->irq_ptr->schid.sch_no)
+
+#define is_thinint_irq(irq) \
+	(irq->qib.qfmt == QDIO_IQDIO_QFMT || \
+	 css_general_characteristics.aif_osa)
+
+#define qperf(__qdev, __attr)	((__qdev)->perf_stat.__attr)
+
+#define qperf_inc(__q, __attr)						\
+({									\
+	struct qdio_irq *qdev = (__q)->irq_ptr;				\
+	if (qdev->perf_stat_enabled)					\
+		(qdev->perf_stat.__attr)++;				\
+})
+
+static inline void account_sbals_error(struct qdio_q *q, int count)
+{
+	q->q_stats.nr_sbal_error += count;
+	q->q_stats.nr_sbal_total += count;
+}
+
+/* the highest iqdio queue is used for multicast */
+static inline int multicast_outbound(struct qdio_q *q)
+{
+	return (q->irq_ptr->nr_output_qs > 1) &&
+	       (q->nr == q->irq_ptr->nr_output_qs - 1);
+}
+
+#define pci_out_supported(q) \
+	(q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
+#define is_qebsm(q)			(q->irq_ptr->sch_token != 0)
+
+#define need_siga_in(q)			(q->irq_ptr->siga_flag.input)
+#define need_siga_out(q)		(q->irq_ptr->siga_flag.output)
+#define need_siga_sync(q)		(unlikely(q->irq_ptr->siga_flag.sync))
+#define need_siga_sync_after_ai(q)	\
+	(unlikely(q->irq_ptr->siga_flag.sync_after_ai))
+#define need_siga_sync_out_after_pci(q)	\
+	(unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
+
+#define for_each_input_queue(irq_ptr, q, i)	\
+	for (i = 0, q = irq_ptr->input_qs[0];	\
+		i < irq_ptr->nr_input_qs;	\
+		q = irq_ptr->input_qs[++i])
+#define for_each_output_queue(irq_ptr, q, i)	\
+	for (i = 0, q = irq_ptr->output_qs[0];	\
+		i < irq_ptr->nr_output_qs;	\
+		q = irq_ptr->output_qs[++i])
+
+#define prev_buf(bufnr)	\
+	((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
+#define next_buf(bufnr)	\
+	((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
+#define add_buf(bufnr, inc) \
+	((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
+#define sub_buf(bufnr, dec) \
+	((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
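+
+/*
+ * Assuming QDIO_MAX_BUFFERS_MASK is 127 (128 buffers per queue), the ring
+ * arithmetic wraps as expected, e.g.:
+ *	next_buf(127) == 0, prev_buf(0) == 127, add_buf(120, 10) == 2
+ */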
+
+#define queue_irqs_enabled(q)			\
+	(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
+#define queue_irqs_disabled(q)			\
+	(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
+
+extern u64 last_ai_time;
+
+/* prototypes for thin interrupt */
+void qdio_setup_thinint(struct qdio_irq *irq_ptr);
+int qdio_establish_thinint(struct qdio_irq *irq_ptr);
+void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
+void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
+void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
+void tiqdio_inbound_processing(unsigned long q);
+int tiqdio_allocate_memory(void);
+void tiqdio_free_memory(void);
+int tiqdio_register_thinints(void);
+void tiqdio_unregister_thinints(void);
+void clear_nonshared_ind(struct qdio_irq *);
+int test_nonshared_ind(struct qdio_irq *);
+
+/* prototypes for setup */
+void qdio_inbound_processing(unsigned long data);
+void qdio_outbound_processing(unsigned long data);
+void qdio_outbound_timer(unsigned long data);
+void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+		      struct irb *irb);
+int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
+		     int nr_output_qs);
+void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
+int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
+			struct subchannel_id *schid,
+			struct qdio_ssqd_desc *data);
+int qdio_setup_irq(struct qdio_initialize *init_data);
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
+				struct ccw_device *cdev);
+void qdio_release_memory(struct qdio_irq *irq_ptr);
+int qdio_setup_create_sysfs(struct ccw_device *cdev);
+void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
+int qdio_setup_init(void);
+void qdio_setup_exit(void);
+int qdio_enable_async_operation(struct qdio_output_q *q);
+void qdio_disable_async_operation(struct qdio_output_q *q);
+struct qaob *qdio_allocate_aob(void);
+
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+			unsigned char *state);
+#endif /* _CIO_QDIO_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio_debug.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio_debug.c
new file mode 100644
index 0000000..29021f4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio_debug.c
@@ -0,0 +1,309 @@
+/*
+ *  drivers/s390/cio/qdio_debug.c
+ *
+ *  Copyright IBM Corp. 2008,2009
+ *
+ *  Author: Jan Glauber (jang@linux.vnet.ibm.com)
+ */
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <asm/debug.h>
+#include "qdio_debug.h"
+#include "qdio.h"
+
+debug_info_t *qdio_dbf_setup;
+debug_info_t *qdio_dbf_error;
+
+static struct dentry *debugfs_root;
+#define QDIO_DEBUGFS_NAME_LEN	10
+
+void qdio_allocate_dbf(struct qdio_initialize *init_data,
+		       struct qdio_irq *irq_ptr)
+{
+	char text[20];
+
+	DBF_EVENT("qfmt:%1d", init_data->q_format);
+	DBF_HEX(init_data->adapter_name, 8);
+	DBF_EVENT("qpff%4x", init_data->qib_param_field_format);
+	DBF_HEX(&init_data->qib_param_field, sizeof(void *));
+	DBF_HEX(&init_data->input_slib_elements, sizeof(void *));
+	DBF_HEX(&init_data->output_slib_elements, sizeof(void *));
+	DBF_EVENT("niq:%1d noq:%1d", init_data->no_input_qs,
+		  init_data->no_output_qs);
+	DBF_HEX(&init_data->input_handler, sizeof(void *));
+	DBF_HEX(&init_data->output_handler, sizeof(void *));
+	DBF_HEX(&init_data->int_parm, sizeof(long));
+	DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *));
+	DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *));
+	DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
+
+	/* allocate trace view for the interface */
+	snprintf(text, 20, "qdio_%s", dev_name(&init_data->cdev->dev));
+	irq_ptr->debug_area = debug_register(text, 2, 1, 16);
+	debug_register_view(irq_ptr->debug_area, &debug_hex_ascii_view);
+	debug_set_level(irq_ptr->debug_area, DBF_WARN);
+	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+}
+
+static int qstat_show(struct seq_file *m, void *v)
+{
+	unsigned char state;
+	struct qdio_q *q = m->private;
+	int i;
+
+	if (!q)
+		return 0;
+
+	seq_printf(m, "Timestamp: %Lx  Last AI: %Lx\n",
+		   q->timestamp, last_ai_time);
+	seq_printf(m, "nr_used: %d  ftc: %d  last_move: %d\n",
+		   atomic_read(&q->nr_buf_used),
+		   q->first_to_check, q->last_move);
+	if (q->is_input_q) {
+		seq_printf(m, "polling: %d  ack start: %d  ack count: %d\n",
+			   q->u.in.polling, q->u.in.ack_start,
+			   q->u.in.ack_count);
+		seq_printf(m, "DSCI: %d   IRQs disabled: %u\n",
+			   *(u32 *)q->irq_ptr->dsci,
+			   test_bit(QDIO_QUEUE_IRQS_DISABLED,
+			   &q->u.in.queue_irq_state));
+	}
+	seq_printf(m, "SBAL states:\n");
+	seq_printf(m, "|0      |8      |16     |24     |32     |40     |48     |56  63|\n");
+
+	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
+		debug_get_buf_state(q, i, &state);
+		switch (state) {
+		case SLSB_P_INPUT_NOT_INIT:
+		case SLSB_P_OUTPUT_NOT_INIT:
+			seq_printf(m, "N");
+			break;
+		case SLSB_P_OUTPUT_PENDING:
+			seq_printf(m, "P");
+			break;
+		case SLSB_P_INPUT_PRIMED:
+		case SLSB_CU_OUTPUT_PRIMED:
+			seq_printf(m, "+");
+			break;
+		case SLSB_P_INPUT_ACK:
+			seq_printf(m, "A");
+			break;
+		case SLSB_P_INPUT_ERROR:
+		case SLSB_P_OUTPUT_ERROR:
+			seq_printf(m, "x");
+			break;
+		case SLSB_CU_INPUT_EMPTY:
+		case SLSB_P_OUTPUT_EMPTY:
+			seq_printf(m, "-");
+			break;
+		case SLSB_P_INPUT_HALTED:
+		case SLSB_P_OUTPUT_HALTED:
+			seq_printf(m, ".");
+			break;
+		default:
+			seq_printf(m, "?");
+		}
+		if (i == 63)
+			seq_printf(m, "\n");
+	}
+	seq_printf(m, "\n");
+	seq_printf(m, "|64     |72     |80     |88     |96     |104    |112    |   127|\n");
+
+	seq_printf(m, "\nSBAL statistics:");
+	if (!q->irq_ptr->perf_stat_enabled) {
+		seq_printf(m, " disabled\n");
+		return 0;
+	}
+
+	seq_printf(m, "\n1          2..        4..        8..        "
+		   "16..       32..       64..       127\n");
+	for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
+		seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
+	seq_printf(m, "\nError      NOP        Total\n%-10u %-10u %-10u\n\n",
+		   q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
+		   q->q_stats.nr_sbal_total);
+	return 0;
+}
+
+static int qstat_seq_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, qstat_show,
+			   filp->f_path.dentry->d_inode->i_private);
+}
+
+static const struct file_operations debugfs_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = qstat_seq_open,
+	.read	 = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+
+static char *qperf_names[] = {
+	"Assumed adapter interrupts",
+	"QDIO interrupts",
+	"Requested PCIs",
+	"Inbound tasklet runs",
+	"Inbound tasklet resched",
+	"Inbound tasklet resched2",
+	"Outbound tasklet runs",
+	"SIGA read",
+	"SIGA write",
+	"SIGA sync",
+	"Inbound calls",
+	"Inbound handler",
+	"Inbound stop_polling",
+	"Inbound queue full",
+	"Outbound calls",
+	"Outbound handler",
+	"Outbound queue full",
+	"Outbound fast_requeue",
+	"Outbound target_full",
+	"QEBSM eqbs",
+	"QEBSM eqbs partial",
+	"QEBSM sqbs",
+	"QEBSM sqbs partial",
+	"Discarded interrupts"
+};
+
+static int qperf_show(struct seq_file *m, void *v)
+{
+	struct qdio_irq *irq_ptr = m->private;
+	unsigned int *stat;
+	int i;
+
+	if (!irq_ptr)
+		return 0;
+	if (!irq_ptr->perf_stat_enabled) {
+		seq_printf(m, "disabled\n");
+		return 0;
+	}
+	stat = (unsigned int *)&irq_ptr->perf_stat;
+
+	for (i = 0; i < ARRAY_SIZE(qperf_names); i++)
+		seq_printf(m, "%26s:\t%u\n",
+			   qperf_names[i], *(stat + i));
+	return 0;
+}
+
+static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
+			       size_t count, loff_t *off)
+{
+	struct seq_file *seq = file->private_data;
+	struct qdio_irq *irq_ptr = seq->private;
+	struct qdio_q *q;
+	unsigned long val;
+	int ret, i;
+
+	if (!irq_ptr)
+		return 0;
+
+	ret = kstrtoul_from_user(ubuf, count, 10, &val);
+	if (ret)
+		return ret;
+
+	switch (val) {
+	case 0:
+		irq_ptr->perf_stat_enabled = 0;
+		memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+		for_each_input_queue(irq_ptr, q, i)
+			memset(&q->q_stats, 0, sizeof(q->q_stats));
+		for_each_output_queue(irq_ptr, q, i)
+			memset(&q->q_stats, 0, sizeof(q->q_stats));
+		break;
+	case 1:
+		irq_ptr->perf_stat_enabled = 1;
+		break;
+	}
+	return count;
+}
+
+static int qperf_seq_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, qperf_show,
+			   filp->f_path.dentry->d_inode->i_private);
+}
+
+static struct file_operations debugfs_perf_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = qperf_seq_open,
+	.read	 = seq_read,
+	.write	 = qperf_seq_write,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
+{
+	char name[QDIO_DEBUGFS_NAME_LEN];
+
+	snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
+		 q->is_input_q ? "input" : "output",
+		 q->nr);
+	q->debugfs_q = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
+				q->irq_ptr->debugfs_dev, q, &debugfs_fops);
+	if (IS_ERR(q->debugfs_q))
+		q->debugfs_q = NULL;
+}
+
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
+{
+	struct qdio_q *q;
+	int i;
+
+	irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev),
+						  debugfs_root);
+	if (IS_ERR(irq_ptr->debugfs_dev))
+		irq_ptr->debugfs_dev = NULL;
+
+	irq_ptr->debugfs_perf = debugfs_create_file("statistics",
+				S_IFREG | S_IRUGO | S_IWUSR,
+				irq_ptr->debugfs_dev, irq_ptr,
+				&debugfs_perf_fops);
+	if (IS_ERR(irq_ptr->debugfs_perf))
+		irq_ptr->debugfs_perf = NULL;
+
+	for_each_input_queue(irq_ptr, q, i)
+		setup_debugfs_entry(q, cdev);
+	for_each_output_queue(irq_ptr, q, i)
+		setup_debugfs_entry(q, cdev);
+}
+
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
+{
+	struct qdio_q *q;
+	int i;
+
+	for_each_input_queue(irq_ptr, q, i)
+		debugfs_remove(q->debugfs_q);
+	for_each_output_queue(irq_ptr, q, i)
+		debugfs_remove(q->debugfs_q);
+	debugfs_remove(irq_ptr->debugfs_perf);
+	debugfs_remove(irq_ptr->debugfs_dev);
+}
+
+int __init qdio_debug_init(void)
+{
+	debugfs_root = debugfs_create_dir("qdio", NULL);
+
+	qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16);
+	debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
+	debug_set_level(qdio_dbf_setup, DBF_INFO);
+	DBF_EVENT("dbf created\n");
+
+	qdio_dbf_error = debug_register("qdio_error", 4, 1, 16);
+	debug_register_view(qdio_dbf_error, &debug_hex_ascii_view);
+	debug_set_level(qdio_dbf_error, DBF_INFO);
+	DBF_ERROR("dbf created\n");
+	return 0;
+}
+
+void qdio_debug_exit(void)
+{
+	debugfs_remove(debugfs_root);
+	if (qdio_dbf_setup)
+		debug_unregister(qdio_dbf_setup);
+	if (qdio_dbf_error)
+		debug_unregister(qdio_dbf_error);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio_debug.h b/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio_debug.h
new file mode 100644
index 0000000..5d70bd1
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio_debug.h
@@ -0,0 +1,83 @@
+/*
+ *  drivers/s390/cio/qdio_debug.h
+ *
+ *  Copyright IBM Corp. 2008
+ *
+ *  Author: Jan Glauber (jang@linux.vnet.ibm.com)
+ */
+#ifndef QDIO_DEBUG_H
+#define QDIO_DEBUG_H
+
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include "qdio.h"
+
+/* that gives us 15 characters in the text event views */
+#define QDIO_DBF_LEN	16
+
+extern debug_info_t *qdio_dbf_setup;
+extern debug_info_t *qdio_dbf_error;
+
+/* sort out low debug levels early to avoid wasted sprints */
+static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
+{
+	return (level <= dbf_grp->level);
+}
+
+#define DBF_ERR		3	/* error conditions	*/
+#define DBF_WARN	4	/* warning conditions	*/
+#define DBF_INFO	6	/* informational	*/
+
+#undef DBF_EVENT
+#undef DBF_ERROR
+#undef DBF_DEV_EVENT
+
+#define DBF_EVENT(text...) \
+	do { \
+		char debug_buffer[QDIO_DBF_LEN]; \
+		snprintf(debug_buffer, QDIO_DBF_LEN, text); \
+		debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \
+	} while (0)
+
+#define DBF_HEX(addr, len) \
+	do { \
+		debug_event(qdio_dbf_setup, DBF_ERR, (void*)(addr), len); \
+	} while (0)
+
+#define DBF_ERROR(text...) \
+	do { \
+		char debug_buffer[QDIO_DBF_LEN]; \
+		snprintf(debug_buffer, QDIO_DBF_LEN, text); \
+		debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \
+	} while (0)
+
+#define DBF_ERROR_HEX(addr, len) \
+	do { \
+		debug_event(qdio_dbf_error, DBF_ERR, (void*)(addr), len); \
+	} while (0)
+
+
+#define DBF_DEV_EVENT(level, device, text...) \
+	do { \
+		char debug_buffer[QDIO_DBF_LEN]; \
+		if (qdio_dbf_passes(device->debug_area, level)) { \
+			snprintf(debug_buffer, QDIO_DBF_LEN, text); \
+			debug_text_event(device->debug_area, level, debug_buffer); \
+		} \
+	} while (0)
+
+#define DBF_DEV_HEX(level, device, addr, len) \
+	do { \
+		debug_event(device->debug_area, level, (void*)(addr), len); \
+	} while (0)
+
+void qdio_allocate_dbf(struct qdio_initialize *init_data,
+		       struct qdio_irq *irq_ptr);
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
+			      struct ccw_device *cdev);
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr,
+				 struct ccw_device *cdev);
+int qdio_debug_init(void);
+void qdio_debug_exit(void);
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio_main.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio_main.c
new file mode 100644
index 0000000..35c685c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio_main.c
@@ -0,0 +1,1866 @@
+/*
+ * linux/drivers/s390/cio/qdio_main.c
+ *
+ * Linux for s390 qdio support, buffer handling, qdio API and module support.
+ *
+ * Copyright 2000,2008 IBM Corp.
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ *	      Jan Glauber <jang@linux.vnet.ibm.com>
+ * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include <asm/ipl.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+
+MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
+	"Jan Glauber <jang@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("QDIO base support");
+MODULE_LICENSE("GPL");
+
+static inline int do_siga_sync(unsigned long schid,
+			       unsigned int out_mask, unsigned int in_mask,
+			       unsigned int fc)
+{
+	register unsigned long __fc asm ("0") = fc;
+	register unsigned long __schid asm ("1") = schid;
+	register unsigned long out asm ("2") = out_mask;
+	register unsigned long in asm ("3") = in_mask;
+	int cc;
+
+	asm volatile(
+		"	siga	0\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (cc)
+		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
+	return cc;
+}
+
+static inline int do_siga_input(unsigned long schid, unsigned int mask,
+				unsigned int fc)
+{
+	register unsigned long __fc asm ("0") = fc;
+	register unsigned long __schid asm ("1") = schid;
+	register unsigned long __mask asm ("2") = mask;
+	int cc;
+
+	asm volatile(
+		"	siga	0\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (cc)
+		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
+	return cc;
+}
+
+/**
+ * do_siga_output - perform SIGA-w/wt function
+ * @schid: subchannel id or in case of QEBSM the subchannel token
+ * @mask: which output queues to process
+ * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
+ * @fc: function code to perform
+ *
+ * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
+ * Note: For IQDC unicast queues only the highest priority queue is processed.
+ */
+static inline int do_siga_output(unsigned long schid, unsigned long mask,
+				 unsigned int *bb, unsigned int fc,
+				 unsigned long aob)
+{
+	register unsigned long __fc asm("0") = fc;
+	register unsigned long __schid asm("1") = schid;
+	register unsigned long __mask asm("2") = mask;
+	register unsigned long __aob asm("3") = aob;
+	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
+
+	asm volatile(
+		"	siga	0\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask),
+		  "+d" (__aob)
+		: : "cc", "memory");
+	*bb = ((unsigned int) __fc) >> 31;
+	return cc;
+}
+
+static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
+{
+	/* all done or next buffer state different */
+	if (ccq == 0 || ccq == 32)
+		return 0;
+	/* no buffer processed */
+	if (ccq == 97)
+		return 1;
+	/* not all buffers processed */
+	if (ccq == 96)
+		return 2;
+	/* notify devices immediately */
+	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
+	return -EIO;
+}
+
+/**
+ * qdio_do_eqbs - extract buffer states for QEBSM
+ * @q: queue to manipulate
+ * @state: state of the extracted buffers
+ * @start: buffer number to start at
+ * @count: count of buffers to examine
+ * @auto_ack: automatically acknowledge buffers
+ *
+ * Returns the number of successfully extracted equal buffer states.
+ * Stops processing if a state is different from the last buffer's state.
+ */
+static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
+			int start, int count, int auto_ack)
+{
+	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
+	unsigned int ccq = 0;
+
+	BUG_ON(!q->irq_ptr->sch_token);
+	qperf_inc(q, eqbs);
+
+	if (!q->is_input_q)
+		nr += q->irq_ptr->nr_input_qs;
+again:
+	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
+		      auto_ack);
+	rc = qdio_check_ccq(q, ccq);
+	if (!rc)
+		return count - tmp_count;
+
+	if (rc == 1) {
+		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
+		goto again;
+	}
+
+	if (rc == 2) {
+		BUG_ON(tmp_count == count);
+		qperf_inc(q, eqbs_partial);
+		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+			tmp_count);
+		/*
+		 * Retry once; if that fails, bail out and process the
+		 * extracted buffers before trying again.
+		 */
+		if (!retried++)
+			goto again;
+		else
+			return count - tmp_count;
+	}
+
+	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
+	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
+	return 0;
+}
+
+/**
+ * qdio_do_sqbs - set buffer states for QEBSM
+ * @q: queue to manipulate
+ * @state: new state of the buffers
+ * @start: first buffer number to change
+ * @count: how many buffers to change
+ *
+ * Returns the number of successfully changed buffers.
+ * Retries until the specified count of buffer states is set or an
+ * error occurs.
+ */
+static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
+			int count)
+{
+	unsigned int ccq = 0;
+	int tmp_count = count, tmp_start = start;
+	int nr = q->nr;
+	int rc;
+
+	if (!count)
+		return 0;
+
+	BUG_ON(!q->irq_ptr->sch_token);
+	qperf_inc(q, sqbs);
+
+	if (!q->is_input_q)
+		nr += q->irq_ptr->nr_input_qs;
+again:
+	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
+	rc = qdio_check_ccq(q, ccq);
+	if (!rc) {
+		WARN_ON(tmp_count);
+		return count - tmp_count;
+	}
+
+	if (rc == 1 || rc == 2) {
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
+		qperf_inc(q, sqbs_partial);
+		goto again;
+	}
+
+	DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
+	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
+	return 0;
+}
+
+/* returns number of examined buffers and their common state in *state */
+static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
+				 unsigned char *state, unsigned int count,
+				 int auto_ack, int merge_pending)
+{
+	unsigned char __state = 0;
+	int i;
+
+	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
+	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
+
+	if (is_qebsm(q))
+		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
+
+	for (i = 0; i < count; i++) {
+		if (!__state) {
+			__state = q->slsb.val[bufnr];
+			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+				__state = SLSB_P_OUTPUT_EMPTY;
+		} else if (merge_pending) {
+			if ((q->slsb.val[bufnr] & __state) != __state)
+				break;
+		} else if (q->slsb.val[bufnr] != __state)
+			break;
+		bufnr = next_buf(bufnr);
+	}
+	*state = __state;
+	return i;
+}
+
+static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
+				unsigned char *state, int auto_ack)
+{
+	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
+}
+
+/* wrap-around safe setting of slsb states, returns number of changed buffers */
+static inline int set_buf_states(struct qdio_q *q, int bufnr,
+				 unsigned char state, int count)
+{
+	int i;
+
+	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
+	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
+
+	if (is_qebsm(q))
+		return qdio_do_sqbs(q, state, bufnr, count);
+
+	for (i = 0; i < count; i++) {
+		xchg(&q->slsb.val[bufnr], state);
+		bufnr = next_buf(bufnr);
+	}
+	return count;
+}
+
+static inline int set_buf_state(struct qdio_q *q, int bufnr,
+				unsigned char state)
+{
+	return set_buf_states(q, bufnr, state, 1);
+}
+
+/* set slsb states to initial state */
+static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
+{
+	struct qdio_q *q;
+	int i;
+
+	for_each_input_queue(irq_ptr, q, i)
+		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
+			       QDIO_MAX_BUFFERS_PER_Q);
+	for_each_output_queue(irq_ptr, q, i)
+		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
+			       QDIO_MAX_BUFFERS_PER_Q);
+}
+
+static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
+			  unsigned int input)
+{
+	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+	unsigned int fc = QDIO_SIGA_SYNC;
+	int cc;
+
+	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
+	qperf_inc(q, siga_sync);
+
+	if (is_qebsm(q)) {
+		schid = q->irq_ptr->sch_token;
+		fc |= QDIO_SIGA_QEBSM_FLAG;
+	}
+
+	cc = do_siga_sync(schid, output, input, fc);
+	if (unlikely(cc))
+		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
+	return cc;
+}
+
+static inline int qdio_siga_sync_q(struct qdio_q *q)
+{
+	if (q->is_input_q)
+		return qdio_siga_sync(q, 0, q->mask);
+	else
+		return qdio_siga_sync(q, q->mask, 0);
+}
+
+static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
+	unsigned long aob)
+{
+	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+	unsigned int fc = QDIO_SIGA_WRITE;
+	u64 start_time = 0;
+	int retries = 0, cc;
+	unsigned long laob = 0;
+
+	if (q->u.out.use_cq && aob != 0) {
+		fc = QDIO_SIGA_WRITEQ;
+		laob = aob;
+	}
+
+	if (is_qebsm(q)) {
+		schid = q->irq_ptr->sch_token;
+		fc |= QDIO_SIGA_QEBSM_FLAG;
+	}
+again:
+	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
+		(aob && fc != QDIO_SIGA_WRITEQ));
+	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
+
+	/* hipersocket busy condition */
+	if (unlikely(*busy_bit)) {
+		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
+		retries++;
+
+		if (!start_time) {
+			start_time = get_clock();
+			goto again;
+		}
+		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+			goto again;
+	}
+	if (retries) {
+		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
+			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
+		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
+	}
+	return cc;
+}
+
+static inline int qdio_siga_input(struct qdio_q *q)
+{
+	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+	unsigned int fc = QDIO_SIGA_READ;
+	int cc;
+
+	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
+	qperf_inc(q, siga_read);
+
+	if (is_qebsm(q)) {
+		schid = q->irq_ptr->sch_token;
+		fc |= QDIO_SIGA_QEBSM_FLAG;
+	}
+
+	cc = do_siga_input(schid, q->mask, fc);
+	if (unlikely(cc))
+		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
+	return cc;
+}
+
+#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
+#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
+
+static inline void qdio_sync_queues(struct qdio_q *q)
+{
+	/* PCI capable outbound queues will also be scanned so sync them too */
+	if (pci_out_supported(q))
+		qdio_siga_sync_all(q);
+	else
+		qdio_siga_sync_q(q);
+}
+
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+			unsigned char *state)
+{
+	if (need_siga_sync(q))
+		qdio_siga_sync_q(q);
+	return get_buf_states(q, bufnr, state, 1, 0, 0);
+}
+
+static inline void qdio_stop_polling(struct qdio_q *q)
+{
+	if (!q->u.in.polling)
+		return;
+
+	q->u.in.polling = 0;
+	qperf_inc(q, stop_polling);
+
+	/* show the card that we are not polling anymore */
+	if (is_qebsm(q)) {
+		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
+			       q->u.in.ack_count);
+		q->u.in.ack_count = 0;
+	} else
+		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
+}
+
+static inline void account_sbals(struct qdio_q *q, int count)
+{
+	int pos = 0;
+
+	q->q_stats.nr_sbal_total += count;
+	if (count == QDIO_MAX_BUFFERS_MASK) {
+		q->q_stats.nr_sbals[7]++;
+		return;
+	}
+	while (count >>= 1)
+		pos++;
+	q->q_stats.nr_sbals[pos]++;
+}
+
+static void process_buffer_error(struct qdio_q *q, int count)
+{
+	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
+					SLSB_P_OUTPUT_NOT_INIT;
+
+	q->qdio_error |= QDIO_ERROR_SLSB_STATE;
+
+	/* special handling for no target buffer empty */
+	if ((!q->is_input_q &&
+	    (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
+		qperf_inc(q, target_full);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
+			      q->first_to_check);
+		goto set;
+	}
+
+	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
+	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
+	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
+	DBF_ERROR("F14:%2x F15:%2x",
+		  q->sbal[q->first_to_check]->element[14].sflags,
+		  q->sbal[q->first_to_check]->element[15].sflags);
+
+set:
+	/*
+	 * Interrupts may be avoided as long as the error is present
+	 * so change the buffer state immediately to avoid starvation.
+	 */
+	set_buf_states(q, q->first_to_check, state, count);
+}
+
+static inline void inbound_primed(struct qdio_q *q, int count)
+{
+	int new;
+
+	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);
+
+	/* for QEBSM the ACK was already set by EQBS */
+	if (is_qebsm(q)) {
+		if (!q->u.in.polling) {
+			q->u.in.polling = 1;
+			q->u.in.ack_count = count;
+			q->u.in.ack_start = q->first_to_check;
+			return;
+		}
+
+		/* delete the previous ACKs */
+		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
+			       q->u.in.ack_count);
+		q->u.in.ack_count = count;
+		q->u.in.ack_start = q->first_to_check;
+		return;
+	}
+
+	/*
+	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
+	 * or by the next inbound run.
+	 */
+	new = add_buf(q->first_to_check, count - 1);
+	if (q->u.in.polling) {
+		/* reset the previous ACK but first set the new one */
+		set_buf_state(q, new, SLSB_P_INPUT_ACK);
+		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
+	} else {
+		q->u.in.polling = 1;
+		set_buf_state(q, new, SLSB_P_INPUT_ACK);
+	}
+
+	q->u.in.ack_start = new;
+	count--;
+	if (!count)
+		return;
+	/* need to change ALL buffers to get more interrupts */
+	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
+}
+
+static int get_inbound_buffer_frontier(struct qdio_q *q)
+{
+	int count, stop;
+	unsigned char state = 0;
+
+	q->timestamp = get_clock_fast();
+
+	/*
+	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
+	 * would return 0.
+	 */
+	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+	stop = add_buf(q->first_to_check, count);
+
+	if (q->first_to_check == stop)
+		goto out;
+
+	/*
+	 * No siga-sync needed here: the queues were already synced, either
+	 * by the PCI interrupt or by us after a thin interrupt.
+	 */
+	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
+	if (!count)
+		goto out;
+
+	switch (state) {
+	case SLSB_P_INPUT_PRIMED:
+		inbound_primed(q, count);
+		q->first_to_check = add_buf(q->first_to_check, count);
+		if (atomic_sub(count, &q->nr_buf_used) == 0)
+			qperf_inc(q, inbound_queue_full);
+		if (q->irq_ptr->perf_stat_enabled)
+			account_sbals(q, count);
+		break;
+	case SLSB_P_INPUT_ERROR:
+		process_buffer_error(q, count);
+		q->first_to_check = add_buf(q->first_to_check, count);
+		atomic_sub(count, &q->nr_buf_used);
+		if (q->irq_ptr->perf_stat_enabled)
+			account_sbals_error(q, count);
+		break;
+	case SLSB_CU_INPUT_EMPTY:
+	case SLSB_P_INPUT_NOT_INIT:
+	case SLSB_P_INPUT_ACK:
+		if (q->irq_ptr->perf_stat_enabled)
+			q->q_stats.nr_sbal_nop++;
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
+		break;
+	default:
+		BUG();
+	}
+out:
+	return q->first_to_check;
+}
+
+static int qdio_inbound_q_moved(struct qdio_q *q)
+{
+	int bufnr;
+
+	bufnr = get_inbound_buffer_frontier(q);
+
+	if ((bufnr != q->last_move) || q->qdio_error) {
+		q->last_move = bufnr;
+		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
+			q->u.in.timestamp = get_clock();
+		return 1;
+	} else
+		return 0;
+}
+
+static inline int qdio_inbound_q_done(struct qdio_q *q)
+{
+	unsigned char state = 0;
+
+	if (!atomic_read(&q->nr_buf_used))
+		return 1;
+
+	if (need_siga_sync(q))
+		qdio_siga_sync_q(q);
+	get_buf_state(q, q->first_to_check, &state, 0);
+
+	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
+		/* more work coming */
+		return 0;
+
+	if (is_thinint_irq(q->irq_ptr))
+		return 1;
+
+	/* don't poll under z/VM */
+	if (MACHINE_IS_VM)
+		return 1;
+
+	/*
+	 * At this point we know that inbound first_to_check
+	 * has (probably) not moved (see qdio_inbound_processing).
+	 */
+	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
+			      q->first_to_check);
+		return 1;
+	} else
+		return 0;
+}
+
+static inline int contains_aobs(struct qdio_q *q)
+{
+	return !q->is_input_q && q->u.out.use_cq;
+}
+
+static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
+				int i, struct qaob *aob)
+{
+	int tmp;
+
+	DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
+			(unsigned long) virt_to_phys(aob));
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
+			(unsigned long) aob->res0[0]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
+			(unsigned long) aob->res0[1]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
+			(unsigned long) aob->res0[2]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
+			(unsigned long) aob->res0[3]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
+			(unsigned long) aob->res0[4]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
+			(unsigned long) aob->res0[5]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
+	DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
+	DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
+	DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
+	DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
+	for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
+		DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
+				(unsigned long) aob->sba[tmp]);
+		DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
+				(unsigned long) q->sbal[i]->element[tmp].addr);
+		DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
+		DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
+				q->sbal[i]->element[tmp].length);
+	}
+	DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
+	for (tmp = 0; tmp < 2; ++tmp) {
+		DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
+			(unsigned long) aob->res4[tmp]);
+	}
+	DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
+	DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
+}
+
+static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
+{
+	unsigned char state = 0;
+	int j, b = start;
+
+	if (!contains_aobs(q))
+		return;
+
+	for (j = 0; j < count; ++j) {
+		get_buf_state(q, b, &state, 0);
+		if (state == SLSB_P_OUTPUT_PENDING) {
+			struct qaob *aob = q->u.out.aobs[b];
+			if (aob == NULL)
+				continue;
+
+			BUG_ON(q->u.out.sbal_state == NULL);
+			q->u.out.sbal_state[b].flags |=
+				QDIO_OUTBUF_STATE_FLAG_PENDING;
+			q->u.out.aobs[b] = NULL;
+		} else if (state == SLSB_P_OUTPUT_EMPTY) {
+			BUG_ON(q->u.out.sbal_state == NULL);
+			q->u.out.sbal_state[b].aob = NULL;
+		}
+		b = next_buf(b);
+	}
+}
+
+static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
+					int bufnr)
+{
+	unsigned long phys_aob = 0;
+
+	if (!q->use_cq)
+		goto out;
+
+	if (!q->aobs[bufnr]) {
+		struct qaob *aob = qdio_allocate_aob();
+		q->aobs[bufnr] = aob;
+	}
+	if (q->aobs[bufnr]) {
+		BUG_ON(q->sbal_state == NULL);
+		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
+		q->sbal_state[bufnr].aob = q->aobs[bufnr];
+		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
+		phys_aob = virt_to_phys(q->aobs[bufnr]);
+		BUG_ON(phys_aob & 0xFF);
+	}
+
+out:
+	return phys_aob;
+}
+
+static void qdio_kick_handler(struct qdio_q *q)
+{
+	int start = q->first_to_kick;
+	int end = q->first_to_check;
+	int count;
+
+	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+		return;
+
+	count = sub_buf(end, start);
+
+	if (q->is_input_q) {
+		qperf_inc(q, inbound_handler);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
+	} else {
+		qperf_inc(q, outbound_handler);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
+			      start, count);
+	}
+
+	qdio_handle_aobs(q, start, count);
+
+	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
+		   q->irq_ptr->int_parm);
+
+	/* for the next time */
+	q->first_to_kick = end;
+	q->qdio_error = 0;
+}
+
+static void __qdio_inbound_processing(struct qdio_q *q)
+{
+	qperf_inc(q, tasklet_inbound);
+
+	if (!qdio_inbound_q_moved(q))
+		return;
+
+	qdio_kick_handler(q);
+
+	if (!qdio_inbound_q_done(q)) {
+		/* means poll time is not yet over */
+		qperf_inc(q, tasklet_inbound_resched);
+		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
+			tasklet_schedule(&q->tasklet);
+			return;
+		}
+	}
+
+	qdio_stop_polling(q);
+	/*
+	 * We need to check again to not lose initiative after
+	 * resetting the ACK state.
+	 */
+	if (!qdio_inbound_q_done(q)) {
+		qperf_inc(q, tasklet_inbound_resched2);
+		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+			tasklet_schedule(&q->tasklet);
+	}
+}
+
+void qdio_inbound_processing(unsigned long data)
+{
+	struct qdio_q *q = (struct qdio_q *)data;
+	__qdio_inbound_processing(q);
+}
+
+static int get_outbound_buffer_frontier(struct qdio_q *q)
+{
+	int count, stop;
+	unsigned char state = 0;
+
+	q->timestamp = get_clock_fast();
+
+	if (need_siga_sync(q))
+		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
+		    !pci_out_supported(q)) ||
+		    (queue_type(q) == QDIO_IQDIO_QFMT &&
+		    multicast_outbound(q)))
+			qdio_siga_sync_q(q);
+
+	/*
+	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
+	 * would return 0.
+	 */
+	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+	stop = add_buf(q->first_to_check, count);
+	if (q->first_to_check == stop)
+		goto out;
+
+	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
+	if (!count)
+		goto out;
+
+	switch (state) {
+	case SLSB_P_OUTPUT_PENDING:
+		BUG();
+	case SLSB_P_OUTPUT_EMPTY:
+		/* the adapter got it */
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
+			"out empty:%1d %02x", q->nr, count);
+
+		atomic_sub(count, &q->nr_buf_used);
+		q->first_to_check = add_buf(q->first_to_check, count);
+		if (q->irq_ptr->perf_stat_enabled)
+			account_sbals(q, count);
+
+		break;
+	case SLSB_P_OUTPUT_ERROR:
+		process_buffer_error(q, count);
+		q->first_to_check = add_buf(q->first_to_check, count);
+		atomic_sub(count, &q->nr_buf_used);
+		if (q->irq_ptr->perf_stat_enabled)
+			account_sbals_error(q, count);
+		break;
+	case SLSB_CU_OUTPUT_PRIMED:
+		/* the adapter has not fetched the output yet */
+		if (q->irq_ptr->perf_stat_enabled)
+			q->q_stats.nr_sbal_nop++;
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
+			      q->nr);
+		break;
+	case SLSB_P_OUTPUT_NOT_INIT:
+	case SLSB_P_OUTPUT_HALTED:
+		break;
+	default:
+		BUG();
+	}
+
+out:
+	return q->first_to_check;
+}
+
+/* all buffers processed? */
+static inline int qdio_outbound_q_done(struct qdio_q *q)
+{
+	return atomic_read(&q->nr_buf_used) == 0;
+}
+
+static inline int qdio_outbound_q_moved(struct qdio_q *q)
+{
+	int bufnr;
+
+	bufnr = get_outbound_buffer_frontier(q);
+
+	if ((bufnr != q->last_move) || q->qdio_error) {
+		q->last_move = bufnr;
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
+		return 1;
+	} else
+		return 0;
+}
+
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
+{
+	int retries = 0, cc;
+	unsigned int busy_bit;
+
+	if (!need_siga_out(q))
+		return 0;
+
+	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
+retry:
+	qperf_inc(q, siga_write);
+
+	cc = qdio_siga_output(q, &busy_bit, aob);
+	switch (cc) {
+	case 0:
+		break;
+	case 2:
+		if (busy_bit) {
+			while (++retries < QDIO_BUSY_BIT_RETRIES) {
+				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
+				goto retry;
+			}
+			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
+			cc |= QDIO_ERROR_SIGA_BUSY;
+		} else
+			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
+		break;
+	case 1:
+	case 3:
+		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
+		break;
+	}
+	if (retries) {
+		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
+		DBF_ERROR("count:%u", retries);
+	}
+	return cc;
+}
+
+static void __qdio_outbound_processing(struct qdio_q *q)
+{
+	qperf_inc(q, tasklet_outbound);
+	BUG_ON(atomic_read(&q->nr_buf_used) < 0);
+
+	if (qdio_outbound_q_moved(q))
+		qdio_kick_handler(q);
+
+	if (queue_type(q) == QDIO_ZFCP_QFMT)
+		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
+			goto sched;
+
+	if (q->u.out.pci_out_enabled)
+		return;
+
+	/*
+	 * Now we know that queue type is either qeth without pci enabled
+	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
+	 * is noticed and outbound_handler is called after some time.
+	 */
+	if (qdio_outbound_q_done(q))
+		del_timer(&q->u.out.timer);
+	else
+		if (!timer_pending(&q->u.out.timer))
+			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
+	return;
+
+sched:
+	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
+		return;
+	tasklet_schedule(&q->tasklet);
+}
+
+/* outbound tasklet */
+void qdio_outbound_processing(unsigned long data)
+{
+	struct qdio_q *q = (struct qdio_q *)data;
+	__qdio_outbound_processing(q);
+}
+
+void qdio_outbound_timer(unsigned long data)
+{
+	struct qdio_q *q = (struct qdio_q *)data;
+
+	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
+		return;
+	tasklet_schedule(&q->tasklet);
+}
+
+static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
+{
+	struct qdio_q *out;
+	int i;
+
+	if (!pci_out_supported(q))
+		return;
+
+	for_each_output_queue(q->irq_ptr, out, i)
+		if (!qdio_outbound_q_done(out))
+			tasklet_schedule(&out->tasklet);
+}
+
+static void __tiqdio_inbound_processing(struct qdio_q *q)
+{
+	qperf_inc(q, tasklet_inbound);
+	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
+		qdio_sync_queues(q);
+
+	/*
+	 * The interrupt could be caused by a PCI request. Check the
+	 * PCI capable outbound queues.
+	 */
+	qdio_check_outbound_after_thinint(q);
+
+	if (!qdio_inbound_q_moved(q))
+		return;
+
+	qdio_kick_handler(q);
+
+	if (!qdio_inbound_q_done(q)) {
+		qperf_inc(q, tasklet_inbound_resched);
+		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
+			tasklet_schedule(&q->tasklet);
+			return;
+		}
+	}
+
+	qdio_stop_polling(q);
+	/*
+	 * We need to check again to not lose initiative after
+	 * resetting the ACK state.
+	 */
+	if (!qdio_inbound_q_done(q)) {
+		qperf_inc(q, tasklet_inbound_resched2);
+		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+			tasklet_schedule(&q->tasklet);
+	}
+}
+
+void tiqdio_inbound_processing(unsigned long data)
+{
+	struct qdio_q *q = (struct qdio_q *)data;
+	__tiqdio_inbound_processing(q);
+}
+
+static inline void qdio_set_state(struct qdio_irq *irq_ptr,
+				  enum qdio_irq_states state)
+{
+	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);
+
+	irq_ptr->state = state;
+	mb();
+}
+
+static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
+{
+	if (irb->esw.esw0.erw.cons) {
+		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
+		DBF_ERROR_HEX(irb, 64);
+		DBF_ERROR_HEX(irb->ecw, 64);
+	}
+}
+
+/* PCI interrupt handler */
+static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
+{
+	int i;
+	struct qdio_q *q;
+
+	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
+		return;
+
+	for_each_input_queue(irq_ptr, q, i) {
+		if (q->u.in.queue_start_poll) {
+			/* skip if polling is enabled or already in work */
+			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+				     &q->u.in.queue_irq_state)) {
+				qperf_inc(q, int_discarded);
+				continue;
+			}
+			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+						 q->irq_ptr->int_parm);
+		} else {
+			tasklet_schedule(&q->tasklet);
+		}
+	}
+
+	if (!pci_out_supported(q))
+		return;
+
+	for_each_output_queue(irq_ptr, q, i) {
+		if (qdio_outbound_q_done(q))
+			continue;
+		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
+			qdio_siga_sync_q(q);
+		tasklet_schedule(&q->tasklet);
+	}
+}
+
+static void qdio_handle_activate_check(struct ccw_device *cdev,
+				unsigned long intparm, int cstat, int dstat)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	struct qdio_q *q;
+	int count;
+
+	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
+	DBF_ERROR("intp :%lx", intparm);
+	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
+
+	if (irq_ptr->nr_input_qs) {
+		q = irq_ptr->input_qs[0];
+	} else if (irq_ptr->nr_output_qs) {
+		q = irq_ptr->output_qs[0];
+	} else {
+		dump_stack();
+		goto no_handler;
+	}
+
+	count = sub_buf(q->first_to_check, q->first_to_kick);
+	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
+no_handler:
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+	/*
+	 * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will happen.
+	 * Therefore we call the LGR detection function here.
+	 */
+	lgr_info_log();
+}
+
+static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
+				      int dstat)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
+
+	if (cstat)
+		goto error;
+	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
+		goto error;
+	if (!(dstat & DEV_STAT_DEV_END))
+		goto error;
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
+	return;
+
+error:
+	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
+	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+}
+
+/* qdio interrupt handler */
+void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+		      struct irb *irb)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	int cstat, dstat;
+
+	if (!intparm || !irq_ptr) {
+		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
+		return;
+	}
+
+	if (irq_ptr->perf_stat_enabled)
+		irq_ptr->perf_stat.qdio_int++;
+
+	if (IS_ERR(irb)) {
+		switch (PTR_ERR(irb)) {
+		case -EIO:
+			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
+			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+			wake_up(&cdev->private->wait_q);
+			return;
+		default:
+			WARN_ON(1);
+			return;
+		}
+	}
+	qdio_irq_check_sense(irq_ptr, irb);
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+
+	switch (irq_ptr->state) {
+	case QDIO_IRQ_STATE_INACTIVE:
+		qdio_establish_handle_irq(cdev, cstat, dstat);
+		break;
+	case QDIO_IRQ_STATE_CLEANUP:
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+		break;
+	case QDIO_IRQ_STATE_ESTABLISHED:
+	case QDIO_IRQ_STATE_ACTIVE:
+		if (cstat & SCHN_STAT_PCI) {
+			qdio_int_handler_pci(irq_ptr);
+			return;
+		}
+		if (cstat || dstat)
+			qdio_handle_activate_check(cdev, intparm, cstat,
+						   dstat);
+		break;
+	case QDIO_IRQ_STATE_STOPPED:
+		break;
+	default:
+		WARN_ON(1);
+	}
+	wake_up(&cdev->private->wait_q);
+}
+
+/**
+ * qdio_get_ssqd_desc - get qdio subchannel description
+ * @cdev: ccw device to get description for
+ * @data: where to store the ssqd
+ *
+ * Returns 0 or an error code. The results of the chsc are stored in the
+ * specified structure.
+ */
+int qdio_get_ssqd_desc(struct ccw_device *cdev,
+		       struct qdio_ssqd_desc *data)
+{
+
+	if (!cdev || !cdev->private)
+		return -EINVAL;
+
+	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
+	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
+}
+EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
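+
+/*
+ * Editorial usage sketch, not part of the original patch: a minimal caller
+ * of qdio_get_ssqd_desc() from a driver that owns the ccw device.  The
+ * example function name is made up for illustration only.
+ */
+static void __maybe_unused qdio_example_dump_ssqd(struct ccw_device *cdev)
+{
+	struct qdio_ssqd_desc ssqd;
+	int rc;
+
+	rc = qdio_get_ssqd_desc(cdev, &ssqd);
+	if (rc)
+		pr_warn("example: no ssqd data, rc=%d\n", rc);
+	else
+		pr_info("example: qdioac1=%x\n", ssqd.qdioac1);
+}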
+
+static void qdio_shutdown_queues(struct ccw_device *cdev)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	struct qdio_q *q;
+	int i;
+
+	for_each_input_queue(irq_ptr, q, i)
+		tasklet_kill(&q->tasklet);
+
+	for_each_output_queue(irq_ptr, q, i) {
+		del_timer(&q->u.out.timer);
+		tasklet_kill(&q->tasklet);
+	}
+}
+
+/**
+ * qdio_shutdown - shut down a qdio subchannel
+ * @cdev: associated ccw device
+ * @how: use halt or clear to shutdown
+ */
+int qdio_shutdown(struct ccw_device *cdev, int how)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	int rc;
+	unsigned long flags;
+
+	if (!irq_ptr)
+		return -ENODEV;
+
+	BUG_ON(irqs_disabled());
+	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
+
+	mutex_lock(&irq_ptr->setup_mutex);
+	/*
+	 * Subchannel was already shot down. We cannot prevent being called
+	 * twice since cio may trigger a shutdown asynchronously.
+	 */
+	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
+		mutex_unlock(&irq_ptr->setup_mutex);
+		return 0;
+	}
+
+	/*
+	 * Indicate that the device is going down. Scheduling the queue
+	 * tasklets is forbidden from here on.
+	 */
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+
+	tiqdio_remove_input_queues(irq_ptr);
+	qdio_shutdown_queues(cdev);
+	qdio_shutdown_debug_entries(irq_ptr, cdev);
+
+	/* cleanup subchannel */
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+
+	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
+		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
+	else
+		/* default behaviour is halt */
+		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+	if (rc) {
+		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
+		DBF_ERROR("rc:%4d", rc);
+		goto no_cleanup;
+	}
+
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	wait_event_interruptible_timeout(cdev->private->wait_q,
+		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
+		irq_ptr->state == QDIO_IRQ_STATE_ERR,
+		10 * HZ);
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+
+no_cleanup:
+	qdio_shutdown_thinint(irq_ptr);
+
+	/* restore interrupt handler */
+	if ((void *)cdev->handler == (void *)qdio_int_handler)
+		cdev->handler = irq_ptr->orig_handler;
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+	mutex_unlock(&irq_ptr->setup_mutex);
+	if (rc)
+		return rc;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_shutdown);
+
+/**
+ * qdio_free - free data structures for a qdio subchannel
+ * @cdev: associated ccw device
+ */
+int qdio_free(struct ccw_device *cdev)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	if (!irq_ptr)
+		return -ENODEV;
+
+	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
+	mutex_lock(&irq_ptr->setup_mutex);
+
+	if (irq_ptr->debug_area != NULL) {
+		debug_unregister(irq_ptr->debug_area);
+		irq_ptr->debug_area = NULL;
+	}
+	cdev->private->qdio_data = NULL;
+	mutex_unlock(&irq_ptr->setup_mutex);
+
+	qdio_release_memory(irq_ptr);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_free);
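+
+/*
+ * Editorial usage sketch, not part of the original patch: the usual
+ * teardown order as seen from a driver is qdio_shutdown() followed by
+ * qdio_free().  The clear variant is assumed here; drivers may use halt
+ * instead.
+ */
+static void __maybe_unused qdio_example_teardown(struct ccw_device *cdev)
+{
+	/* halt/clear the subchannel and kill the queue tasklets */
+	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+	/* release qdr, chsc page and the queue structures */
+	qdio_free(cdev);
+}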
+
+/**
+ * qdio_allocate - allocate qdio queues and associated data
+ * @init_data: initialization data
+ */
+int qdio_allocate(struct qdio_initialize *init_data)
+{
+	struct qdio_irq *irq_ptr;
+
+	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);
+
+	if ((init_data->no_input_qs && !init_data->input_handler) ||
+	    (init_data->no_output_qs && !init_data->output_handler))
+		return -EINVAL;
+
+	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
+	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
+		return -EINVAL;
+
+	if ((!init_data->input_sbal_addr_array) ||
+	    (!init_data->output_sbal_addr_array))
+		return -EINVAL;
+
+	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
+	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!irq_ptr)
+		goto out_err;
+
+	mutex_init(&irq_ptr->setup_mutex);
+	qdio_allocate_dbf(init_data, irq_ptr);
+
+	/*
+	 * Allocate a page for the chsc calls in qdio_establish.
+	 * Must be pre-allocated since a zfcp recovery will call
+	 * qdio_establish. In case of low memory and swap on a zfcp disk
+	 * we may not be able to allocate memory otherwise.
+	 */
+	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
+	if (!irq_ptr->chsc_page)
+		goto out_rel;
+
+	/* qdr is used in ccw1.cda which is u32 */
+	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!irq_ptr->qdr)
+		goto out_rel;
+	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
+
+	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
+			     init_data->no_output_qs))
+		goto out_rel;
+
+	init_data->cdev->private->qdio_data = irq_ptr;
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+	return 0;
+out_rel:
+	qdio_release_memory(irq_ptr);
+out_err:
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(qdio_allocate);
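+
+/*
+ * Editorial usage sketch, not part of the original patch: the minimum a
+ * caller has to fill in struct qdio_initialize before qdio_allocate().
+ * The handlers and the SBAL address arrays are assumed to be provided by
+ * the calling driver; all other fields may stay zero for a simple setup.
+ */
+static int __maybe_unused qdio_example_allocate(struct ccw_device *cdev,
+						qdio_handler_t *in_handler,
+						qdio_handler_t *out_handler,
+						void **in_sbals,
+						void **out_sbals)
+{
+	struct qdio_initialize init_data = {
+		.cdev			= cdev,
+		.q_format		= QDIO_QETH_QFMT,
+		.no_input_qs		= 1,
+		.no_output_qs		= 1,
+		.input_handler		= in_handler,
+		.output_handler		= out_handler,
+		.int_parm		= (unsigned long) cdev,
+		.input_sbal_addr_array	= in_sbals,
+		.output_sbal_addr_array	= out_sbals,
+	};
+
+	return qdio_allocate(&init_data);
+}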
+
+static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
+{
+	struct qdio_q *q = irq_ptr->input_qs[0];
+	int i, use_cq = 0;
+
+	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
+		use_cq = 1;
+
+	for_each_output_queue(irq_ptr, q, i) {
+		if (use_cq) {
+			if (qdio_enable_async_operation(&q->u.out) < 0) {
+				use_cq = 0;
+				continue;
+			}
+		} else
+			qdio_disable_async_operation(&q->u.out);
+	}
+	DBF_EVENT("use_cq:%d", use_cq);
+}
+
+/**
+ * qdio_establish - establish queues on a qdio subchannel
+ * @init_data: initialization data
+ */
+int qdio_establish(struct qdio_initialize *init_data)
+{
+	struct qdio_irq *irq_ptr;
+	struct ccw_device *cdev = init_data->cdev;
+	unsigned long saveflags;
+	int rc;
+
+	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);
+
+	irq_ptr = cdev->private->qdio_data;
+	if (!irq_ptr)
+		return -ENODEV;
+
+	if (cdev->private->state != DEV_STATE_ONLINE)
+		return -EINVAL;
+
+	mutex_lock(&irq_ptr->setup_mutex);
+	qdio_setup_irq(init_data);
+
+	rc = qdio_establish_thinint(irq_ptr);
+	if (rc) {
+		mutex_unlock(&irq_ptr->setup_mutex);
+		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+		return rc;
+	}
+
+	/* establish q */
+	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
+	irq_ptr->ccw.flags = CCW_FLAG_SLI;
+	irq_ptr->ccw.count = irq_ptr->equeue.count;
+	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
+	ccw_device_set_options_mask(cdev, 0);
+
+	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
+	if (rc) {
+		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
+		DBF_ERROR("rc:%4x", rc);
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
+
+	if (rc) {
+		mutex_unlock(&irq_ptr->setup_mutex);
+		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+		return rc;
+	}
+
+	wait_event_interruptible_timeout(cdev->private->wait_q,
+		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
+		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+
+	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
+		mutex_unlock(&irq_ptr->setup_mutex);
+		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+		return -EIO;
+	}
+
+	qdio_setup_ssqd_info(irq_ptr);
+
+	qdio_detect_hsicq(irq_ptr);
+
+	/* qebsm is now setup if available, initialize buffer states */
+	qdio_init_buf_states(irq_ptr);
+
+	mutex_unlock(&irq_ptr->setup_mutex);
+	qdio_print_subchannel_info(irq_ptr, cdev);
+	qdio_setup_debug_entries(irq_ptr, cdev);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_establish);
+
+/**
+ * qdio_activate - activate queues on a qdio subchannel
+ * @cdev: associated cdev
+ */
+int qdio_activate(struct ccw_device *cdev)
+{
+	struct qdio_irq *irq_ptr;
+	int rc;
+	unsigned long saveflags;
+
+	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);
+
+	irq_ptr = cdev->private->qdio_data;
+	if (!irq_ptr)
+		return -ENODEV;
+
+	if (cdev->private->state != DEV_STATE_ONLINE)
+		return -EINVAL;
+
+	mutex_lock(&irq_ptr->setup_mutex);
+	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
+		rc = -EBUSY;
+		goto out;
+	}
+
+	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
+	irq_ptr->ccw.flags = CCW_FLAG_SLI;
+	irq_ptr->ccw.count = irq_ptr->aqueue.count;
+	irq_ptr->ccw.cda = 0;
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
+	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
+
+	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
+			      0, DOIO_DENY_PREFETCH);
+	if (rc) {
+		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
+		DBF_ERROR("rc:%4x", rc);
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
+
+	if (rc)
+		goto out;
+
+	if (is_thinint_irq(irq_ptr))
+		tiqdio_add_input_queues(irq_ptr);
+
+	/* wait for subchannel to become active */
+	msleep(5);
+
+	switch (irq_ptr->state) {
+	case QDIO_IRQ_STATE_STOPPED:
+	case QDIO_IRQ_STATE_ERR:
+		rc = -EIO;
+		break;
+	default:
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
+		rc = 0;
+	}
+out:
+	mutex_unlock(&irq_ptr->setup_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_activate);
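+
+/*
+ * Editorial usage sketch, not part of the original patch: the bring-up
+ * order a driver follows with the calls above is qdio_allocate(),
+ * qdio_establish(), then qdio_activate().  init_data is assumed to be the
+ * structure already filled as for qdio_allocate(); on an establish error
+ * qdio_establish() has shut the subchannel down itself, so only
+ * qdio_free() remains to be done.
+ */
+static int __maybe_unused qdio_example_bringup(struct qdio_initialize *init_data)
+{
+	struct ccw_device *cdev = init_data->cdev;
+	int rc;
+
+	rc = qdio_allocate(init_data);
+	if (rc)
+		return rc;
+
+	rc = qdio_establish(init_data);
+	if (rc)
+		goto out_free;
+
+	rc = qdio_activate(cdev);
+	if (rc)
+		goto out_shutdown;
+	return 0;
+
+out_shutdown:
+	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+out_free:
+	qdio_free(cdev);
+	return rc;
+}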
+
+static inline int buf_in_between(int bufnr, int start, int count)
+{
+	int end = add_buf(start, count);
+
+	if (end > start) {
+		if (bufnr >= start && bufnr < end)
+			return 1;
+		else
+			return 0;
+	}
+
+	/* wrap-around case */
+	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
+	    (bufnr < end))
+		return 1;
+	else
+		return 0;
+}
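+
+/*
+ * Worked example (editorial): with start = 120 and count = 16 the end
+ * index wraps to add_buf(120, 16) = 8, so both buffer 125 and buffer 3
+ * count as "in between", while buffer 50 does not.
+ */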
+
+/**
+ * handle_inbound - reset processed input buffers
+ * @q: queue containing the buffers
+ * @callflags: flags
+ * @bufnr: first buffer to process
+ * @count: how many buffers are emptied
+ */
+static int handle_inbound(struct qdio_q *q, unsigned int callflags,
+			  int bufnr, int count)
+{
+	int used, diff;
+
+	qperf_inc(q, inbound_call);
+
+	if (!q->u.in.polling)
+		goto set;
+
+	/* protect against stop polling setting an ACK for an emptied slsb */
+	if (count == QDIO_MAX_BUFFERS_PER_Q) {
+		/* overwriting everything, just delete polling status */
+		q->u.in.polling = 0;
+		q->u.in.ack_count = 0;
+		goto set;
+	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
+		if (is_qebsm(q)) {
+			/* partial overwrite, just update ack_start */
+			diff = add_buf(bufnr, count);
+			diff = sub_buf(diff, q->u.in.ack_start);
+			q->u.in.ack_count -= diff;
+			if (q->u.in.ack_count <= 0) {
+				q->u.in.polling = 0;
+				q->u.in.ack_count = 0;
+				goto set;
+			}
+			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
+		}
+		else
+			/* the only ACK will be deleted, so stop polling */
+			q->u.in.polling = 0;
+	}
+
+set:
+	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
+
+	used = atomic_add_return(count, &q->nr_buf_used) - count;
+	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
+
+	if (need_siga_in(q))
+		return qdio_siga_input(q);
+
+	return 0;
+}
+
+/**
+ * handle_outbound - process filled outbound buffers
+ * @q: queue containing the buffers
+ * @callflags: flags
+ * @bufnr: first buffer to process
+ * @count: how many buffers are filled
+ */
+static int handle_outbound(struct qdio_q *q, unsigned int callflags,
+			   int bufnr, int count)
+{
+	unsigned char state = 0;
+	int used, rc = 0;
+
+	qperf_inc(q, outbound_call);
+
+	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
+	used = atomic_add_return(count, &q->nr_buf_used);
+	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
+
+	if (used == QDIO_MAX_BUFFERS_PER_Q)
+		qperf_inc(q, outbound_queue_full);
+
+	if (callflags & QDIO_FLAG_PCI_OUT) {
+		q->u.out.pci_out_enabled = 1;
+		qperf_inc(q, pci_request_int);
+	} else
+		q->u.out.pci_out_enabled = 0;
+
+	if (queue_type(q) == QDIO_IQDIO_QFMT) {
+		unsigned long phys_aob = 0;
+
+		/* One SIGA-W per buffer required for unicast HSI */
+		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
+
+		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
+
+		rc = qdio_kick_outbound_q(q, phys_aob);
+	} else if (need_siga_sync(q)) {
+		rc = qdio_siga_sync_q(q);
+	} else {
+		/* try to fast requeue buffers */
+		get_buf_state(q, prev_buf(bufnr), &state, 0);
+		if (state != SLSB_CU_OUTPUT_PRIMED)
+			rc = qdio_kick_outbound_q(q, 0);
+		else
+			qperf_inc(q, fast_requeue);
+	}
+
+	/* in case of SIGA errors we must process the error immediately */
+	if (used >= q->u.out.scan_threshold || rc)
+		tasklet_schedule(&q->tasklet);
+	else
+		/* free the SBALs in case of no further traffic */
+		if (!timer_pending(&q->u.out.timer))
+			mod_timer(&q->u.out.timer, jiffies + HZ);
+	return rc;
+}
+
+/**
+ * do_QDIO - process input or output buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @callflags: input or output and special flags from the program
+ * @q_nr: queue number
+ * @bufnr: buffer number
+ * @count: how many buffers to process
+ */
+int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
+	    int q_nr, unsigned int bufnr, unsigned int count)
+{
+	struct qdio_irq *irq_ptr;
+
+
+	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
+		return -EINVAL;
+
+	irq_ptr = cdev->private->qdio_data;
+	if (!irq_ptr)
+		return -ENODEV;
+
+	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
+		      "do%02x b:%02x c:%02x", callflags, bufnr, count);
+
+	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
+		return -EBUSY;
+	if (!count)
+		return 0;
+	if (callflags & QDIO_FLAG_SYNC_INPUT)
+		return handle_inbound(irq_ptr->input_qs[q_nr],
+				      callflags, bufnr, count);
+	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
+		return handle_outbound(irq_ptr->output_qs[q_nr],
+				       callflags, bufnr, count);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(do_QDIO);
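+
+/*
+ * Editorial usage sketch, not part of the original patch: typical do_QDIO()
+ * calls from a driver, returning one emptied inbound buffer and handing one
+ * filled outbound buffer to queue 0.  bufnr is assumed to come from the
+ * driver's own buffer bookkeeping.
+ */
+static int __maybe_unused qdio_example_do_qdio(struct ccw_device *cdev,
+					       unsigned int bufnr)
+{
+	int rc;
+
+	/* give an emptied input buffer back to the adapter */
+	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, 1);
+	if (rc)
+		return rc;
+
+	/* hand a filled output buffer to the adapter, requesting a PCI */
+	return do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
+		       0, bufnr, 1);
+}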
+
+/**
+ * qdio_start_irq - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ *   0 - success
+ *   1 - irqs not started since new data is available
+ */
+int qdio_start_irq(struct ccw_device *cdev, int nr)
+{
+	struct qdio_q *q;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = irq_ptr->input_qs[nr];
+
+	WARN_ON(queue_irqs_enabled(q));
+
+	clear_nonshared_ind(irq_ptr);
+	qdio_stop_polling(q);
+	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+
+	/*
+	 * We need to check again to not lose initiative after
+	 * resetting the ACK state.
+	 */
+	if (test_nonshared_ind(irq_ptr))
+		goto rescan;
+	if (!qdio_inbound_q_done(q))
+		goto rescan;
+	return 0;
+
+rescan:
+	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+			     &q->u.in.queue_irq_state))
+		return 0;
+	else
+		return 1;
+
+}
+EXPORT_SYMBOL(qdio_start_irq);
+
+/**
+ * qdio_get_next_buffers - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ * @bufnr: first filled buffer number
+ * @error: buffers are in error state
+ *
+ * Return codes
+ *   < 0 - error
+ *   = 0 - no new buffers found
+ *   > 0 - number of processed buffers
+ */
+int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
+			  int *error)
+{
+	struct qdio_q *q;
+	int start, end;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = irq_ptr->input_qs[nr];
+	WARN_ON(queue_irqs_enabled(q));
+
+	/*
+	 * Cannot rely on automatic sync after interrupt since queues may
+	 * also be examined without interrupt.
+	 */
+	if (need_siga_sync(q))
+		qdio_sync_queues(q);
+
+	/* check the PCI capable outbound queues. */
+	qdio_check_outbound_after_thinint(q);
+
+	if (!qdio_inbound_q_moved(q))
+		return 0;
+
+	/* Note: upper-layer MUST stop processing immediately here ... */
+	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+		return -EIO;
+
+	start = q->first_to_kick;
+	end = q->first_to_check;
+	*bufnr = start;
+	*error = q->qdio_error;
+
+	/* for the next time */
+	q->first_to_kick = end;
+	q->qdio_error = 0;
+	return sub_buf(end, start);
+}
+EXPORT_SYMBOL(qdio_get_next_buffers);
+
+/**
+ * qdio_stop_irq - disable interrupt processing for the device
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ *   0 - interrupts were already disabled
+ *   1 - interrupts successfully disabled
+ */
+int qdio_stop_irq(struct ccw_device *cdev, int nr)
+{
+	struct qdio_q *q;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = irq_ptr->input_qs[nr];
+
+	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+			     &q->u.in.queue_irq_state))
+		return 0;
+	else
+		return 1;
+}
+EXPORT_SYMBOL(qdio_stop_irq);
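+
+/*
+ * Editorial usage sketch, not part of the original patch: the polling mode
+ * enabled by the queue_start_poll callback.  Buffers are drained with
+ * qdio_get_next_buffers() and interrupts are re-armed with qdio_start_irq()
+ * once the queue is empty; the actual buffer processing is assumed to live
+ * in the calling driver.
+ */
+static void __maybe_unused qdio_example_poll(struct ccw_device *cdev, int nr)
+{
+	int bufnr, error, count;
+
+	do {
+		while ((count = qdio_get_next_buffers(cdev, nr, &bufnr,
+						      &error)) > 0) {
+			/*
+			 * Process buffers bufnr..bufnr+count-1 here, then
+			 * hand them back to the adapter.
+			 */
+			do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, nr, bufnr, count);
+		}
+		/* re-enable interrupts; 1 means new data raced in, rescan */
+	} while (qdio_start_irq(cdev, nr) == 1);
+}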
+
+static int __init init_QDIO(void)
+{
+	int rc;
+
+	rc = qdio_debug_init();
+	if (rc)
+		return rc;
+	rc = qdio_setup_init();
+	if (rc)
+		goto out_debug;
+	rc = tiqdio_allocate_memory();
+	if (rc)
+		goto out_cache;
+	rc = tiqdio_register_thinints();
+	if (rc)
+		goto out_ti;
+	return 0;
+
+out_ti:
+	tiqdio_free_memory();
+out_cache:
+	qdio_setup_exit();
+out_debug:
+	qdio_debug_exit();
+	return rc;
+}
+
+static void __exit exit_QDIO(void)
+{
+	tiqdio_unregister_thinints();
+	tiqdio_free_memory();
+	qdio_setup_exit();
+	qdio_debug_exit();
+}
+
+module_init(init_QDIO);
+module_exit(exit_QDIO);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio_setup.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio_setup.c
new file mode 100644
index 0000000..ecf12f0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio_setup.c
@@ -0,0 +1,561 @@
+/*
+ * drivers/s390/cio/qdio_setup.c
+ *
+ * qdio queue initialization
+ *
+ * Copyright (C) IBM Corp. 2008
+ * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <asm/qdio.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "chsc.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+
+static struct kmem_cache *qdio_q_cache;
+static struct kmem_cache *qdio_aob_cache;
+
+struct qaob *qdio_allocate_aob(void)
+{
+	return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(qdio_allocate_aob);
+
+void qdio_release_aob(struct qaob *aob)
+{
+	kmem_cache_free(qdio_aob_cache, aob);
+}
+EXPORT_SYMBOL_GPL(qdio_release_aob);
+
+/*
+ * qebsm is only available under 64bit but the adapter sets the feature
+ * flag anyway, so we manually override it.
+ */
+static inline int qebsm_possible(void)
+{
+#ifdef CONFIG_64BIT
+	return css_general_characteristics.qebsm;
+#endif
+	return 0;
+}
+
+/*
+ * qib_param_field: pointer to 128 bytes or NULL, if no param field
+ * nr_input_qs: pointer to nr_queues*128 words of data or NULL
+ */
+static void set_impl_params(struct qdio_irq *irq_ptr,
+			    unsigned int qib_param_field_format,
+			    unsigned char *qib_param_field,
+			    unsigned long *input_slib_elements,
+			    unsigned long *output_slib_elements)
+{
+	struct qdio_q *q;
+	int i, j;
+
+	if (!irq_ptr)
+		return;
+
+	irq_ptr->qib.pfmt = qib_param_field_format;
+	if (qib_param_field)
+		memcpy(irq_ptr->qib.parm, qib_param_field,
+		       QDIO_MAX_BUFFERS_PER_Q);
+
+	if (!input_slib_elements)
+		goto output;
+
+	for_each_input_queue(irq_ptr, q, i) {
+		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+			q->slib->slibe[j].parms =
+				input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
+	}
+output:
+	if (!output_slib_elements)
+		return;
+
+	for_each_output_queue(irq_ptr, q, i) {
+		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+			q->slib->slibe[j].parms =
+				output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
+	}
+}
+
+static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
+{
+	struct qdio_q *q;
+	int i;
+
+	for (i = 0; i < nr_queues; i++) {
+		q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+		if (!q)
+			return -ENOMEM;
+
+		q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
+		if (!q->slib) {
+			kmem_cache_free(qdio_q_cache, q);
+			return -ENOMEM;
+		}
+		irq_ptr_qs[i] = q;
+	}
+	return 0;
+}
+
+int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
+{
+	int rc;
+
+	rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
+	if (rc)
+		return rc;
+	rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
+	return rc;
+}
+
+static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
+			      qdio_handler_t *handler, int i)
+{
+	struct slib *slib = q->slib;
+
+	/* queue must be cleared for qdio_establish */
+	memset(q, 0, sizeof(*q));
+	memset(slib, 0, PAGE_SIZE);
+	q->slib = slib;
+	q->irq_ptr = irq_ptr;
+	q->mask = 1 << (31 - i);
+	q->nr = i;
+	q->handler = handler;
+}
+
+static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
+				void **sbals_array, int i)
+{
+	struct qdio_q *prev;
+	int j;
+
+	DBF_HEX(&q, sizeof(void *));
+	q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
+
+	/* fill in sbal */
+	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
+		q->sbal[j] = *sbals_array++;
+		BUG_ON((unsigned long)q->sbal[j] & 0xff);
+	}
+
+	/* fill in slib */
+	if (i > 0) {
+		prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
+			: irq_ptr->output_qs[i - 1];
+		prev->slib->nsliba = (unsigned long)q->slib;
+	}
+
+	q->slib->sla = (unsigned long)q->sl;
+	q->slib->slsba = (unsigned long)&q->slsb.val[0];
+
+	/* fill in sl */
+	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+		q->sl->element[j].sbal = (unsigned long)q->sbal[j];
+}
+
+static void setup_queues(struct qdio_irq *irq_ptr,
+			 struct qdio_initialize *qdio_init)
+{
+	struct qdio_q *q;
+	void **input_sbal_array = qdio_init->input_sbal_addr_array;
+	void **output_sbal_array = qdio_init->output_sbal_addr_array;
+	struct qdio_outbuf_state *output_sbal_state_array =
+				  qdio_init->output_sbal_state_array;
+	int i;
+
+	for_each_input_queue(irq_ptr, q, i) {
+		DBF_EVENT("inq:%1d", i);
+		setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
+
+		q->is_input_q = 1;
+		q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
+				qdio_init->queue_start_poll_array[i] : NULL;
+
+		setup_storage_lists(q, irq_ptr, input_sbal_array, i);
+		input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
+
+		if (is_thinint_irq(irq_ptr)) {
+			tasklet_init(&q->tasklet, tiqdio_inbound_processing,
+				     (unsigned long) q);
+		} else {
+			tasklet_init(&q->tasklet, qdio_inbound_processing,
+				     (unsigned long) q);
+		}
+	}
+
+	for_each_output_queue(irq_ptr, q, i) {
+		DBF_EVENT("outq:%1d", i);
+		setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
+
+		q->u.out.sbal_state = output_sbal_state_array;
+		output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
+
+		q->is_input_q = 0;
+		q->u.out.scan_threshold = qdio_init->scan_threshold;
+		setup_storage_lists(q, irq_ptr, output_sbal_array, i);
+		output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
+
+		tasklet_init(&q->tasklet, qdio_outbound_processing,
+			     (unsigned long) q);
+		setup_timer(&q->u.out.timer, (void(*)(unsigned long))
+			    &qdio_outbound_timer, (unsigned long)q);
+	}
+}
+
+static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
+{
+	if (qdioac & AC1_SIGA_INPUT_NEEDED)
+		irq_ptr->siga_flag.input = 1;
+	if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
+		irq_ptr->siga_flag.output = 1;
+	if (qdioac & AC1_SIGA_SYNC_NEEDED)
+		irq_ptr->siga_flag.sync = 1;
+	if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
+		irq_ptr->siga_flag.sync_after_ai = 1;
+	if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
+		irq_ptr->siga_flag.sync_out_after_pci = 1;
+}
+
+static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
+				  unsigned char qdioac, unsigned long token)
+{
+	if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
+		goto no_qebsm;
+	if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
+	    (!(qdioac & AC1_SC_QEBSM_ENABLED)))
+		goto no_qebsm;
+
+	irq_ptr->sch_token = token;
+
+	DBF_EVENT("V=V:1");
+	DBF_EVENT("%8lx", irq_ptr->sch_token);
+	return;
+
+no_qebsm:
+	irq_ptr->sch_token = 0;
+	irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
+	DBF_EVENT("noV=V");
+}
+
+/*
+ * If there is a qdio_irq we use the chsc_page and store the information
+ * in the qdio_irq, otherwise we copy it to the specified structure.
+ */
+int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
+			struct subchannel_id *schid,
+			struct qdio_ssqd_desc *data)
+{
+	struct chsc_ssqd_area *ssqd;
+	int rc;
+
+	DBF_EVENT("getssqd:%4x", schid->sch_no);
+	if (irq_ptr != NULL)
+		ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
+	else
+		ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
+	memset(ssqd, 0, PAGE_SIZE);
+
+	ssqd->request = (struct chsc_header) {
+		.length = 0x0010,
+		.code	= 0x0024,
+	};
+	ssqd->first_sch = schid->sch_no;
+	ssqd->last_sch = schid->sch_no;
+	ssqd->ssid = schid->ssid;
+
+	if (chsc(ssqd))
+		return -EIO;
+	rc = chsc_error_from_response(ssqd->response.code);
+	if (rc)
+		return rc;
+
+	if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
+	    !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
+	    (ssqd->qdio_ssqd.sch != schid->sch_no))
+		return -EINVAL;
+
+	if (irq_ptr != NULL)
+		memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
+		       sizeof(struct qdio_ssqd_desc));
+	else {
+		memcpy(data, &ssqd->qdio_ssqd,
+		       sizeof(struct qdio_ssqd_desc));
+		free_page((unsigned long)ssqd);
+	}
+	return 0;
+}
+
+void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
+{
+	unsigned char qdioac;
+	int rc;
+
+	rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, NULL);
+	if (rc) {
+		DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
+		DBF_ERROR("rc:%x", rc);
+		/* all flags set, worst case */
+		qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
+			 AC1_SIGA_SYNC_NEEDED;
+	} else
+		qdioac = irq_ptr->ssqd_desc.qdioac1;
+
+	check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
+	process_ac_flags(irq_ptr, qdioac);
+	DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
+	DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
+}
+
+void qdio_release_memory(struct qdio_irq *irq_ptr)
+{
+	struct qdio_q *q;
+	int i;
+
+	/*
+	 * Must check the queue arrays manually since irq_ptr->nr_input_qs /
+	 * irq_ptr->nr_output_qs may not yet be set.
+	 */
+	for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+		q = irq_ptr->input_qs[i];
+		if (q) {
+			free_page((unsigned long) q->slib);
+			kmem_cache_free(qdio_q_cache, q);
+		}
+	}
+	for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+		q = irq_ptr->output_qs[i];
+		if (q) {
+			if (q->u.out.use_cq) {
+				int n;
+
+				for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
+					struct qaob *aob = q->u.out.aobs[n];
+					if (aob) {
+						qdio_release_aob(aob);
+						q->u.out.aobs[n] = NULL;
+					}
+				}
+
+				qdio_disable_async_operation(&q->u.out);
+			}
+			free_page((unsigned long) q->slib);
+			kmem_cache_free(qdio_q_cache, q);
+		}
+	}
+	free_page((unsigned long) irq_ptr->qdr);
+	free_page(irq_ptr->chsc_page);
+	free_page((unsigned long) irq_ptr);
+}
+
+static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
+				     struct qdio_q **irq_ptr_qs,
+				     int i, int nr)
+{
+	irq_ptr->qdr->qdf0[i + nr].sliba =
+		(unsigned long)irq_ptr_qs[i]->slib;
+
+	irq_ptr->qdr->qdf0[i + nr].sla =
+		(unsigned long)irq_ptr_qs[i]->sl;
+
+	irq_ptr->qdr->qdf0[i + nr].slsba =
+		(unsigned long)&irq_ptr_qs[i]->slsb.val[0];
+
+	irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
+	irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
+	irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
+	irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
+}
+
+static void setup_qdr(struct qdio_irq *irq_ptr,
+		      struct qdio_initialize *qdio_init)
+{
+	int i;
+
+	irq_ptr->qdr->qfmt = qdio_init->q_format;
+	irq_ptr->qdr->ac = qdio_init->qdr_ac;
+	irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
+	irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
+	irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
+	irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
+	irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
+	irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
+
+	for (i = 0; i < qdio_init->no_input_qs; i++)
+		__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
+
+	for (i = 0; i < qdio_init->no_output_qs; i++)
+		__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
+					 qdio_init->no_input_qs);
+}
+
+static void setup_qib(struct qdio_irq *irq_ptr,
+		      struct qdio_initialize *init_data)
+{
+	if (qebsm_possible())
+		irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
+
+	irq_ptr->qib.rflags |= init_data->qib_rflags;
+
+	irq_ptr->qib.qfmt = init_data->q_format;
+	if (init_data->no_input_qs)
+		irq_ptr->qib.isliba =
+			(unsigned long)(irq_ptr->input_qs[0]->slib);
+	if (init_data->no_output_qs)
+		irq_ptr->qib.osliba =
+			(unsigned long)(irq_ptr->output_qs[0]->slib);
+	memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
+}
+
+int qdio_setup_irq(struct qdio_initialize *init_data)
+{
+	struct ciw *ciw;
+	struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
+	int rc;
+
+	memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
+	memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
+	memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
+	memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
+	memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+
+	irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
+	irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;
+
+	/* wipes qib.ac, required by ar7063 */
+	memset(irq_ptr->qdr, 0, sizeof(struct qdr));
+
+	irq_ptr->int_parm = init_data->int_parm;
+	irq_ptr->nr_input_qs = init_data->no_input_qs;
+	irq_ptr->nr_output_qs = init_data->no_output_qs;
+
+	irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
+	irq_ptr->cdev = init_data->cdev;
+	setup_queues(irq_ptr, init_data);
+
+	setup_qib(irq_ptr, init_data);
+	qdio_setup_thinint(irq_ptr);
+	set_impl_params(irq_ptr, init_data->qib_param_field_format,
+			init_data->qib_param_field,
+			init_data->input_slib_elements,
+			init_data->output_slib_elements);
+
+	/* fill input and output descriptors */
+	setup_qdr(irq_ptr, init_data);
+
+	/* qdr, qib, sls, slsbs, slibs, sbales are filled now */
+
+	/* get qdio commands */
+	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
+	if (!ciw) {
+		DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
+		rc = -EINVAL;
+		goto out_err;
+	}
+	irq_ptr->equeue = *ciw;
+
+	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
+	if (!ciw) {
+		DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
+		rc = -EINVAL;
+		goto out_err;
+	}
+	irq_ptr->aqueue = *ciw;
+
+	/* set new interrupt handler */
+	irq_ptr->orig_handler = init_data->cdev->handler;
+	init_data->cdev->handler = qdio_int_handler;
+	return 0;
+out_err:
+	qdio_release_memory(irq_ptr);
+	return rc;
+}
+
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
+				struct ccw_device *cdev)
+{
+	char s[80];
+
+	snprintf(s, 80, "qdio: %s %s on SC %x using "
+		 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n",
+		 dev_name(&cdev->dev),
+		 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
+			((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
+		 irq_ptr->schid.sch_no,
+		 is_thinint_irq(irq_ptr),
+		 (irq_ptr->sch_token) ? 1 : 0,
+		 (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0,
+		 css_general_characteristics.aif_tdd,
+		 (irq_ptr->siga_flag.input) ? "R" : " ",
+		 (irq_ptr->siga_flag.output) ? "W" : " ",
+		 (irq_ptr->siga_flag.sync) ? "S" : " ",
+		 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
+		 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
+	printk(KERN_INFO "%s", s);
+}
+
+int qdio_enable_async_operation(struct qdio_output_q *outq)
+{
+	outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q,
+			     GFP_ATOMIC);
+	if (!outq->aobs) {
+		outq->use_cq = 0;
+		return -ENOMEM;
+	}
+	outq->use_cq = 1;
+	return 0;
+}
+
+void qdio_disable_async_operation(struct qdio_output_q *q)
+{
+	kfree(q->aobs);
+	q->aobs = NULL;
+	q->use_cq = 0;
+}
+
+int __init qdio_setup_init(void)
+{
+	int rc;
+
+	qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
+					 256, 0, NULL);
+	if (!qdio_q_cache)
+		return -ENOMEM;
+
+	qdio_aob_cache = kmem_cache_create("qdio_aob",
+					sizeof(struct qaob),
+					sizeof(struct qaob),
+					0,
+					NULL);
+	if (!qdio_aob_cache) {
+		rc = -ENOMEM;
+		goto free_qdio_q_cache;
+	}
+
+	/* Check for OSA/FCP thin interrupts (bit 67). */
+	DBF_EVENT("thinint:%1d",
+		  (css_general_characteristics.aif_osa) ? 1 : 0);
+
+	/* Check for QEBSM support in general (bit 58). */
+	DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
+	rc = 0;
+out:
+	return rc;
+free_qdio_q_cache:
+	kmem_cache_destroy(qdio_q_cache);
+	goto out;
+}
+
+void qdio_setup_exit(void)
+{
+	kmem_cache_destroy(qdio_aob_cache);
+	kmem_cache_destroy(qdio_q_cache);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio_thinint.c b/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio_thinint.c
new file mode 100644
index 0000000..011eade
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/cio/qdio_thinint.c
@@ -0,0 +1,324 @@
+/*
+ * linux/drivers/s390/cio/qdio_thinint.c
+ *
+ * Copyright 2000,2009 IBM Corp.
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ *	      Cornelia Huck <cornelia.huck@de.ibm.com>
+ *	      Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/kernel_stat.h>
+#include <linux/atomic.h>
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include <asm/airq.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "ioasm.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+
+/*
+ * Restriction: only 63 iqdio subchannels can have their own indicator;
+ * after that, subsequent subchannels share one indicator.
+ */
+#define TIQDIO_NR_NONSHARED_IND		63
+#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
+#define TIQDIO_SHARED_IND		63
+
+/* device state change indicators */
+struct indicator_t {
+	u32 ind;	/* u32 because of compare-and-swap performance */
+	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
+};
+
+/* list of thin interrupt input queues */
+static LIST_HEAD(tiq_list);
+static DEFINE_MUTEX(tiq_list_lock);
+
+/* adapter local summary indicator */
+static u8 *tiqdio_alsi;
+
+static struct indicator_t *q_indicators;
+
+u64 last_ai_time;
+
+/* returns addr for the device state change indicator */
+static u32 *get_indicator(void)
+{
+	int i;
+
+	for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
+		if (!atomic_read(&q_indicators[i].count)) {
+			atomic_set(&q_indicators[i].count, 1);
+			return &q_indicators[i].ind;
+		}
+
+	/* use the shared indicator */
+	atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
+	return &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
+static void put_indicator(u32 *addr)
+{
+	int i;
+
+	if (!addr)
+		return;
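+	/* recover the index from the indicator's offset within q_indicators */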
+	i = ((unsigned long)addr - (unsigned long)q_indicators) /
+		sizeof(struct indicator_t);
+	atomic_dec(&q_indicators[i].count);
+}
+
+void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
+{
+	mutex_lock(&tiq_list_lock);
+	BUG_ON(irq_ptr->nr_input_qs < 1);
+	list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
+	mutex_unlock(&tiq_list_lock);
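+	/* mark the device state change indicator so this queue gets an initial scan */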
+	xchg(irq_ptr->dsci, 1 << 7);
+}
+
+void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+{
+	struct qdio_q *q;
+
+	BUG_ON(irq_ptr->nr_input_qs < 1);
+	q = irq_ptr->input_qs[0];
+	/* if establish triggered an error */
+	if (!q || !q->entry.prev || !q->entry.next)
+		return;
+
+	mutex_lock(&tiq_list_lock);
+	list_del_rcu(&q->entry);
+	mutex_unlock(&tiq_list_lock);
+	synchronize_rcu();
+}
+
+static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
+{
+	return irq_ptr->nr_input_qs > 1;
+}
+
+static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
+{
+	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
+static inline int shared_ind(struct qdio_irq *irq_ptr)
+{
+	return references_shared_dsci(irq_ptr) ||
+		has_multiple_inq_on_dsci(irq_ptr);
+}
+
+void clear_nonshared_ind(struct qdio_irq *irq_ptr)
+{
+	if (!is_thinint_irq(irq_ptr))
+		return;
+	if (shared_ind(irq_ptr))
+		return;
+	xchg(irq_ptr->dsci, 0);
+}
+
+int test_nonshared_ind(struct qdio_irq *irq_ptr)
+{
+	if (!is_thinint_irq(irq_ptr))
+		return 0;
+	if (shared_ind(irq_ptr))
+		return 0;
+	if (*irq_ptr->dsci)
+		return 1;
+	else
+		return 0;
+}
+
+static inline u32 clear_shared_ind(void)
+{
+	if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
+		return 0;
+	return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
+}
+
+static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
+{
+	struct qdio_q *q;
+	int i;
+
+	for_each_input_queue(irq, q, i) {
+		if (!references_shared_dsci(irq) &&
+		    has_multiple_inq_on_dsci(irq))
+			xchg(q->irq_ptr->dsci, 0);
+
+		if (q->u.in.queue_start_poll) {
+			/* skip if polling is enabled or already in work */
+			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+					     &q->u.in.queue_irq_state)) {
+				qperf_inc(q, int_discarded);
+				continue;
+			}
+
+			/* avoid dsci clear here, done after processing */
+			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+						 q->irq_ptr->int_parm);
+		} else {
+			if (!shared_ind(q->irq_ptr))
+				xchg(q->irq_ptr->dsci, 0);
+
+			/*
+			 * Call inbound processing but not directly
+			 * since that could starve other thinint queues.
+			 */
+			tasklet_schedule(&q->tasklet);
+		}
+	}
+}
+
+/**
+ * tiqdio_thinint_handler - thin interrupt handler for qdio
+ * @alsi: pointer to adapter local summary indicator
+ * @data: NULL
+ */
+static void tiqdio_thinint_handler(void *alsi, void *data)
+{
+	u32 si_used = clear_shared_ind();
+	struct qdio_q *q;
+
+	last_ai_time = S390_lowcore.int_clock;
+	kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;
+
+	/* protect tiq_list entries, only changed in activate or shutdown */
+	rcu_read_lock();
+
+	/* check for work on all inbound thinint queues */
+	list_for_each_entry_rcu(q, &tiq_list, entry) {
+		struct qdio_irq *irq;
+
+		/* only process queues from changed sets */
+		irq = q->irq_ptr;
+		if (unlikely(references_shared_dsci(irq))) {
+			if (!si_used)
+				continue;
+		} else if (!*irq->dsci)
+			continue;
+
+		tiqdio_call_inq_handlers(irq);
+
+		qperf_inc(q, adapter_int);
+	}
+	rcu_read_unlock();
+}
+
+static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
+{
+	struct scssc_area *scssc_area;
+	int rc;
+
+	scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
+	memset(scssc_area, 0, PAGE_SIZE);
+
+	if (reset) {
+		scssc_area->summary_indicator_addr = 0;
+		scssc_area->subchannel_indicator_addr = 0;
+	} else {
+		scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
+		scssc_area->subchannel_indicator_addr =
+			virt_to_phys(irq_ptr->dsci);
+	}
+
+	scssc_area->request = (struct chsc_header) {
+		.length = 0x0fe0,
+		.code	= 0x0021,
+	};
+	scssc_area->operation_code = 0;
+	scssc_area->ks = PAGE_DEFAULT_KEY >> 4;
+	scssc_area->kc = PAGE_DEFAULT_KEY >> 4;
+	scssc_area->isc = QDIO_AIRQ_ISC;
+	scssc_area->schid = irq_ptr->schid;
+
+	/* enable the time delay disablement facility */
+	if (css_general_characteristics.aif_tdd)
+		scssc_area->word_with_d_bit = 0x10000000;
+
+	rc = chsc(scssc_area);
+	if (rc)
+		return -EIO;
+
+	rc = chsc_error_from_response(scssc_area->response.code);
+	if (rc) {
+		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
+			  scssc_area->response.code);
+		DBF_ERROR_HEX(&scssc_area->response, sizeof(void *));
+		return rc;
+	}
+
+	DBF_EVENT("setscind");
+	DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long));
+	DBF_HEX(&scssc_area->subchannel_indicator_addr,	sizeof(unsigned long));
+	return 0;
+}
+
+/* allocate non-shared indicators and shared indicator */
+int __init tiqdio_allocate_memory(void)
+{
+	q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
+			     GFP_KERNEL);
+	if (!q_indicators)
+		return -ENOMEM;
+	return 0;
+}
+
+void tiqdio_free_memory(void)
+{
+	kfree(q_indicators);
+}
+
+int __init tiqdio_register_thinints(void)
+{
+	isc_register(QDIO_AIRQ_ISC);
+	tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
+						      NULL, QDIO_AIRQ_ISC);
+	if (IS_ERR(tiqdio_alsi)) {
+		DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi));
+		tiqdio_alsi = NULL;
+		isc_unregister(QDIO_AIRQ_ISC);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+int qdio_establish_thinint(struct qdio_irq *irq_ptr)
+{
+	if (!is_thinint_irq(irq_ptr))
+		return 0;
+	return set_subchannel_ind(irq_ptr, 0);
+}
+
+void qdio_setup_thinint(struct qdio_irq *irq_ptr)
+{
+	if (!is_thinint_irq(irq_ptr))
+		return;
+	irq_ptr->dsci = get_indicator();
+	DBF_HEX(&irq_ptr->dsci, sizeof(void *));
+}
+
+void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
+{
+	if (!is_thinint_irq(irq_ptr))
+		return;
+
+	/* reset adapter interrupt indicators */
+	set_subchannel_ind(irq_ptr, 1);
+	put_indicator(irq_ptr->dsci);
+}
+
+void __exit tiqdio_unregister_thinints(void)
+{
+	WARN_ON(!list_empty(&tiq_list));
+
+	if (tiqdio_alsi) {
+		s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
+		isc_unregister(QDIO_AIRQ_ISC);
+	}
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/crypto/Makefile b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/Makefile
new file mode 100644
index 0000000..af3c7f1
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/Makefile
@@ -0,0 +1,7 @@
+#
+# S/390 crypto devices
+#
+
+ap-objs := ap_bus.o
+obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o
+obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/crypto/ap_bus.c b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/ap_bus.c
new file mode 100644
index 0000000..7e9a72e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/ap_bus.c
@@ -0,0 +1,1866 @@
+/*
+ * linux/drivers/s390/crypto/ap_bus.c
+ *
+ * Copyright (C) 2006 IBM Corporation
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *	      Felix Beck <felix.beck@de.ibm.com>
+ *	      Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * Adjunct processor bus.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define KMSG_COMPONENT "ap"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <asm/reset.h>
+#include <asm/airq.h>
+#include <linux/atomic.h>
+#include <asm/isc.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <asm/facility.h>
+
+#include "ap_bus.h"
+
+/* Some prototypes. */
+static void ap_scan_bus(struct work_struct *);
+static void ap_poll_all(unsigned long);
+static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
+static int ap_poll_thread_start(void);
+static void ap_poll_thread_stop(void);
+static void ap_request_timeout(unsigned long);
+static inline void ap_schedule_poll_timer(void);
+static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
+static int ap_device_remove(struct device *dev);
+static int ap_device_probe(struct device *dev);
+static void ap_interrupt_handler(void *unused1, void *unused2);
+static void ap_reset(struct ap_device *ap_dev);
+static void ap_config_timeout(unsigned long ptr);
+static int ap_select_domain(void);
+
+/*
+ * Module description.
+ */
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
+		   "Copyright 2006 IBM Corporation");
+MODULE_LICENSE("GPL");
+
+/*
+ * Module parameter
+ */
+int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
+module_param_named(domain, ap_domain_index, int, 0000);
+MODULE_PARM_DESC(domain, "domain index for ap devices");
+EXPORT_SYMBOL(ap_domain_index);
+
+static int ap_thread_flag = 0;
+module_param_named(poll_thread, ap_thread_flag, int, 0000);
+MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
+
+static struct device *ap_root_device = NULL;
+static DEFINE_SPINLOCK(ap_device_list_lock);
+static LIST_HEAD(ap_device_list);
+
+/*
+ * Workqueue & timer for bus rescan.
+ */
+static struct workqueue_struct *ap_work_queue;
+static struct timer_list ap_config_timer;
+static int ap_config_time = AP_CONFIG_TIME;
+static DECLARE_WORK(ap_config_work, ap_scan_bus);
+
+/*
+ * Tasklet & timer for AP request polling and interrupts
+ */
+static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
+static atomic_t ap_poll_requests = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
+static struct task_struct *ap_poll_kthread = NULL;
+static DEFINE_MUTEX(ap_poll_thread_mutex);
+static DEFINE_SPINLOCK(ap_poll_timer_lock);
+static void *ap_interrupt_indicator;
+static struct hrtimer ap_poll_timer;
+/* In LPAR poll with 4kHz frequency, i.e. every 250000 nanoseconds.
+ * Under z/VM change to 1500000 nanoseconds to adjust to z/VM polling. */
+static unsigned long long poll_timeout = 250000;
+
+/* Suspend flag */
+static int ap_suspend_flag;
+/* Flag to check if domain was set through module parameter domain=. This is
+ * important when suspend and resume are done in a z/VM environment where the
+ * domain might change. */
+static int user_set_domain = 0;
+static struct bus_type ap_bus_type;
+
+/**
+ * ap_using_interrupts() - Returns non-zero if interrupt support is
+ * available.
+ */
+static inline int ap_using_interrupts(void)
+{
+	return ap_interrupt_indicator != NULL;
+}
+
+/**
+ * ap_instructions_available() - Test if AP instructions are available.
+ *
+ * Returns 0 if the AP instructions are installed.
+ */
+static inline int ap_instructions_available(void)
+{
+	register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
+	register unsigned long reg1 asm ("1") = -ENODEV;
+	register unsigned long reg2 asm ("2") = 0UL;
+
+	asm volatile(
+		"   .long 0xb2af0000\n"		/* PQAP(TAPQ) */
+		"0: la    %1,0\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
+	return reg1;
+}
+
+/**
+ * ap_interrupts_available(): Test if AP interrupts are available.
+ *
+ * Returns 1 if AP interrupts are available.
+ */
+static int ap_interrupts_available(void)
+{
+	return test_facility(2) && test_facility(65);
+}
+
+/**
+ * ap_test_queue(): Test adjunct processor queue.
+ * @qid: The AP queue number
+ * @queue_depth: Pointer to queue depth value
+ * @device_type: Pointer to device type value
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status
+ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
+{
+	register unsigned long reg0 asm ("0") = qid;
+	register struct ap_queue_status reg1 asm ("1");
+	register unsigned long reg2 asm ("2") = 0UL;
+
+	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
+		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
+	*device_type = (int) (reg2 >> 24);
+	*queue_depth = (int) (reg2 & 0xff);
+	return reg1;
+}
+
+/**
+ * ap_reset_queue(): Reset adjunct processor queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
+{
+	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
+	register struct ap_queue_status reg1 asm ("1");
+	register unsigned long reg2 asm ("2") = 0UL;
+
+	asm volatile(
+		".long 0xb2af0000"		/* PQAP(RAPQ) */
+		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
+	return reg1;
+}
+
+#ifdef CONFIG_64BIT
+/**
+ * ap_queue_interruption_control(): Enable interruption for a specific AP.
+ * @qid: The AP queue number
+ * @ind: The notification indicator byte
+ *
+ * Returns AP queue status.
+ */
+static inline struct ap_queue_status
+ap_queue_interruption_control(ap_qid_t qid, void *ind)
+{
+	register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
+	register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
+	register struct ap_queue_status reg1_out asm ("1");
+	register void *reg2 asm ("2") = ind;
+	asm volatile(
+		".long 0xb2af0000"		/* PQAP(AQIC) */
+		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
+		:
+		: "cc" );
+	return reg1_out;
+}
+#endif
+
+#ifdef CONFIG_64BIT
+static inline struct ap_queue_status
+__ap_query_functions(ap_qid_t qid, unsigned int *functions)
+{
+	register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
+	register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID;
+	register unsigned long reg2 asm ("2");
+
+	asm volatile(
+		".long 0xb2af0000\n"
+		"0:\n"
+		EX_TABLE(0b, 0b)
+		: "+d" (reg0), "+d" (reg1), "=d" (reg2)
+		:
+		: "cc");
+
+	*functions = (unsigned int)(reg2 >> 32);
+	return reg1;
+}
+#endif
+
+/**
+ * ap_query_functions(): Query supported functions.
+ * @qid: The AP queue number
+ * @functions: Pointer to functions field.
+ *
+ * Returns
+ *   0	     on success.
+ *   -ENODEV  if queue not valid.
+ *   -EBUSY   if device busy.
+ *   -EINVAL  if query function is not supported
+ */
+static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
+{
+#ifdef CONFIG_64BIT
+	struct ap_queue_status status;
+	int i;
+	status = __ap_query_functions(qid, functions);
+
+	for (i = 0; i < AP_MAX_RESET; i++) {
+		if (ap_queue_status_invalid_test(&status))
+			return -ENODEV;
+
+		switch (status.response_code) {
+		case AP_RESPONSE_NORMAL:
+			return 0;
+		case AP_RESPONSE_RESET_IN_PROGRESS:
+		case AP_RESPONSE_BUSY:
+			break;
+		case AP_RESPONSE_Q_NOT_AVAIL:
+		case AP_RESPONSE_DECONFIGURED:
+		case AP_RESPONSE_CHECKSTOPPED:
+		case AP_RESPONSE_INVALID_ADDRESS:
+			return -ENODEV;
+		case AP_RESPONSE_OTHERWISE_CHANGED:
+			break;
+		default:
+			break;
+		}
+		if (i < AP_MAX_RESET - 1) {
+			udelay(5);
+			status = __ap_query_functions(qid, functions);
+		}
+	}
+	return -EBUSY;
+#else
+	return -EINVAL;
+#endif
+}
+
+/**
+ * ap_4096_commands_available(): Check for availability of 4096 bit RSA
+ * support.
+ * @qid: The AP queue number
+ *
+ * Returns 1 if 4096 bit RSA keys are supported for the AP, returns 0 if not.
+ */
+int ap_4096_commands_available(ap_qid_t qid)
+{
+	unsigned int functions;
+
+	if (ap_query_functions(qid, &functions))
+		return 0;
+
+	return test_ap_facility(functions, 1) &&
+	       test_ap_facility(functions, 2);
+}
+EXPORT_SYMBOL(ap_4096_commands_available);
+
+/**
+ * ap_queue_enable_interruption(): Enable interruption on an AP.
+ * @qid: The AP queue number
+ * @ind: the notification indicator byte
+ *
+ * Enables interruption on an AP queue via ap_queue_interruption_control().
+ * Based on the return value it waits a while and then uses ap_test_queue()
+ * to check whether interrupts have been switched on.
+ */
+static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
+{
+#ifdef CONFIG_64BIT
+	struct ap_queue_status status;
+	int t_depth, t_device_type, rc, i;
+
+	rc = -EBUSY;
+	status = ap_queue_interruption_control(qid, ind);
+
+	for (i = 0; i < AP_MAX_RESET; i++) {
+		switch (status.response_code) {
+		case AP_RESPONSE_NORMAL:
+			if (status.int_enabled)
+				return 0;
+			break;
+		case AP_RESPONSE_RESET_IN_PROGRESS:
+		case AP_RESPONSE_BUSY:
+			break;
+		case AP_RESPONSE_Q_NOT_AVAIL:
+		case AP_RESPONSE_DECONFIGURED:
+		case AP_RESPONSE_CHECKSTOPPED:
+		case AP_RESPONSE_INVALID_ADDRESS:
+			return -ENODEV;
+		case AP_RESPONSE_OTHERWISE_CHANGED:
+			if (status.int_enabled)
+				return 0;
+			break;
+		default:
+			break;
+		}
+		if (i < AP_MAX_RESET - 1) {
+			udelay(5);
+			status = ap_test_queue(qid, &t_depth, &t_device_type);
+		}
+	}
+	return rc;
+#else
+	return -EINVAL;
+#endif
+}
+
+/**
+ * __ap_send(): Send message to adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ * @special: Special Bit
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on NQAP can't happen because the L bit is 1.
+ * Condition code 2 on NQAP also means the send is incomplete,
+ * because a segment boundary was reached. The NQAP is repeated.
+ */
+static inline struct ap_queue_status
+__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
+	  unsigned int special)
+{
+	typedef struct { char _[length]; } msgblock;
+	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
+	register struct ap_queue_status reg1 asm ("1");
+	register unsigned long reg2 asm ("2") = (unsigned long) msg;
+	register unsigned long reg3 asm ("3") = (unsigned long) length;
+	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
+	register unsigned long reg5 asm ("5") = (unsigned int) psmid;
+
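+	/* set the special-command bit in GR0 if requested */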
+	if (special == 1)
+		reg0 |= 0x400000UL;
+
+	asm volatile (
+		"0: .long 0xb2ad0042\n"		/* NQAP */
+		"   brc   2,0b"
+		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
+		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
+		: "cc" );
+	return reg1;
+}
+
+int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+{
+	struct ap_queue_status status;
+
+	status = __ap_send(qid, psmid, msg, length, 0);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		return 0;
+	case AP_RESPONSE_Q_FULL:
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+		return -EBUSY;
+	case AP_RESPONSE_REQ_FAC_NOT_INST:
+		return -EINVAL;
+	default:	/* Device is gone. */
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL(ap_send);
+
+/**
+ * __ap_recv(): Receive message from adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: Pointer to program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on DQAP means the receive has taken place
+ * but only partially.	The response is incomplete, hence the
+ * DQAP is repeated.
+ * Condition code 2 on DQAP also means the receive is incomplete,
+ * this time because a segment boundary was reached. Again, the
+ * DQAP is repeated.
+ * Note that gpr2 is used by the DQAP instruction to keep track of
+ * any 'residual' length, in case the instruction gets interrupted.
+ * Hence it gets zeroed before the instruction.
+ */
+static inline struct ap_queue_status
+__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
+{
+	typedef struct { char _[length]; } msgblock;
+	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
+	register struct ap_queue_status reg1 asm ("1");
+	register unsigned long reg2 asm("2") = 0UL;
+	register unsigned long reg4 asm("4") = (unsigned long) msg;
+	register unsigned long reg5 asm("5") = (unsigned long) length;
+	register unsigned long reg6 asm("6") = 0UL;
+	register unsigned long reg7 asm("7") = 0UL;
+
+
+	asm volatile(
+		"0: .long 0xb2ae0064\n"		/* DQAP */
+		"   brc   6,0b\n"
+		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
+		"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
+		"=m" (*(msgblock *) msg) : : "cc" );
+	*psmid = (((unsigned long long) reg6) << 32) + reg7;
+	return reg1;
+}
+
+int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
+{
+	struct ap_queue_status status;
+
+	status = __ap_recv(qid, psmid, msg, length);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		return 0;
+	case AP_RESPONSE_NO_PENDING_REPLY:
+		if (status.queue_empty)
+			return -ENOENT;
+		return -EBUSY;
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+		return -EBUSY;
+	default:
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL(ap_recv);
+
+/**
+ * ap_query_queue(): Check if an AP queue is available.
+ * @qid: The AP queue number
+ * @queue_depth: Pointer to queue depth value
+ * @device_type: Pointer to device type value
+ *
+ * The test is repeated for AP_MAX_RESET times.
+ */
+static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
+{
+	struct ap_queue_status status;
+	int t_depth, t_device_type, rc, i;
+
+	rc = -EBUSY;
+	for (i = 0; i < AP_MAX_RESET; i++) {
+		status = ap_test_queue(qid, &t_depth, &t_device_type);
+		switch (status.response_code) {
+		case AP_RESPONSE_NORMAL:
+			*queue_depth = t_depth + 1;
+			*device_type = t_device_type;
+			rc = 0;
+			break;
+		case AP_RESPONSE_Q_NOT_AVAIL:
+			rc = -ENODEV;
+			break;
+		case AP_RESPONSE_RESET_IN_PROGRESS:
+			break;
+		case AP_RESPONSE_DECONFIGURED:
+			rc = -ENODEV;
+			break;
+		case AP_RESPONSE_CHECKSTOPPED:
+			rc = -ENODEV;
+			break;
+		case AP_RESPONSE_INVALID_ADDRESS:
+			rc = -ENODEV;
+			break;
+		case AP_RESPONSE_OTHERWISE_CHANGED:
+			break;
+		case AP_RESPONSE_BUSY:
+			break;
+		default:
+			BUG();
+		}
+		if (rc != -EBUSY)
+			break;
+		if (i < AP_MAX_RESET - 1)
+			udelay(5);
+	}
+	return rc;
+}
+
+/**
+ * ap_init_queue(): Reset an AP queue.
+ * @qid: The AP queue number
+ *
+ * Reset an AP queue and wait for it to become available again.
+ */
+static int ap_init_queue(ap_qid_t qid)
+{
+	struct ap_queue_status status;
+	int rc, dummy, i;
+
+	rc = -ENODEV;
+	status = ap_reset_queue(qid);
+	for (i = 0; i < AP_MAX_RESET; i++) {
+		switch (status.response_code) {
+		case AP_RESPONSE_NORMAL:
+			if (status.queue_empty)
+				rc = 0;
+			break;
+		case AP_RESPONSE_Q_NOT_AVAIL:
+		case AP_RESPONSE_DECONFIGURED:
+		case AP_RESPONSE_CHECKSTOPPED:
+			i = AP_MAX_RESET;	/* return with -ENODEV */
+			break;
+		case AP_RESPONSE_RESET_IN_PROGRESS:
+			rc = -EBUSY;
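+			/* fall through */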
+		case AP_RESPONSE_BUSY:
+		default:
+			break;
+		}
+		if (rc != -ENODEV && rc != -EBUSY)
+			break;
+		if (i < AP_MAX_RESET - 1) {
+			udelay(5);
+			status = ap_test_queue(qid, &dummy, &dummy);
+		}
+	}
+	if (rc == 0 && ap_using_interrupts()) {
+		rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator);
+		/* If interruption mode is supported by the machine,
+		 * but an AP cannot be enabled for interruption, then
+		 * the AP will be discarded.
+		 */
+		if (rc)
+			pr_err("Registering adapter interrupts for "
+			       "AP %d failed\n", AP_QID_DEVICE(qid));
+	}
+	return rc;
+}
+
+/**
+ * ap_increase_queue_count(): Arm request timeout.
+ * @ap_dev: Pointer to an AP device.
+ *
+ * Arm request timeout if an AP device was idle and a new request is submitted.
+ */
+static void ap_increase_queue_count(struct ap_device *ap_dev)
+{
+	int timeout = ap_dev->drv->request_timeout;
+
+	ap_dev->queue_count++;
+	if (ap_dev->queue_count == 1) {
+		mod_timer(&ap_dev->timeout, jiffies + timeout);
+		ap_dev->reset = AP_RESET_ARMED;
+	}
+}
+
+/**
+ * ap_decrease_queue_count(): Decrease queue count.
+ * @ap_dev: Pointer to an AP device.
+ *
+ * If AP device is still alive, re-schedule request timeout if there are still
+ * pending requests.
+ */
+static void ap_decrease_queue_count(struct ap_device *ap_dev)
+{
+	int timeout = ap_dev->drv->request_timeout;
+
+	ap_dev->queue_count--;
+	if (ap_dev->queue_count > 0)
+		mod_timer(&ap_dev->timeout, jiffies + timeout);
+	else
+		/*
+		 * The timeout timer should be disabled now - since
+		 * del_timer_sync() is very expensive, we just tell via the
+		 * reset flag to ignore the pending timeout timer.
+		 */
+		ap_dev->reset = AP_RESET_IGNORE;
+}
+
+/*
+ * AP device related attributes.
+ */
+static ssize_t ap_hwtype_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct ap_device *ap_dev = to_ap_dev(dev);
+	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
+}
+
+static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
+static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct ap_device *ap_dev = to_ap_dev(dev);
+	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
+}
+
+static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
+static ssize_t ap_request_count_show(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct ap_device *ap_dev = to_ap_dev(dev);
+	int rc;
+
+	spin_lock_bh(&ap_dev->lock);
+	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
+	spin_unlock_bh(&ap_dev->lock);
+	return rc;
+}
+
+static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
+
+static ssize_t ap_modalias_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
+}
+
+static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
+
+static struct attribute *ap_dev_attrs[] = {
+	&dev_attr_hwtype.attr,
+	&dev_attr_depth.attr,
+	&dev_attr_request_count.attr,
+	&dev_attr_modalias.attr,
+	NULL
+};
+static struct attribute_group ap_dev_attr_group = {
+	.attrs = ap_dev_attrs
+};
+
+/**
+ * ap_bus_match()
+ * @dev: Pointer to device
+ * @drv: Pointer to device_driver
+ *
+ * Match the device type of the AP device against the ID table of the driver.
+ */
+static int ap_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct ap_device *ap_dev = to_ap_dev(dev);
+	struct ap_driver *ap_drv = to_ap_drv(drv);
+	struct ap_device_id *id;
+
+	/*
+	 * Compare device type of the device with the list of
+	 * supported types of the device_driver.
+	 */
+	for (id = ap_drv->ids; id->match_flags; id++) {
+		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
+		    (id->dev_type != ap_dev->device_type))
+			continue;
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * ap_uevent(): Uevent function for AP devices.
+ * @dev: Pointer to device
+ * @env: Pointer to kobj_uevent_env
+ *
+ * It sets up a single environment variable DEV_TYPE which contains the
+ * hardware device type.
+ */
+static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
+{
+	struct ap_device *ap_dev = to_ap_dev(dev);
+	int retval = 0;
+
+	if (!ap_dev)
+		return -ENODEV;
+
+	/* Set up DEV_TYPE environment variable. */
+	retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
+	if (retval)
+		return retval;
+
+	/* Add MODALIAS= */
+	retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
+
+	return retval;
+}
+
+static int ap_bus_suspend(struct device *dev, pm_message_t state)
+{
+	struct ap_device *ap_dev = to_ap_dev(dev);
+	unsigned long flags;
+
+	if (!ap_suspend_flag) {
+		ap_suspend_flag = 1;
+
+		/* Disable scanning for devices, since we do not want to scan
+		 * for them again after removal.
+		 */
+		del_timer_sync(&ap_config_timer);
+		if (ap_work_queue != NULL) {
+			destroy_workqueue(ap_work_queue);
+			ap_work_queue = NULL;
+		}
+
+		tasklet_disable(&ap_tasklet);
+	}
+	/* Poll on the device until all requests are finished. */
+	do {
+		flags = 0;
+		spin_lock_bh(&ap_dev->lock);
+		__ap_poll_device(ap_dev, &flags);
+		spin_unlock_bh(&ap_dev->lock);
+	} while ((flags & 1) || (flags & 2));
+
+	spin_lock_bh(&ap_dev->lock);
+	ap_dev->unregistered = 1;
+	spin_unlock_bh(&ap_dev->lock);
+
+	return 0;
+}
+
+static int ap_bus_resume(struct device *dev)
+{
+	int rc = 0;
+	struct ap_device *ap_dev = to_ap_dev(dev);
+
+	if (ap_suspend_flag) {
+		ap_suspend_flag = 0;
+		if (!ap_interrupts_available())
+			ap_interrupt_indicator = NULL;
+		if (!user_set_domain) {
+			ap_domain_index = -1;
+			ap_select_domain();
+		}
+		init_timer(&ap_config_timer);
+		ap_config_timer.function = ap_config_timeout;
+		ap_config_timer.data = 0;
+		ap_config_timer.expires = jiffies + ap_config_time * HZ;
+		add_timer(&ap_config_timer);
+		ap_work_queue = create_singlethread_workqueue("kapwork");
+		if (!ap_work_queue)
+			return -ENOMEM;
+		tasklet_enable(&ap_tasklet);
+		if (!ap_using_interrupts())
+			ap_schedule_poll_timer();
+		else
+			tasklet_schedule(&ap_tasklet);
+		if (ap_thread_flag)
+			rc = ap_poll_thread_start();
+	}
+	if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
+		spin_lock_bh(&ap_dev->lock);
+		ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
+				       ap_domain_index);
+		spin_unlock_bh(&ap_dev->lock);
+	}
+	queue_work(ap_work_queue, &ap_config_work);
+
+	return rc;
+}
+
+static struct bus_type ap_bus_type = {
+	.name = "ap",
+	.match = &ap_bus_match,
+	.uevent = &ap_uevent,
+	.suspend = ap_bus_suspend,
+	.resume = ap_bus_resume
+};
+
+static int ap_device_probe(struct device *dev)
+{
+	struct ap_device *ap_dev = to_ap_dev(dev);
+	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
+	int rc;
+
+	ap_dev->drv = ap_drv;
+	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
+	if (!rc) {
+		spin_lock_bh(&ap_device_list_lock);
+		list_add(&ap_dev->list, &ap_device_list);
+		spin_unlock_bh(&ap_device_list_lock);
+	}
+	return rc;
+}
+
+/**
+ * __ap_flush_queue(): Flush requests.
+ * @ap_dev: Pointer to the AP device
+ *
+ * Flush all requests from the request/pending queue of an AP device.
+ */
+static void __ap_flush_queue(struct ap_device *ap_dev)
+{
+	struct ap_message *ap_msg, *next;
+
+	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
+		list_del_init(&ap_msg->list);
+		ap_dev->pendingq_count--;
+		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+	}
+	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
+		list_del_init(&ap_msg->list);
+		ap_dev->requestq_count--;
+		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+	}
+}
+
+void ap_flush_queue(struct ap_device *ap_dev)
+{
+	spin_lock_bh(&ap_dev->lock);
+	__ap_flush_queue(ap_dev);
+	spin_unlock_bh(&ap_dev->lock);
+}
+EXPORT_SYMBOL(ap_flush_queue);
+
+static int ap_device_remove(struct device *dev)
+{
+	struct ap_device *ap_dev = to_ap_dev(dev);
+	struct ap_driver *ap_drv = ap_dev->drv;
+
+	ap_flush_queue(ap_dev);
+	del_timer_sync(&ap_dev->timeout);
+	spin_lock_bh(&ap_device_list_lock);
+	list_del_init(&ap_dev->list);
+	spin_unlock_bh(&ap_device_list_lock);
+	if (ap_drv->remove)
+		ap_drv->remove(ap_dev);
+	spin_lock_bh(&ap_dev->lock);
+	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
+	spin_unlock_bh(&ap_dev->lock);
+	return 0;
+}
+
+int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
+		       char *name)
+{
+	struct device_driver *drv = &ap_drv->driver;
+
+	drv->bus = &ap_bus_type;
+	drv->probe = ap_device_probe;
+	drv->remove = ap_device_remove;
+	drv->owner = owner;
+	drv->name = name;
+	return driver_register(drv);
+}
+EXPORT_SYMBOL(ap_driver_register);
+
+void ap_driver_unregister(struct ap_driver *ap_drv)
+{
+	driver_unregister(&ap_drv->driver);
+}
+EXPORT_SYMBOL(ap_driver_unregister);
+
+/*
+ * AP bus attributes.
+ */
+static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
+}
+
+static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
+
+static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
+}
+
+static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			ap_using_interrupts() ? 1 : 0);
+}
+
+static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);
+
+static ssize_t ap_config_time_store(struct bus_type *bus,
+				    const char *buf, size_t count)
+{
+	int time;
+
+	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
+		return -EINVAL;
+	ap_config_time = time;
+	if (!timer_pending(&ap_config_timer) ||
+	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
+		ap_config_timer.expires = jiffies + ap_config_time * HZ;
+		add_timer(&ap_config_timer);
+	}
+	return count;
+}
+
+static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
+
+static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
+}
+
+static ssize_t ap_poll_thread_store(struct bus_type *bus,
+				    const char *buf, size_t count)
+{
+	int flag, rc;
+
+	if (sscanf(buf, "%d\n", &flag) != 1)
+		return -EINVAL;
+	if (flag) {
+		rc = ap_poll_thread_start();
+		if (rc)
+			return rc;
+	}
+	else
+		ap_poll_thread_stop();
+	return count;
+}
+
+static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
+
+static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
+}
+
+static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
+				  size_t count)
+{
+	unsigned long long time;
+	ktime_t hr_time;
+
+	/* 120 seconds = maximum poll interval */
+	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
+	    time > 120000000000ULL)
+		return -EINVAL;
+	poll_timeout = time;
+	hr_time = ktime_set(0, poll_timeout);
+
+	if (!hrtimer_is_queued(&ap_poll_timer) ||
+	    !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
+		hrtimer_set_expires(&ap_poll_timer, hr_time);
+		hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
+	}
+	return count;
+}
+
+static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
+
+static struct bus_attribute *const ap_bus_attrs[] = {
+	&bus_attr_ap_domain,
+	&bus_attr_config_time,
+	&bus_attr_poll_thread,
+	&bus_attr_ap_interrupts,
+	&bus_attr_poll_timeout,
+	NULL,
+};
+
+/**
+ * ap_select_domain(): Select an AP domain.
+ *
+ * Pick one of the 16 AP domains.
+ */
+static int ap_select_domain(void)
+{
+	int queue_depth, device_type, count, max_count, best_domain;
+	int rc, i, j;
+
+	/*
+	 * We want to use a single domain. Either the one specified with
+	 * the "domain=" parameter or the domain with the maximum number
+	 * of devices.
+	 */
+	if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
+		/* Domain has already been selected. */
+		return 0;
+	best_domain = -1;
+	max_count = 0;
+	for (i = 0; i < AP_DOMAINS; i++) {
+		count = 0;
+		for (j = 0; j < AP_DEVICES; j++) {
+			ap_qid_t qid = AP_MKQID(j, i);
+			rc = ap_query_queue(qid, &queue_depth, &device_type);
+			if (rc)
+				continue;
+			count++;
+		}
+		if (count > max_count) {
+			max_count = count;
+			best_domain = i;
+		}
+	}
+	if (best_domain >= 0) {
+		ap_domain_index = best_domain;
+		return 0;
+	}
+	return -ENODEV;
+}
+
+/**
+ * ap_probe_device_type(): Find the device type of an AP.
+ * @ap_dev: pointer to the AP device.
+ *
+ * Find the device type if query queue returned a device type of 0.
+ */
+static int ap_probe_device_type(struct ap_device *ap_dev)
+{
+	static unsigned char msg[] = {
+		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
+		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
+		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
+		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
+		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
+		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
+		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
+		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
+		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
+		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
+		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
+		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
+		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
+		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
+		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
+		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
+		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
+		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
+		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
+		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
+		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
+		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
+		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
+		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
+		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
+	};
+	struct ap_queue_status status;
+	unsigned long long psmid;
+	char *reply;
+	int rc, i;
+
+	reply = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!reply) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
+			   msg, sizeof(msg), 0);
+	if (status.response_code != AP_RESPONSE_NORMAL) {
+		rc = -ENODEV;
+		goto out_free;
+	}
+
+	/* Wait for the test message to complete. */
+	for (i = 0; i < 6; i++) {
+		mdelay(300);
+		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
+		if (status.response_code == AP_RESPONSE_NORMAL &&
+		    psmid == 0x0102030405060708ULL)
+			break;
+	}
+	if (i < 6) {
+		/* Got an answer. */
+		if (reply[0] == 0x00 && reply[1] == 0x86)
+			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
+		else
+			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
+		rc = 0;
+	} else
+		rc = -ENODEV;
+
+out_free:
+	free_page((unsigned long) reply);
+out:
+	return rc;
+}
+
+static void ap_interrupt_handler(void *unused1, void *unused2)
+{
+	kstat_cpu(smp_processor_id()).irqs[IOINT_APB]++;
+	tasklet_schedule(&ap_tasklet);
+}
+
+/**
+ * __ap_scan_bus(): Check whether a device matches a given AP queue id.
+ * @dev: Pointer to device
+ * @data: Pointer to data
+ *
+ * bus_find_device() callback used by ap_scan_bus() to look up existing devices.
+ */
+static int __ap_scan_bus(struct device *dev, void *data)
+{
+	return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
+}
+
+static void ap_device_release(struct device *dev)
+{
+	struct ap_device *ap_dev = to_ap_dev(dev);
+
+	kfree(ap_dev);
+}
+
+static void ap_scan_bus(struct work_struct *unused)
+{
+	struct ap_device *ap_dev;
+	struct device *dev;
+	ap_qid_t qid;
+	int queue_depth, device_type;
+	unsigned int device_functions;
+	int rc, i;
+
+	if (ap_select_domain() != 0)
+		return;
+	for (i = 0; i < AP_DEVICES; i++) {
+		qid = AP_MKQID(i, ap_domain_index);
+		dev = bus_find_device(&ap_bus_type, NULL,
+				      (void *)(unsigned long)qid,
+				      __ap_scan_bus);
+		rc = ap_query_queue(qid, &queue_depth, &device_type);
+		if (dev) {
+			if (rc == -EBUSY) {
+				set_current_state(TASK_UNINTERRUPTIBLE);
+				schedule_timeout(AP_RESET_TIMEOUT);
+				rc = ap_query_queue(qid, &queue_depth,
+						    &device_type);
+			}
+			ap_dev = to_ap_dev(dev);
+			spin_lock_bh(&ap_dev->lock);
+			if (rc || ap_dev->unregistered) {
+				spin_unlock_bh(&ap_dev->lock);
+				if (ap_dev->unregistered)
+					i--;
+				device_unregister(dev);
+				put_device(dev);
+				continue;
+			}
+			spin_unlock_bh(&ap_dev->lock);
+			put_device(dev);
+			continue;
+		}
+		if (rc)
+			continue;
+		rc = ap_init_queue(qid);
+		if (rc)
+			continue;
+		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
+		if (!ap_dev)
+			break;
+		ap_dev->qid = qid;
+		ap_dev->queue_depth = queue_depth;
+		ap_dev->unregistered = 1;
+		spin_lock_init(&ap_dev->lock);
+		INIT_LIST_HEAD(&ap_dev->pendingq);
+		INIT_LIST_HEAD(&ap_dev->requestq);
+		INIT_LIST_HEAD(&ap_dev->list);
+		setup_timer(&ap_dev->timeout, ap_request_timeout,
+			    (unsigned long) ap_dev);
+		switch (device_type) {
+		case 0:
+			if (ap_probe_device_type(ap_dev)) {
+				kfree(ap_dev);
+				continue;
+			}
+			break;
+		case 10:
+			if (ap_query_functions(qid, &device_functions)) {
+				kfree(ap_dev);
+				continue;
+			}
+			if (test_ap_facility(device_functions, 3))
+				ap_dev->device_type = AP_DEVICE_TYPE_CEX3C;
+			else if (test_ap_facility(device_functions, 4))
+				ap_dev->device_type = AP_DEVICE_TYPE_CEX3A;
+			else {
+				kfree(ap_dev);
+				continue;
+			}
+			break;
+		default:
+			ap_dev->device_type = device_type;
+		}
+
+		ap_dev->device.bus = &ap_bus_type;
+		ap_dev->device.parent = ap_root_device;
+		if (dev_set_name(&ap_dev->device, "card%02x",
+				 AP_QID_DEVICE(ap_dev->qid))) {
+			kfree(ap_dev);
+			continue;
+		}
+		ap_dev->device.release = ap_device_release;
+		rc = device_register(&ap_dev->device);
+		if (rc) {
+			put_device(&ap_dev->device);
+			continue;
+		}
+		/* Add device attributes. */
+		rc = sysfs_create_group(&ap_dev->device.kobj,
+					&ap_dev_attr_group);
+		if (!rc) {
+			spin_lock_bh(&ap_dev->lock);
+			ap_dev->unregistered = 0;
+			spin_unlock_bh(&ap_dev->lock);
+		}
+		else
+			device_unregister(&ap_dev->device);
+	}
+}
+
+static void ap_config_timeout(unsigned long ptr)
+{
+	queue_work(ap_work_queue, &ap_config_work);
+	ap_config_timer.expires = jiffies + ap_config_time * HZ;
+	add_timer(&ap_config_timer);
+}
+
+/**
+ * __ap_schedule_poll_timer(): Schedule poll timer.
+ *
+ * Set up the timer to run the poll tasklet
+ */
+static inline void __ap_schedule_poll_timer(void)
+{
+	ktime_t hr_time;
+
+	spin_lock_bh(&ap_poll_timer_lock);
+	if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
+		goto out;
+	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
+		hr_time = ktime_set(0, poll_timeout);
+		hrtimer_forward_now(&ap_poll_timer, hr_time);
+		hrtimer_restart(&ap_poll_timer);
+	}
+out:
+	spin_unlock_bh(&ap_poll_timer_lock);
+}
+
+/**
+ * ap_schedule_poll_timer(): Schedule poll timer.
+ *
+ * Set up the timer to run the poll tasklet
+ */
+static inline void ap_schedule_poll_timer(void)
+{
+	if (ap_using_interrupts())
+		return;
+	__ap_schedule_poll_timer();
+}
+
+/**
+ * ap_poll_read(): Receive pending reply messages from an AP device.
+ * @ap_dev: pointer to the AP device
+ * @flags: pointer to control flags, bit 2^0 is set if another poll is
+ *	   required, bit 2^1 is set if the poll timer needs to get armed
+ *
+ * Returns 0 if the device is still present, -ENODEV if not.
+ */
+static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
+{
+	struct ap_queue_status status;
+	struct ap_message *ap_msg;
+
+	if (ap_dev->queue_count <= 0)
+		return 0;
+	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
+			   ap_dev->reply->message, ap_dev->reply->length);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		atomic_dec(&ap_poll_requests);
+		ap_decrease_queue_count(ap_dev);
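+		/* hand the reply to the pending request with the matching psmid */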
+		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
+			if (ap_msg->psmid != ap_dev->reply->psmid)
+				continue;
+			list_del_init(&ap_msg->list);
+			ap_dev->pendingq_count--;
+			ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
+			break;
+		}
+		if (ap_dev->queue_count > 0)
+			*flags |= 1;
+		break;
+	case AP_RESPONSE_NO_PENDING_REPLY:
+		if (status.queue_empty) {
+			/* The card shouldn't forget requests but who knows. */
+			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
+			ap_dev->queue_count = 0;
+			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
+			ap_dev->requestq_count += ap_dev->pendingq_count;
+			ap_dev->pendingq_count = 0;
+		} else
+			*flags |= 2;
+		break;
+	default:
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/**
+ * ap_poll_write(): Send messages from the request queue to an AP device.
+ * @ap_dev: pointer to the AP device
+ * @flags: pointer to control flags, bit 2^0 is set if another poll is
+ *	   required, bit 2^1 is set if the poll timer needs to get armed
+ *
+ * Returns 0 if the device is still present, -ENODEV if not.
+ */
+static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
+{
+	struct ap_queue_status status;
+	struct ap_message *ap_msg;
+
+	if (ap_dev->requestq_count <= 0 ||
+	    ap_dev->queue_count >= ap_dev->queue_depth)
+		return 0;
+	/* Start the next request on the queue. */
+	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
+	status = __ap_send(ap_dev->qid, ap_msg->psmid,
+			   ap_msg->message, ap_msg->length, ap_msg->special);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		atomic_inc(&ap_poll_requests);
+		ap_increase_queue_count(ap_dev);
+		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
+		ap_dev->requestq_count--;
+		ap_dev->pendingq_count++;
+		if (ap_dev->queue_count < ap_dev->queue_depth &&
+		    ap_dev->requestq_count > 0)
+			*flags |= 1;
+		*flags |= 2;
+		break;
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+		__ap_schedule_poll_timer();
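+		/* fall through */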
+	case AP_RESPONSE_Q_FULL:
+		*flags |= 2;
+		break;
+	case AP_RESPONSE_MESSAGE_TOO_BIG:
+	case AP_RESPONSE_REQ_FAC_NOT_INST:
+		return -EINVAL;
+	default:
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/**
+ * ap_poll_queue(): Poll AP device for pending replies and send new messages.
+ * @ap_dev: pointer to the bus device
+ * @flags: pointer to control flags, bit 2^0 is set if another poll is
+ *	   required, bit 2^1 is set if the poll timer needs to get armed
+ *
+ * Poll AP device for pending replies and send new messages. If either
+ * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
+ * Returns 0.
+ */
+static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
+{
+	int rc;
+
+	rc = ap_poll_read(ap_dev, flags);
+	if (rc)
+		return rc;
+	return ap_poll_write(ap_dev, flags);
+}
+
+/**
+ * __ap_queue_message(): Queue a message to a device.
+ * @ap_dev: pointer to the AP device
+ * @ap_msg: the message to be queued
+ *
+ * Queue a message to a device. Returns 0 if successful.
+ */
+static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
+{
+	struct ap_queue_status status;
+
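+	/* send right away if nothing is queued and the device still has room,
+	 * otherwise append the message to the request queue */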
+	if (list_empty(&ap_dev->requestq) &&
+	    ap_dev->queue_count < ap_dev->queue_depth) {
+		status = __ap_send(ap_dev->qid, ap_msg->psmid,
+				   ap_msg->message, ap_msg->length,
+				   ap_msg->special);
+		switch (status.response_code) {
+		case AP_RESPONSE_NORMAL:
+			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
+			atomic_inc(&ap_poll_requests);
+			ap_dev->pendingq_count++;
+			ap_increase_queue_count(ap_dev);
+			ap_dev->total_request_count++;
+			break;
+		case AP_RESPONSE_Q_FULL:
+		case AP_RESPONSE_RESET_IN_PROGRESS:
+			list_add_tail(&ap_msg->list, &ap_dev->requestq);
+			ap_dev->requestq_count++;
+			ap_dev->total_request_count++;
+			return -EBUSY;
+		case AP_RESPONSE_REQ_FAC_NOT_INST:
+		case AP_RESPONSE_MESSAGE_TOO_BIG:
+			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
+			return -EINVAL;
+		default:	/* Device is gone. */
+			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+			return -ENODEV;
+		}
+	} else {
+		list_add_tail(&ap_msg->list, &ap_dev->requestq);
+		ap_dev->requestq_count++;
+		ap_dev->total_request_count++;
+		return -EBUSY;
+	}
+	ap_schedule_poll_timer();
+	return 0;
+}
+
+void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_bh(&ap_dev->lock);
+	if (!ap_dev->unregistered) {
+		/* Make room on the queue by polling for finished requests. */
+		rc = ap_poll_queue(ap_dev, &flags);
+		if (!rc)
+			rc = __ap_queue_message(ap_dev, ap_msg);
+		if (!rc)
+			wake_up(&ap_poll_wait);
+		if (rc == -ENODEV)
+			ap_dev->unregistered = 1;
+	} else {
+		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+		rc = -ENODEV;
+	}
+	spin_unlock_bh(&ap_dev->lock);
+	if (rc == -ENODEV)
+		device_unregister(&ap_dev->device);
+}
+EXPORT_SYMBOL(ap_queue_message);
+
+/**
+ * ap_cancel_message(): Cancel a crypto request.
+ * @ap_dev: The AP device that has the message queued
+ * @ap_msg: The message that is to be removed
+ *
+ * Cancel a crypto request. This is done by removing the request
+ * from the device pending or request queue. Note that the
+ * request stays on the AP queue. When it finishes the message
+ * reply will be discarded because the psmid can't be found.
+ */
+void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
+{
+	struct ap_message *tmp;
+
+	spin_lock_bh(&ap_dev->lock);
+	if (!list_empty(&ap_msg->list)) {
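+		/* if the message is not on the pending queue it is still on the request queue */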
+		list_for_each_entry(tmp, &ap_dev->pendingq, list)
+			if (tmp->psmid == ap_msg->psmid) {
+				ap_dev->pendingq_count--;
+				goto found;
+			}
+		ap_dev->requestq_count--;
+	found:
+		list_del_init(&ap_msg->list);
+	}
+	spin_unlock_bh(&ap_dev->lock);
+}
+EXPORT_SYMBOL(ap_cancel_message);
+
+/**
+ * ap_poll_timeout(): AP receive polling for finished AP requests.
+ * @unused: Unused pointer.
+ *
+ * Schedules the AP tasklet using a high resolution timer.
+ */
+static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
+{
+	tasklet_schedule(&ap_tasklet);
+	return HRTIMER_NORESTART;
+}
+
+/**
+ * ap_reset(): Reset an unresponsive AP device.
+ * @ap_dev: Pointer to the AP device
+ *
+ * Reset an unresponsive AP device and move all requests from the
+ * pending queue to the request queue.
+ */
+static void ap_reset(struct ap_device *ap_dev)
+{
+	int rc;
+
+	ap_dev->reset = AP_RESET_IGNORE;
+	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
+	ap_dev->queue_count = 0;
+	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
+	ap_dev->requestq_count += ap_dev->pendingq_count;
+	ap_dev->pendingq_count = 0;
+	rc = ap_init_queue(ap_dev->qid);
+	if (rc == -ENODEV)
+		ap_dev->unregistered = 1;
+	else
+		__ap_schedule_poll_timer();
+}
+
+static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
+{
+	if (!ap_dev->unregistered) {
+		if (ap_poll_queue(ap_dev, flags))
+			ap_dev->unregistered = 1;
+		if (ap_dev->reset == AP_RESET_DO)
+			ap_reset(ap_dev);
+	}
+	return 0;
+}
+
+/**
+ * ap_poll_all(): Poll all AP devices.
+ * @dummy: Unused variable
+ *
+ * Poll all AP devices on the bus in a round-robin fashion. Continue
+ * polling until bit 2^0 of the control flags is not set. If bit 2^1
+ * of the control flags has been set, arm the poll timer.
+ */
+static void ap_poll_all(unsigned long dummy)
+{
+	unsigned long flags;
+	struct ap_device *ap_dev;
+
+	/* Reset the indicator if interrupts are used. Thus new interrupts can
+	 * be received. Doing this at the beginning of the tasklet is important
+	 * so that no requests on any AP get lost.
+	 */
+	if (ap_using_interrupts())
+		xchg((u8 *)ap_interrupt_indicator, 0);
+	do {
+		flags = 0;
+		spin_lock(&ap_device_list_lock);
+		list_for_each_entry(ap_dev, &ap_device_list, list) {
+			spin_lock(&ap_dev->lock);
+			__ap_poll_device(ap_dev, &flags);
+			spin_unlock(&ap_dev->lock);
+		}
+		spin_unlock(&ap_device_list_lock);
+	} while (flags & 1);
+	if (flags & 2)
+		ap_schedule_poll_timer();
+}
+
+/**
+ * ap_poll_thread(): Thread that polls for finished requests.
+ * @data: Unused pointer
+ *
+ * AP bus poll thread. The purpose of this thread is to poll for
+ * finished requests in a loop if there is a "free" cpu - that is
+ * a cpu that doesn't have anything better to do. The polling stops
+ * as soon as there is another task or if all messages have been
+ * delivered.
+ */
+static int ap_poll_thread(void *data)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long flags;
+	int requests;
+	struct ap_device *ap_dev;
+
+	set_user_nice(current, 19);
+	while (1) {
+		if (ap_suspend_flag)
+			return 0;
+		if (need_resched()) {
+			schedule();
+			continue;
+		}
+		add_wait_queue(&ap_poll_wait, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (kthread_should_stop())
+			break;
+		requests = atomic_read(&ap_poll_requests);
+		if (requests <= 0)
+			schedule();
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&ap_poll_wait, &wait);
+
+		flags = 0;
+		spin_lock_bh(&ap_device_list_lock);
+		list_for_each_entry(ap_dev, &ap_device_list, list) {
+			spin_lock(&ap_dev->lock);
+			__ap_poll_device(ap_dev, &flags);
+			spin_unlock(&ap_dev->lock);
+		}
+		spin_unlock_bh(&ap_device_list_lock);
+	}
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&ap_poll_wait, &wait);
+	return 0;
+}
+
+static int ap_poll_thread_start(void)
+{
+	int rc;
+
+	if (ap_using_interrupts() || ap_suspend_flag)
+		return 0;
+	mutex_lock(&ap_poll_thread_mutex);
+	if (!ap_poll_kthread) {
+		ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
+		rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
+		if (rc)
+			ap_poll_kthread = NULL;
+	}
+	else
+		rc = 0;
+	mutex_unlock(&ap_poll_thread_mutex);
+	return rc;
+}
+
+static void ap_poll_thread_stop(void)
+{
+	mutex_lock(&ap_poll_thread_mutex);
+	if (ap_poll_kthread) {
+		kthread_stop(ap_poll_kthread);
+		ap_poll_kthread = NULL;
+	}
+	mutex_unlock(&ap_poll_thread_mutex);
+}
+
+/**
+ * ap_request_timeout(): Handling of request timeouts
+ * @data: Holds the AP device.
+ *
+ * Handles request timeouts.
+ */
+static void ap_request_timeout(unsigned long data)
+{
+	struct ap_device *ap_dev = (struct ap_device *) data;
+
+	if (ap_dev->reset == AP_RESET_ARMED) {
+		ap_dev->reset = AP_RESET_DO;
+
+		if (ap_using_interrupts())
+			tasklet_schedule(&ap_tasklet);
+	}
+}
+
+static void ap_reset_domain(void)
+{
+	int i;
+
+	if (ap_domain_index != -1)
+		for (i = 0; i < AP_DEVICES; i++)
+			ap_reset_queue(AP_MKQID(i, ap_domain_index));
+}
+
+static void ap_reset_all(void)
+{
+	int i, j;
+
+	for (i = 0; i < AP_DOMAINS; i++)
+		for (j = 0; j < AP_DEVICES; j++)
+			ap_reset_queue(AP_MKQID(j, i));
+}
+
+static struct reset_call ap_reset_call = {
+	.fn = ap_reset_all,
+};
+
+/**
+ * ap_module_init(): The module initialization code.
+ *
+ * Initializes the module.
+ */
+int __init ap_module_init(void)
+{
+	int rc, i;
+
+	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
+		pr_warning("%d is not a valid cryptographic domain\n",
+			   ap_domain_index);
+		return -EINVAL;
+	}
+	/* In resume callback we need to know if the user had set the domain.
+	 * If so, we can not just reset it.
+	 */
+	if (ap_domain_index >= 0)
+		user_set_domain = 1;
+
+	if (ap_instructions_available() != 0) {
+		pr_warning("The hardware system does not support "
+			   "AP instructions\n");
+		return -ENODEV;
+	}
+	if (ap_interrupts_available()) {
+		isc_register(AP_ISC);
+		ap_interrupt_indicator = s390_register_adapter_interrupt(
+			&ap_interrupt_handler, NULL, AP_ISC);
+		if (IS_ERR(ap_interrupt_indicator)) {
+			ap_interrupt_indicator = NULL;
+			isc_unregister(AP_ISC);
+		}
+	}
+
+	register_reset_call(&ap_reset_call);
+
+	/* Create /sys/bus/ap. */
+	rc = bus_register(&ap_bus_type);
+	if (rc)
+		goto out;
+	for (i = 0; ap_bus_attrs[i]; i++) {
+		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
+		if (rc)
+			goto out_bus;
+	}
+
+	/* Create /sys/devices/ap. */
+	ap_root_device = root_device_register("ap");
+	rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
+	if (rc)
+		goto out_bus;
+
+	ap_work_queue = create_singlethread_workqueue("kapwork");
+	if (!ap_work_queue) {
+		rc = -ENOMEM;
+		goto out_root;
+	}
+
+	if (ap_select_domain() == 0)
+		ap_scan_bus(NULL);
+
+	/* Setup the AP bus rescan timer. */
+	init_timer(&ap_config_timer);
+	ap_config_timer.function = ap_config_timeout;
+	ap_config_timer.data = 0;
+	ap_config_timer.expires = jiffies + ap_config_time * HZ;
+	add_timer(&ap_config_timer);
+
+	/* Set up the high resolution poll timer.
+	 * If we are running under z/VM adjust polling to z/VM polling rate.
+	 */
+	if (MACHINE_IS_VM)
+		poll_timeout = 1500000;
+	spin_lock_init(&ap_poll_timer_lock);
+	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	ap_poll_timer.function = ap_poll_timeout;
+
+	/* Start the low priority AP bus poll thread. */
+	if (ap_thread_flag) {
+		rc = ap_poll_thread_start();
+		if (rc)
+			goto out_work;
+	}
+
+	return 0;
+
+out_work:
+	del_timer_sync(&ap_config_timer);
+	hrtimer_cancel(&ap_poll_timer);
+	destroy_workqueue(ap_work_queue);
+out_root:
+	root_device_unregister(ap_root_device);
+out_bus:
+	while (i--)
+		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
+	bus_unregister(&ap_bus_type);
+out:
+	unregister_reset_call(&ap_reset_call);
+	if (ap_using_interrupts()) {
+		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
+		isc_unregister(AP_ISC);
+	}
+	return rc;
+}
+
+static int __ap_match_all(struct device *dev, void *data)
+{
+	return 1;
+}
+
+/**
+ * ap_module_exit(): The module termination code
+ *
+ * Terminates the module.
+ */
+void ap_module_exit(void)
+{
+	int i;
+	struct device *dev;
+
+	ap_reset_domain();
+	ap_poll_thread_stop();
+	del_timer_sync(&ap_config_timer);
+	hrtimer_cancel(&ap_poll_timer);
+	destroy_workqueue(ap_work_queue);
+	tasklet_kill(&ap_tasklet);
+	root_device_unregister(ap_root_device);
+	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
+		    __ap_match_all)))
+	{
+		device_unregister(dev);
+		put_device(dev);
+	}
+	for (i = 0; ap_bus_attrs[i]; i++)
+		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
+	bus_unregister(&ap_bus_type);
+	unregister_reset_call(&ap_reset_call);
+	if (ap_using_interrupts()) {
+		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
+		isc_unregister(AP_ISC);
+	}
+}
+
+module_init(ap_module_init);
+module_exit(ap_module_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/crypto/ap_bus.h b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/ap_bus.h
new file mode 100644
index 0000000..d960a63
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/ap_bus.h
@@ -0,0 +1,221 @@
+/*
+ * linux/drivers/s390/crypto/ap_bus.h
+ *
+ * Copyright (C) 2006 IBM Corporation
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *	      Felix Beck <felix.beck@de.ibm.com>
+ *	      Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * Adjunct processor bus header file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _AP_BUS_H_
+#define _AP_BUS_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/types.h>
+
+#define AP_DEVICES 64		/* Number of AP devices. */
+#define AP_DOMAINS 16		/* Number of AP domains. */
+#define AP_MAX_RESET 90		/* Maximum number of resets. */
+#define AP_RESET_TIMEOUT (HZ/2)	/* Time in ticks for reset timeouts. */
+#define AP_CONFIG_TIME 30	/* Time in seconds between AP bus rescans. */
+#define AP_POLL_TIME 1		/* Time in ticks between receive polls. */
+
+extern int ap_domain_index;
+
+/**
+ * The ap_qid_t identifier of an ap queue. It contains a
+ * 6 bit device index and a 4 bit queue index (domain).
+ */
+typedef unsigned int ap_qid_t;
+
+#define AP_MKQID(_device,_queue) (((_device) & 63) << 8 | ((_queue) & 15))
+#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63)
+#define AP_QID_QUEUE(_qid) ((_qid) & 15)
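+
+/*
+ * Worked example (illustrative only, not used by the driver): with a
+ * device index of 3 and a domain of 7, AP_MKQID(3, 7) yields 0x0307 --
+ * the device index sits in bits 8-13, the domain in bits 0-3 -- and
+ * AP_QID_DEVICE(0x0307) == 3 and AP_QID_QUEUE(0x0307) == 7 recover the
+ * two parts again.
+ */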
+
+/**
+ * struct ap_queue_status - Holds the AP queue status.
+ * @queue_empty: Shows if the queue is empty
+ * @replies_waiting: Waiting replies
+ * @queue_full: Is 1 if the queue is full
+ * @pad1: A 4 bit pad
+ * @int_enabled: Shows if interrupts are enabled for the AP
+ * @response_code: Holds the 8 bit response code
+ * @pad2: A 16 bit pad
+ *
+ * The ap queue status word is returned by all three AP functions
+ * (PQAP, NQAP and DQAP).  There's a set of flags in the first
+ * byte, followed by a 1 byte response code.
+ */
+struct ap_queue_status {
+	unsigned int queue_empty	: 1;
+	unsigned int replies_waiting	: 1;
+	unsigned int queue_full		: 1;
+	unsigned int pad1		: 4;
+	unsigned int int_enabled	: 1;
+	unsigned int response_code	: 8;
+	unsigned int pad2		: 16;
+} __packed;
+
+#define AP_QUEUE_STATUS_INVALID \
+		{ 1, 1, 1, 0xF, 1, 0xFF, 0xFFFF }
+
+static inline
+int ap_queue_status_invalid_test(struct ap_queue_status *status)
+{
+	struct ap_queue_status invalid = AP_QUEUE_STATUS_INVALID;
+	return !(memcmp(status, &invalid, sizeof(struct ap_queue_status)));
+}
+
+#define MAX_AP_FACILITY 31
+
+static inline int test_ap_facility(unsigned int function, unsigned int nr)
+{
+	if (nr > MAX_AP_FACILITY)
+		return 0;
+	return function & (unsigned int)(0x80000000 >> nr);
+}
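+
+/*
+ * Example (illustrative only): test_ap_facility(func, 0) tests the
+ * leftmost bit 0x80000000 of the facility word, test_ap_facility(func, 2)
+ * tests 0x20000000, and any nr above MAX_AP_FACILITY simply returns 0.
+ */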
+
+#define AP_RESPONSE_NORMAL		0x00
+#define AP_RESPONSE_Q_NOT_AVAIL		0x01
+#define AP_RESPONSE_RESET_IN_PROGRESS	0x02
+#define AP_RESPONSE_DECONFIGURED	0x03
+#define AP_RESPONSE_CHECKSTOPPED	0x04
+#define AP_RESPONSE_BUSY		0x05
+#define AP_RESPONSE_INVALID_ADDRESS	0x06
+#define AP_RESPONSE_OTHERWISE_CHANGED	0x07
+#define AP_RESPONSE_Q_FULL		0x10
+#define AP_RESPONSE_NO_PENDING_REPLY	0x10
+#define AP_RESPONSE_INDEX_TOO_BIG	0x11
+#define AP_RESPONSE_NO_FIRST_PART	0x13
+#define AP_RESPONSE_MESSAGE_TOO_BIG	0x15
+#define AP_RESPONSE_REQ_FAC_NOT_INST	0x16
+
+/*
+ * Known device types
+ */
+#define AP_DEVICE_TYPE_PCICC	3
+#define AP_DEVICE_TYPE_PCICA	4
+#define AP_DEVICE_TYPE_PCIXCC	5
+#define AP_DEVICE_TYPE_CEX2A	6
+#define AP_DEVICE_TYPE_CEX2C	7
+#define AP_DEVICE_TYPE_CEX3A	8
+#define AP_DEVICE_TYPE_CEX3C	9
+
+/*
+ * AP reset flag states
+ */
+#define AP_RESET_IGNORE	0	/* request timeout will be ignored */
+#define AP_RESET_ARMED	1	/* request timeout timer is active */
+#define AP_RESET_DO	2	/* AP reset required */
+
+struct ap_device;
+struct ap_message;
+
+struct ap_driver {
+	struct device_driver driver;
+	struct ap_device_id *ids;
+
+	int (*probe)(struct ap_device *);
+	void (*remove)(struct ap_device *);
+	/* receive is called from tasklet context */
+	void (*receive)(struct ap_device *, struct ap_message *,
+			struct ap_message *);
+	int request_timeout;		/* request timeout in jiffies */
+};
+
+#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
+
+int ap_driver_register(struct ap_driver *, struct module *, char *);
+void ap_driver_unregister(struct ap_driver *);
+
+struct ap_device {
+	struct device device;
+	struct ap_driver *drv;		/* Pointer to AP device driver. */
+	spinlock_t lock;		/* Per device lock. */
+	struct list_head list;		/* private list of all AP devices. */
+
+	ap_qid_t qid;			/* AP queue id. */
+	int queue_depth;		/* AP queue depth. */
+	int device_type;		/* AP device type. */
+	int unregistered;		/* marks AP device as unregistered */
+	struct timer_list timeout;	/* Timer for request timeouts. */
+	int reset;			/* Reset required after req. timeout. */
+
+	int queue_count;		/* # messages currently on AP queue. */
+
+	struct list_head pendingq;	/* List of messages sent to AP queue. */
+	int pendingq_count;		/* # requests on pendingq list. */
+	struct list_head requestq;	/* List of messages yet to be sent. */
+	int requestq_count;		/* # requests on requestq list. */
+	int total_request_count;	/* # requests ever for this AP device. */
+
+	struct ap_message *reply;	/* Per device reply message. */
+
+	void *private;			/* ap driver private pointer. */
+};
+
+#define to_ap_dev(x) container_of((x), struct ap_device, device)
+
+struct ap_message {
+	struct list_head list;		/* Request queueing. */
+	unsigned long long psmid;	/* Message id. */
+	void *message;			/* Pointer to message buffer. */
+	size_t length;			/* Message length. */
+
+	void *private;			/* ap driver private pointer. */
+	unsigned int special:1;		/* Used for special commands. */
+};
+
+#define AP_DEVICE(dt)					\
+	.dev_type=(dt),					\
+	.match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
+
+/**
+ * ap_init_message() - Initialize ap_message.
+ * Initialize a message before use; otherwise this might result in
+ * unexpected behaviour.
+ */
+static inline void ap_init_message(struct ap_message *ap_msg)
+{
+	ap_msg->psmid = 0;
+	ap_msg->length = 0;
+	ap_msg->special = 0;
+}
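+
+/*
+ * Illustrative driver-side sketch (not part of this header, field values
+ * are placeholders): a typical caller prepares an ap_message and hands
+ * it to the bus, e.g.
+ *
+ *	struct ap_message msg;
+ *
+ *	ap_init_message(&msg);
+ *	msg.message = request_buffer;
+ *	msg.length = request_length;
+ *	msg.psmid = some_unique_id;
+ *	ap_queue_message(ap_dev, &msg);
+ *
+ * The reply is later delivered through the driver's receive() callback,
+ * and ap_cancel_message() may be used to drop a request that is no
+ * longer wanted.
+ */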
+
+/*
+ * Note: don't use ap_send/ap_recv after using ap_queue_message
+ * for the first time. Otherwise the ap message queue will get
+ * confused.
+ */
+int ap_send(ap_qid_t, unsigned long long, void *, size_t);
+int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
+
+void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
+void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
+void ap_flush_queue(struct ap_device *ap_dev);
+
+int ap_module_init(void);
+void ap_module_exit(void);
+
+int ap_4096_commands_available(ap_qid_t qid);
+
+#endif /* _AP_BUS_H_ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_api.c b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_api.c
new file mode 100644
index 0000000..8852320
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_api.c
@@ -0,0 +1,1224 @@
+/*
+ *  linux/drivers/s390/crypto/zcrypt_api.c
+ *
+ *  zcrypt 2.1.0
+ *
+ *  Copyright (C)  2001, 2006 IBM Corporation
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *	       Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <asm/uaccess.h>
+#include <linux/hw_random.h>
+
+#include "zcrypt_api.h"
+
+/*
+ * Module description.
+ */
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("Cryptographic Coprocessor interface, "
+		   "Copyright 2001, 2006 IBM Corporation");
+MODULE_LICENSE("GPL");
+
+static DEFINE_SPINLOCK(zcrypt_device_lock);
+static LIST_HEAD(zcrypt_device_list);
+static int zcrypt_device_count = 0;
+static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
+
+static int zcrypt_rng_device_add(void);
+static void zcrypt_rng_device_remove(void);
+
+/*
+ * Device attributes common for all crypto devices.
+ */
+static ssize_t zcrypt_type_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
+	return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
+}
+
+static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);
+
+static ssize_t zcrypt_online_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
+	return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
+}
+
+static ssize_t zcrypt_online_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
+	int online;
+
+	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
+		return -EINVAL;
+	zdev->online = online;
+	if (!online)
+		ap_flush_queue(zdev->ap_dev);
+	return count;
+}
+
+static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);
+
+static struct attribute * zcrypt_device_attrs[] = {
+	&dev_attr_type.attr,
+	&dev_attr_online.attr,
+	NULL,
+};
+
+static struct attribute_group zcrypt_device_attr_group = {
+	.attrs = zcrypt_device_attrs,
+};
+
+/**
+ * __zcrypt_increase_preference(): Increase preference of a crypto device.
+ * @zdev: Pointer to the crypto device
+ *
+ * Move the device towards the head of the device list.
+ * Needs to be called while holding the zcrypt device list lock.
+ * Note: cards with speed_rating of 0 are kept at the end of the list.
+ */
+static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
+{
+	struct zcrypt_device *tmp;
+	struct list_head *l;
+
+	if (zdev->speed_rating == 0)
+		return;
+	for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
+		tmp = list_entry(l, struct zcrypt_device, list);
+		if ((tmp->request_count + 1) * tmp->speed_rating <=
+		    (zdev->request_count + 1) * zdev->speed_rating &&
+		    tmp->speed_rating != 0)
+			break;
+	}
+	if (l == zdev->list.prev)
+		return;
+	/* Move zdev behind l */
+	list_move(&zdev->list, l);
+}
+
+/**
+ * __zcrypt_decrease_preference(): Decrease preference of a crypto device.
+ * @zdev: Pointer to a crypto device.
+ *
+ * Move the device towards the tail of the device list.
+ * Needs to be called while holding the zcrypt device list lock.
+ * Note: cards with speed_rating of 0 are kept at the end of the list.
+ */
+static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
+{
+	struct zcrypt_device *tmp;
+	struct list_head *l;
+
+	if (zdev->speed_rating == 0)
+		return;
+	for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
+		tmp = list_entry(l, struct zcrypt_device, list);
+		if ((tmp->request_count + 1) * tmp->speed_rating >
+		    (zdev->request_count + 1) * zdev->speed_rating ||
+		    tmp->speed_rating == 0)
+			break;
+	}
+	if (l == zdev->list.next)
+		return;
+	/* Move zdev before l */
+	list_move_tail(&zdev->list, l);
+}
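+
+/*
+ * Worked example of the ordering above (numbers invented for
+ * illustration, ignoring the speed_rating 0 special case): the list is
+ * kept sorted by (request_count + 1) * speed_rating, smaller products
+ * first. A card with speed_rating 5 and two requests outstanding scores
+ * (2 + 1) * 5 = 15, while an idle card with speed_rating 10 scores
+ * (0 + 1) * 10 = 10, so the idle card moves ahead of the busier one and
+ * is tried first by the request sprayer.
+ */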
+
+static void zcrypt_device_release(struct kref *kref)
+{
+	struct zcrypt_device *zdev =
+		container_of(kref, struct zcrypt_device, refcount);
+	zcrypt_device_free(zdev);
+}
+
+void zcrypt_device_get(struct zcrypt_device *zdev)
+{
+	kref_get(&zdev->refcount);
+}
+EXPORT_SYMBOL(zcrypt_device_get);
+
+int zcrypt_device_put(struct zcrypt_device *zdev)
+{
+	return kref_put(&zdev->refcount, zcrypt_device_release);
+}
+EXPORT_SYMBOL(zcrypt_device_put);
+
+struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
+{
+	struct zcrypt_device *zdev;
+
+	zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
+	if (!zdev)
+		return NULL;
+	zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
+	if (!zdev->reply.message)
+		goto out_free;
+	zdev->reply.length = max_response_size;
+	spin_lock_init(&zdev->lock);
+	INIT_LIST_HEAD(&zdev->list);
+	return zdev;
+
+out_free:
+	kfree(zdev);
+	return NULL;
+}
+EXPORT_SYMBOL(zcrypt_device_alloc);
+
+void zcrypt_device_free(struct zcrypt_device *zdev)
+{
+	kfree(zdev->reply.message);
+	kfree(zdev);
+}
+EXPORT_SYMBOL(zcrypt_device_free);
+
+/**
+ * zcrypt_device_register() - Register a crypto device.
+ * @zdev: Pointer to a crypto device
+ *
+ * Register a crypto device. Returns 0 if successful.
+ */
+int zcrypt_device_register(struct zcrypt_device *zdev)
+{
+	int rc;
+
+	rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
+				&zcrypt_device_attr_group);
+	if (rc)
+		goto out;
+	get_device(&zdev->ap_dev->device);
+	kref_init(&zdev->refcount);
+	spin_lock_bh(&zcrypt_device_lock);
+	zdev->online = 1;	/* New devices are online by default. */
+	list_add_tail(&zdev->list, &zcrypt_device_list);
+	__zcrypt_increase_preference(zdev);
+	zcrypt_device_count++;
+	spin_unlock_bh(&zcrypt_device_lock);
+	if (zdev->ops->rng) {
+		rc = zcrypt_rng_device_add();
+		if (rc)
+			goto out_unregister;
+	}
+	return 0;
+
+out_unregister:
+	spin_lock_bh(&zcrypt_device_lock);
+	zcrypt_device_count--;
+	list_del_init(&zdev->list);
+	spin_unlock_bh(&zcrypt_device_lock);
+	sysfs_remove_group(&zdev->ap_dev->device.kobj,
+			   &zcrypt_device_attr_group);
+	put_device(&zdev->ap_dev->device);
+	zcrypt_device_put(zdev);
+out:
+	return rc;
+}
+EXPORT_SYMBOL(zcrypt_device_register);
+
+/**
+ * zcrypt_device_unregister(): Unregister a crypto device.
+ * @zdev: Pointer to crypto device
+ *
+ * Unregister a crypto device.
+ */
+void zcrypt_device_unregister(struct zcrypt_device *zdev)
+{
+	if (zdev->ops->rng)
+		zcrypt_rng_device_remove();
+	spin_lock_bh(&zcrypt_device_lock);
+	zcrypt_device_count--;
+	list_del_init(&zdev->list);
+	spin_unlock_bh(&zcrypt_device_lock);
+	sysfs_remove_group(&zdev->ap_dev->device.kobj,
+			   &zcrypt_device_attr_group);
+	put_device(&zdev->ap_dev->device);
+	zcrypt_device_put(zdev);
+}
+EXPORT_SYMBOL(zcrypt_device_unregister);
+
+/**
+ * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
+ *
+ * This function is not supported beyond zcrypt 1.3.1.
+ */
+static ssize_t zcrypt_read(struct file *filp, char __user *buf,
+			   size_t count, loff_t *f_pos)
+{
+	return -EPERM;
+}
+
+/**
+ * zcrypt_write(): Not allowed.
+ *
+ * Write is not allowed.
+ */
+static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
+			    size_t count, loff_t *f_pos)
+{
+	return -EPERM;
+}
+
+/**
+ * zcrypt_open(): Count number of users.
+ *
+ * Device open function to count number of users.
+ */
+static int zcrypt_open(struct inode *inode, struct file *filp)
+{
+	atomic_inc(&zcrypt_open_count);
+	return nonseekable_open(inode, filp);
+}
+
+/**
+ * zcrypt_release(): Count number of users.
+ *
+ * Device close function to count number of users.
+ */
+static int zcrypt_release(struct inode *inode, struct file *filp)
+{
+	atomic_dec(&zcrypt_open_count);
+	return 0;
+}
+
+/*
+ * zcrypt ioctls.
+ */
+static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
+{
+	struct zcrypt_device *zdev;
+	int rc;
+
+	if (mex->outputdatalength < mex->inputdatalength)
+		return -EINVAL;
+	/*
+	 * As long as outputdatalength is big enough, we can set the
+	 * outputdatalength equal to the inputdatalength, since that is the
+	 * number of bytes we will copy in any case
+	 */
+	mex->outputdatalength = mex->inputdatalength;
+
+	spin_lock_bh(&zcrypt_device_lock);
+	list_for_each_entry(zdev, &zcrypt_device_list, list) {
+		if (!zdev->online ||
+		    !zdev->ops->rsa_modexpo ||
+		    zdev->min_mod_size > mex->inputdatalength ||
+		    zdev->max_mod_size < mex->inputdatalength)
+			continue;
+		zcrypt_device_get(zdev);
+		get_device(&zdev->ap_dev->device);
+		zdev->request_count++;
+		__zcrypt_decrease_preference(zdev);
+		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
+			spin_unlock_bh(&zcrypt_device_lock);
+			rc = zdev->ops->rsa_modexpo(zdev, mex);
+			spin_lock_bh(&zcrypt_device_lock);
+			module_put(zdev->ap_dev->drv->driver.owner);
+		}
+		else
+			rc = -EAGAIN;
+		zdev->request_count--;
+		__zcrypt_increase_preference(zdev);
+		put_device(&zdev->ap_dev->device);
+		zcrypt_device_put(zdev);
+		spin_unlock_bh(&zcrypt_device_lock);
+		return rc;
+	}
+	spin_unlock_bh(&zcrypt_device_lock);
+	return -ENODEV;
+}
+
+static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
+{
+	struct zcrypt_device *zdev;
+	unsigned long long z1, z2, z3;
+	int rc, copied;
+
+	if (crt->outputdatalength < crt->inputdatalength ||
+	    (crt->inputdatalength & 1))
+		return -EINVAL;
+	/*
+	 * As long as outputdatalength is big enough, we can set the
+	 * outputdatalength equal to the inputdatalength, since that is the
+	 * number of bytes we will copy in any case
+	 */
+	crt->outputdatalength = crt->inputdatalength;
+
+	copied = 0;
+ restart:
+	spin_lock_bh(&zcrypt_device_lock);
+	list_for_each_entry(zdev, &zcrypt_device_list, list) {
+		if (!zdev->online ||
+		    !zdev->ops->rsa_modexpo_crt ||
+		    zdev->min_mod_size > crt->inputdatalength ||
+		    zdev->max_mod_size < crt->inputdatalength)
+			continue;
+		if (zdev->short_crt && crt->inputdatalength > 240) {
+			/*
+			 * Check inputdata for leading zeros for cards
+			 * that can't handle np_prime, bp_key, or
+			 * u_mult_inv > 128 bytes.
+			 */
+			if (copied == 0) {
+				unsigned int len;
+				spin_unlock_bh(&zcrypt_device_lock);
+				/* len is at most 256 / 2 - 120 = 8.
+				 * For larger inputs just assume the number of
+				 * leading 0s is 8, as stated in the requirements
+				 * for the ica_rsa_modexpo_crt struct in zcrypt.h.
+				 */
+				if (crt->inputdatalength <= 256)
+					len = crt->inputdatalength / 2 - 120;
+				else
+					len = 8;
+				if (len > sizeof(z1))
+					return -EFAULT;
+				z1 = z2 = z3 = 0;
+				if (copy_from_user(&z1, crt->np_prime, len) ||
+				    copy_from_user(&z2, crt->bp_key, len) ||
+				    copy_from_user(&z3, crt->u_mult_inv, len))
+					return -EFAULT;
+				z1 = z2 = z3 = 0;
+				copied = 1;
+				/*
+				 * We have to restart device lookup -
+				 * the device list may have changed by now.
+				 */
+				goto restart;
+			}
+			if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
+				/* The device can't handle this request. */
+				continue;
+		}
+		zcrypt_device_get(zdev);
+		get_device(&zdev->ap_dev->device);
+		zdev->request_count++;
+		__zcrypt_decrease_preference(zdev);
+		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
+			spin_unlock_bh(&zcrypt_device_lock);
+			rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
+			spin_lock_bh(&zcrypt_device_lock);
+			module_put(zdev->ap_dev->drv->driver.owner);
+		}
+		else
+			rc = -EAGAIN;
+		zdev->request_count--;
+		__zcrypt_increase_preference(zdev);
+		put_device(&zdev->ap_dev->device);
+		zcrypt_device_put(zdev);
+		spin_unlock_bh(&zcrypt_device_lock);
+		return rc;
+	}
+	spin_unlock_bh(&zcrypt_device_lock);
+	return -ENODEV;
+}
+
+static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
+{
+	struct zcrypt_device *zdev;
+	int rc;
+
+	spin_lock_bh(&zcrypt_device_lock);
+	list_for_each_entry(zdev, &zcrypt_device_list, list) {
+		if (!zdev->online || !zdev->ops->send_cprb ||
+		    (xcRB->user_defined != AUTOSELECT &&
+			AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)
+		    )
+			continue;
+		zcrypt_device_get(zdev);
+		get_device(&zdev->ap_dev->device);
+		zdev->request_count++;
+		__zcrypt_decrease_preference(zdev);
+		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
+			spin_unlock_bh(&zcrypt_device_lock);
+			rc = zdev->ops->send_cprb(zdev, xcRB);
+			spin_lock_bh(&zcrypt_device_lock);
+			module_put(zdev->ap_dev->drv->driver.owner);
+		}
+		else
+			rc = -EAGAIN;
+		zdev->request_count--;
+		__zcrypt_increase_preference(zdev);
+		put_device(&zdev->ap_dev->device);
+		zcrypt_device_put(zdev);
+		spin_unlock_bh(&zcrypt_device_lock);
+		return rc;
+	}
+	spin_unlock_bh(&zcrypt_device_lock);
+	return -ENODEV;
+}
+
+static long zcrypt_rng(char *buffer)
+{
+	struct zcrypt_device *zdev;
+	int rc;
+
+	spin_lock_bh(&zcrypt_device_lock);
+	list_for_each_entry(zdev, &zcrypt_device_list, list) {
+		if (!zdev->online || !zdev->ops->rng)
+			continue;
+		zcrypt_device_get(zdev);
+		get_device(&zdev->ap_dev->device);
+		zdev->request_count++;
+		__zcrypt_decrease_preference(zdev);
+		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
+			spin_unlock_bh(&zcrypt_device_lock);
+			rc = zdev->ops->rng(zdev, buffer);
+			spin_lock_bh(&zcrypt_device_lock);
+			module_put(zdev->ap_dev->drv->driver.owner);
+		} else
+			rc = -EAGAIN;
+		zdev->request_count--;
+		__zcrypt_increase_preference(zdev);
+		put_device(&zdev->ap_dev->device);
+		zcrypt_device_put(zdev);
+		spin_unlock_bh(&zcrypt_device_lock);
+		return rc;
+	}
+	spin_unlock_bh(&zcrypt_device_lock);
+	return -ENODEV;
+}
+
+static void zcrypt_status_mask(char status[AP_DEVICES])
+{
+	struct zcrypt_device *zdev;
+
+	memset(status, 0, sizeof(char) * AP_DEVICES);
+	spin_lock_bh(&zcrypt_device_lock);
+	list_for_each_entry(zdev, &zcrypt_device_list, list)
+		status[AP_QID_DEVICE(zdev->ap_dev->qid)] =
+			zdev->online ? zdev->user_space_type : 0x0d;
+	spin_unlock_bh(&zcrypt_device_lock);
+}
+
+static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
+{
+	struct zcrypt_device *zdev;
+
+	memset(qdepth, 0, sizeof(char)	* AP_DEVICES);
+	spin_lock_bh(&zcrypt_device_lock);
+	list_for_each_entry(zdev, &zcrypt_device_list, list) {
+		spin_lock(&zdev->ap_dev->lock);
+		qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] =
+			zdev->ap_dev->pendingq_count +
+			zdev->ap_dev->requestq_count;
+		spin_unlock(&zdev->ap_dev->lock);
+	}
+	spin_unlock_bh(&zcrypt_device_lock);
+}
+
+static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
+{
+	struct zcrypt_device *zdev;
+
+	memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
+	spin_lock_bh(&zcrypt_device_lock);
+	list_for_each_entry(zdev, &zcrypt_device_list, list) {
+		spin_lock(&zdev->ap_dev->lock);
+		reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] =
+			zdev->ap_dev->total_request_count;
+		spin_unlock(&zdev->ap_dev->lock);
+	}
+	spin_unlock_bh(&zcrypt_device_lock);
+}
+
+static int zcrypt_pendingq_count(void)
+{
+	struct zcrypt_device *zdev;
+	int pendingq_count = 0;
+
+	spin_lock_bh(&zcrypt_device_lock);
+	list_for_each_entry(zdev, &zcrypt_device_list, list) {
+		spin_lock(&zdev->ap_dev->lock);
+		pendingq_count += zdev->ap_dev->pendingq_count;
+		spin_unlock(&zdev->ap_dev->lock);
+	}
+	spin_unlock_bh(&zcrypt_device_lock);
+	return pendingq_count;
+}
+
+static int zcrypt_requestq_count(void)
+{
+	struct zcrypt_device *zdev;
+	int requestq_count = 0;
+
+	spin_lock_bh(&zcrypt_device_lock);
+	list_for_each_entry(zdev, &zcrypt_device_list, list) {
+		spin_lock(&zdev->ap_dev->lock);
+		requestq_count += zdev->ap_dev->requestq_count;
+		spin_unlock(&zdev->ap_dev->lock);
+	}
+	spin_unlock_bh(&zcrypt_device_lock);
+	return requestq_count;
+}
+
+static int zcrypt_count_type(int type)
+{
+	struct zcrypt_device *zdev;
+	int device_count = 0;
+
+	spin_lock_bh(&zcrypt_device_lock);
+	list_for_each_entry(zdev, &zcrypt_device_list, list)
+		if (zdev->user_space_type == type)
+			device_count++;
+	spin_unlock_bh(&zcrypt_device_lock);
+	return device_count;
+}
+
+/**
+ * zcrypt_ica_status(): Old, deprecated combi status call.
+ *
+ * Old, deprecated combi status call.
+ */
+static long zcrypt_ica_status(struct file *filp, unsigned long arg)
+{
+	struct ica_z90_status *pstat;
+	int ret;
+
+	pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
+	if (!pstat)
+		return -ENOMEM;
+	pstat->totalcount = zcrypt_device_count;
+	pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
+	pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
+	pstat->requestqWaitCount = zcrypt_requestq_count();
+	pstat->pendingqWaitCount = zcrypt_pendingq_count();
+	pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
+	pstat->cryptoDomain = ap_domain_index;
+	zcrypt_status_mask(pstat->status);
+	zcrypt_qdepth_mask(pstat->qdepth);
+	ret = 0;
+	if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
+		ret = -EFAULT;
+	kfree(pstat);
+	return ret;
+}
+
+static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
+				  unsigned long arg)
+{
+	int rc;
+
+	switch (cmd) {
+	case ICARSAMODEXPO: {
+		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
+		struct ica_rsa_modexpo mex;
+		if (copy_from_user(&mex, umex, sizeof(mex)))
+			return -EFAULT;
+		do {
+			rc = zcrypt_rsa_modexpo(&mex);
+		} while (rc == -EAGAIN);
+		if (rc)
+			return rc;
+		return put_user(mex.outputdatalength, &umex->outputdatalength);
+	}
+	case ICARSACRT: {
+		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
+		struct ica_rsa_modexpo_crt crt;
+		if (copy_from_user(&crt, ucrt, sizeof(crt)))
+			return -EFAULT;
+		do {
+			rc = zcrypt_rsa_crt(&crt);
+		} while (rc == -EAGAIN);
+		if (rc)
+			return rc;
+		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
+	}
+	case ZSECSENDCPRB: {
+		struct ica_xcRB __user *uxcRB = (void __user *) arg;
+		struct ica_xcRB xcRB;
+		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
+			return -EFAULT;
+		do {
+			rc = zcrypt_send_cprb(&xcRB);
+		} while (rc == -EAGAIN);
+		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
+			return -EFAULT;
+		return rc;
+	}
+	case Z90STAT_STATUS_MASK: {
+		char status[AP_DEVICES];
+		zcrypt_status_mask(status);
+		if (copy_to_user((char __user *) arg, status,
+				 sizeof(char) * AP_DEVICES))
+			return -EFAULT;
+		return 0;
+	}
+	case Z90STAT_QDEPTH_MASK: {
+		char qdepth[AP_DEVICES];
+		zcrypt_qdepth_mask(qdepth);
+		if (copy_to_user((char __user *) arg, qdepth,
+				 sizeof(char) * AP_DEVICES))
+			return -EFAULT;
+		return 0;
+	}
+	case Z90STAT_PERDEV_REQCNT: {
+		int reqcnt[AP_DEVICES];
+		zcrypt_perdev_reqcnt(reqcnt);
+		if (copy_to_user((int __user *) arg, reqcnt,
+				 sizeof(int) * AP_DEVICES))
+			return -EFAULT;
+		return 0;
+	}
+	case Z90STAT_REQUESTQ_COUNT:
+		return put_user(zcrypt_requestq_count(), (int __user *) arg);
+	case Z90STAT_PENDINGQ_COUNT:
+		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
+	case Z90STAT_TOTALOPEN_COUNT:
+		return put_user(atomic_read(&zcrypt_open_count),
+				(int __user *) arg);
+	case Z90STAT_DOMAIN_INDEX:
+		return put_user(ap_domain_index, (int __user *) arg);
+	/*
+	 * Deprecated ioctls. Don't add another device count ioctl,
+	 * you can count them yourself in the user space with the
+	 * output of the Z90STAT_STATUS_MASK ioctl.
+	 */
+	case ICAZ90STATUS:
+		return zcrypt_ica_status(filp, arg);
+	case Z90STAT_TOTALCOUNT:
+		return put_user(zcrypt_device_count, (int __user *) arg);
+	case Z90STAT_PCICACOUNT:
+		return put_user(zcrypt_count_type(ZCRYPT_PCICA),
+				(int __user *) arg);
+	case Z90STAT_PCICCCOUNT:
+		return put_user(zcrypt_count_type(ZCRYPT_PCICC),
+				(int __user *) arg);
+	case Z90STAT_PCIXCCMCL2COUNT:
+		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
+				(int __user *) arg);
+	case Z90STAT_PCIXCCMCL3COUNT:
+		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
+				(int __user *) arg);
+	case Z90STAT_PCIXCCCOUNT:
+		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
+				zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
+				(int __user *) arg);
+	case Z90STAT_CEX2CCOUNT:
+		return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
+				(int __user *) arg);
+	case Z90STAT_CEX2ACOUNT:
+		return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
+				(int __user *) arg);
+	default:
+		/* unknown ioctl number */
+		return -ENOIOCTLCMD;
+	}
+}
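+
+/*
+ * Illustrative user-space sketch (not part of the driver, buffer names
+ * and sizes are placeholders): an RSA modular exponentiation is
+ * requested by filling struct ica_rsa_modexpo from <asm/zcrypt.h> and
+ * issuing the ICARSAMODEXPO ioctl on /dev/z90crypt, e.g.
+ *
+ *	struct ica_rsa_modexpo mex = {
+ *		.inputdata = in_buf,
+ *		.inputdatalength = mod_len,
+ *		.outputdata = out_buf,
+ *		.outputdatalength = mod_len,
+ *		.b_key = exponent_buf,
+ *		.n_modulus = modulus_buf,
+ *	};
+ *	int fd = open("/dev/z90crypt", O_RDWR);
+ *	int rc = ioctl(fd, ICARSAMODEXPO, &mex);
+ *
+ * On success, outputdatalength is written back with the number of
+ * bytes copied to outputdata.
+ */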
+
+#ifdef CONFIG_COMPAT
+/*
+ * ioctl32 conversion routines
+ */
+struct compat_ica_rsa_modexpo {
+	compat_uptr_t	inputdata;
+	unsigned int	inputdatalength;
+	compat_uptr_t	outputdata;
+	unsigned int	outputdatalength;
+	compat_uptr_t	b_key;
+	compat_uptr_t	n_modulus;
+};
+
+static long trans_modexpo32(struct file *filp, unsigned int cmd,
+			    unsigned long arg)
+{
+	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
+	struct compat_ica_rsa_modexpo mex32;
+	struct ica_rsa_modexpo mex64;
+	long rc;
+
+	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
+		return -EFAULT;
+	mex64.inputdata = compat_ptr(mex32.inputdata);
+	mex64.inputdatalength = mex32.inputdatalength;
+	mex64.outputdata = compat_ptr(mex32.outputdata);
+	mex64.outputdatalength = mex32.outputdatalength;
+	mex64.b_key = compat_ptr(mex32.b_key);
+	mex64.n_modulus = compat_ptr(mex32.n_modulus);
+	do {
+		rc = zcrypt_rsa_modexpo(&mex64);
+	} while (rc == -EAGAIN);
+	if (!rc)
+		rc = put_user(mex64.outputdatalength,
+			      &umex32->outputdatalength);
+	return rc;
+}
+
+struct compat_ica_rsa_modexpo_crt {
+	compat_uptr_t	inputdata;
+	unsigned int	inputdatalength;
+	compat_uptr_t	outputdata;
+	unsigned int	outputdatalength;
+	compat_uptr_t	bp_key;
+	compat_uptr_t	bq_key;
+	compat_uptr_t	np_prime;
+	compat_uptr_t	nq_prime;
+	compat_uptr_t	u_mult_inv;
+};
+
+static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
+				unsigned long arg)
+{
+	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
+	struct compat_ica_rsa_modexpo_crt crt32;
+	struct ica_rsa_modexpo_crt crt64;
+	long rc;
+
+	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
+		return -EFAULT;
+	crt64.inputdata = compat_ptr(crt32.inputdata);
+	crt64.inputdatalength = crt32.inputdatalength;
+	crt64.outputdata = compat_ptr(crt32.outputdata);
+	crt64.outputdatalength = crt32.outputdatalength;
+	crt64.bp_key = compat_ptr(crt32.bp_key);
+	crt64.bq_key = compat_ptr(crt32.bq_key);
+	crt64.np_prime = compat_ptr(crt32.np_prime);
+	crt64.nq_prime = compat_ptr(crt32.nq_prime);
+	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
+	do {
+		rc = zcrypt_rsa_crt(&crt64);
+	} while (rc == -EAGAIN);
+	if (!rc)
+		rc = put_user(crt64.outputdatalength,
+			      &ucrt32->outputdatalength);
+	return rc;
+}
+
+struct compat_ica_xcRB {
+	unsigned short	agent_ID;
+	unsigned int	user_defined;
+	unsigned short	request_ID;
+	unsigned int	request_control_blk_length;
+	unsigned char	padding1[16 - sizeof (compat_uptr_t)];
+	compat_uptr_t	request_control_blk_addr;
+	unsigned int	request_data_length;
+	char		padding2[16 - sizeof (compat_uptr_t)];
+	compat_uptr_t	request_data_address;
+	unsigned int	reply_control_blk_length;
+	char		padding3[16 - sizeof (compat_uptr_t)];
+	compat_uptr_t	reply_control_blk_addr;
+	unsigned int	reply_data_length;
+	char		padding4[16 - sizeof (compat_uptr_t)];
+	compat_uptr_t	reply_data_addr;
+	unsigned short	priority_window;
+	unsigned int	status;
+} __attribute__((packed));
+
+static long trans_xcRB32(struct file *filp, unsigned int cmd,
+			 unsigned long arg)
+{
+	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
+	struct compat_ica_xcRB xcRB32;
+	struct ica_xcRB xcRB64;
+	long rc;
+
+	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
+		return -EFAULT;
+	xcRB64.agent_ID = xcRB32.agent_ID;
+	xcRB64.user_defined = xcRB32.user_defined;
+	xcRB64.request_ID = xcRB32.request_ID;
+	xcRB64.request_control_blk_length =
+		xcRB32.request_control_blk_length;
+	xcRB64.request_control_blk_addr =
+		compat_ptr(xcRB32.request_control_blk_addr);
+	xcRB64.request_data_length =
+		xcRB32.request_data_length;
+	xcRB64.request_data_address =
+		compat_ptr(xcRB32.request_data_address);
+	xcRB64.reply_control_blk_length =
+		xcRB32.reply_control_blk_length;
+	xcRB64.reply_control_blk_addr =
+		compat_ptr(xcRB32.reply_control_blk_addr);
+	xcRB64.reply_data_length = xcRB32.reply_data_length;
+	xcRB64.reply_data_addr =
+		compat_ptr(xcRB32.reply_data_addr);
+	xcRB64.priority_window = xcRB32.priority_window;
+	xcRB64.status = xcRB32.status;
+	do {
+		rc = zcrypt_send_cprb(&xcRB64);
+	} while (rc == -EAGAIN);
+	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
+	xcRB32.reply_data_length = xcRB64.reply_data_length;
+	xcRB32.status = xcRB64.status;
+	if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
+		return -EFAULT;
+	return rc;
+}
+
+static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
+			 unsigned long arg)
+{
+	if (cmd == ICARSAMODEXPO)
+		return trans_modexpo32(filp, cmd, arg);
+	if (cmd == ICARSACRT)
+		return trans_modexpo_crt32(filp, cmd, arg);
+	if (cmd == ZSECSENDCPRB)
+		return trans_xcRB32(filp, cmd, arg);
+	return zcrypt_unlocked_ioctl(filp, cmd, arg);
+}
+#endif
+
+/*
+ * Misc device file operations.
+ */
+static const struct file_operations zcrypt_fops = {
+	.owner		= THIS_MODULE,
+	.read		= zcrypt_read,
+	.write		= zcrypt_write,
+	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= zcrypt_compat_ioctl,
+#endif
+	.open		= zcrypt_open,
+	.release	= zcrypt_release,
+	.llseek		= no_llseek,
+};
+
+/*
+ * Misc device.
+ */
+static struct miscdevice zcrypt_misc_device = {
+	.minor	    = MISC_DYNAMIC_MINOR,
+	.name	    = "z90crypt",
+	.fops	    = &zcrypt_fops,
+};
+
+/*
+ * Deprecated /proc entry support.
+ */
+static struct proc_dir_entry *zcrypt_entry;
+
+static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		seq_printf(m, "%01x", (unsigned int) addr[i]);
+	seq_putc(m, ' ');
+}
+
+static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
+{
+	int inl, c, cx;
+
+	seq_printf(m, "	   ");
+	inl = 0;
+	for (c = 0; c < (len / 16); c++) {
+		sprintcl(m, addr+inl, 16);
+		inl += 16;
+	}
+	cx = len%16;
+	if (cx) {
+		sprintcl(m, addr+inl, cx);
+		inl += cx;
+	}
+	seq_putc(m, '\n');
+}
+
+static void sprinthx(unsigned char *title, struct seq_file *m,
+		     unsigned char *addr, unsigned int len)
+{
+	int inl, r, rx;
+
+	seq_printf(m, "\n%s\n", title);
+	inl = 0;
+	for (r = 0; r < (len / 64); r++) {
+		sprintrw(m, addr+inl, 64);
+		inl += 64;
+	}
+	rx = len % 64;
+	if (rx) {
+		sprintrw(m, addr+inl, rx);
+		inl += rx;
+	}
+	seq_putc(m, '\n');
+}
+
+static void sprinthx4(unsigned char *title, struct seq_file *m,
+		      unsigned int *array, unsigned int len)
+{
+	int r;
+
+	seq_printf(m, "\n%s\n", title);
+	for (r = 0; r < len; r++) {
+		if ((r % 8) == 0)
+			seq_printf(m, "    ");
+		seq_printf(m, "%08X ", array[r]);
+		if ((r % 8) == 7)
+			seq_putc(m, '\n');
+	}
+	seq_putc(m, '\n');
+}
+
+static int zcrypt_proc_show(struct seq_file *m, void *v)
+{
+	char workarea[sizeof(int) * AP_DEVICES];
+
+	seq_printf(m, "\nzcrypt version: %d.%d.%d\n",
+		   ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
+	seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index);
+	seq_printf(m, "Total device count: %d\n", zcrypt_device_count);
+	seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA));
+	seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC));
+	seq_printf(m, "PCIXCC MCL2 count: %d\n",
+		   zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
+	seq_printf(m, "PCIXCC MCL3 count: %d\n",
+		   zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
+	seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C));
+	seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A));
+	seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C));
+	seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A));
+	seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count());
+	seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count());
+	seq_printf(m, "Total open handles: %d\n\n",
+		   atomic_read(&zcrypt_open_count));
+	zcrypt_status_mask(workarea);
+	sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
+		 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
+		 m, workarea, AP_DEVICES);
+	zcrypt_qdepth_mask(workarea);
+	sprinthx("Waiting work element counts", m, workarea, AP_DEVICES);
+	zcrypt_perdev_reqcnt((int *) workarea);
+	sprinthx4("Per-device successfully completed request counts",
+		  m, (unsigned int *) workarea, AP_DEVICES);
+	return 0;
+}
+
+static int zcrypt_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, zcrypt_proc_show, NULL);
+}
+
+static void zcrypt_disable_card(int index)
+{
+	struct zcrypt_device *zdev;
+
+	spin_lock_bh(&zcrypt_device_lock);
+	list_for_each_entry(zdev, &zcrypt_device_list, list)
+		if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
+			zdev->online = 0;
+			ap_flush_queue(zdev->ap_dev);
+			break;
+		}
+	spin_unlock_bh(&zcrypt_device_lock);
+}
+
+static void zcrypt_enable_card(int index)
+{
+	struct zcrypt_device *zdev;
+
+	spin_lock_bh(&zcrypt_device_lock);
+	list_for_each_entry(zdev, &zcrypt_device_list, list)
+		if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
+			zdev->online = 1;
+			break;
+		}
+	spin_unlock_bh(&zcrypt_device_lock);
+}
+
+static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
+				 size_t count, loff_t *pos)
+{
+	unsigned char *lbuf, *ptr;
+	size_t local_count;
+	int j;
+
+	if (count <= 0)
+		return 0;
+
+#define LBUFSIZE 1200UL
+	lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
+	if (!lbuf)
+		return 0;
+
+	local_count = min(LBUFSIZE - 1, count);
+	if (copy_from_user(lbuf, buffer, local_count) != 0) {
+		kfree(lbuf);
+		return -EFAULT;
+	}
+	lbuf[local_count] = '\0';
+
+	ptr = strstr(lbuf, "Online devices");
+	if (!ptr)
+		goto out;
+	ptr = strstr(ptr, "\n");
+	if (!ptr)
+		goto out;
+	ptr++;
+
+	if (strstr(ptr, "Waiting work element counts") == NULL)
+		goto out;
+
+	for (j = 0; j < 64 && *ptr; ptr++) {
+		/*
+		 * '0' for no device, '1' for PCICA, '2' for PCICC,
+		 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
+		 * '5' for CEX2C and '6' for CEX2A,
+		 * '7' for CEX3C and '8' for CEX3A
+		 */
+		if (*ptr >= '0' && *ptr <= '8')
+			j++;
+		else if (*ptr == 'd' || *ptr == 'D')
+			zcrypt_disable_card(j++);
+		else if (*ptr == 'e' || *ptr == 'E')
+			zcrypt_enable_card(j++);
+		else if (*ptr != ' ' && *ptr != '\t')
+			break;
+	}
+out:
+	kfree(lbuf);
+	return count;
+}
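+
+/*
+ * Illustrative note on the write handler above (usage, not code): the
+ * expected input is the text produced by reading /proc/driver/z90crypt
+ * itself. The parser skips ahead to the "Online devices" line, then
+ * walks the per-card status characters that follow; replacing a card's
+ * digit with 'd' (disable) or 'e' (enable) before writing the text back
+ * toggles that card. Spaces and tabs are skipped and any other
+ * character ends the scan.
+ */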
+
+static const struct file_operations zcrypt_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= zcrypt_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= zcrypt_proc_write,
+};
+
+static int zcrypt_rng_device_count;
+static u32 *zcrypt_rng_buffer;
+static int zcrypt_rng_buffer_index;
+static DEFINE_MUTEX(zcrypt_rng_mutex);
+
+static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
+{
+	int rc;
+
+	/*
+	 * We don't need locking here because the RNG API guarantees serialized
+	 * read method calls.
+	 */
+	if (zcrypt_rng_buffer_index == 0) {
+		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
+		if (rc < 0)
+			return -EIO;
+		zcrypt_rng_buffer_index = rc / sizeof *data;
+	}
+	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
+	return sizeof *data;
+}
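+
+/*
+ * Note on the buffering above (illustrative arithmetic): zcrypt_rng()
+ * refills the page-sized zcrypt_rng_buffer in one card request and
+ * returns the number of bytes obtained, so with ZCRYPT_RNG_BUFFER_SIZE
+ * of 4096 a full refill yields 4096 / sizeof(u32) = 1024 words, which
+ * are then handed out one at a time from the end of the buffer.
+ */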
+
+static struct hwrng zcrypt_rng_dev = {
+	.name		= "zcrypt",
+	.data_read	= zcrypt_rng_data_read,
+};
+
+static int zcrypt_rng_device_add(void)
+{
+	int rc = 0;
+
+	mutex_lock(&zcrypt_rng_mutex);
+	if (zcrypt_rng_device_count == 0) {
+		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
+		if (!zcrypt_rng_buffer) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		zcrypt_rng_buffer_index = 0;
+		rc = hwrng_register(&zcrypt_rng_dev);
+		if (rc)
+			goto out_free;
+		zcrypt_rng_device_count = 1;
+	} else
+		zcrypt_rng_device_count++;
+	mutex_unlock(&zcrypt_rng_mutex);
+	return 0;
+
+out_free:
+	free_page((unsigned long) zcrypt_rng_buffer);
+out:
+	mutex_unlock(&zcrypt_rng_mutex);
+	return rc;
+}
+
+static void zcrypt_rng_device_remove(void)
+{
+	mutex_lock(&zcrypt_rng_mutex);
+	zcrypt_rng_device_count--;
+	if (zcrypt_rng_device_count == 0) {
+		hwrng_unregister(&zcrypt_rng_dev);
+		free_page((unsigned long) zcrypt_rng_buffer);
+	}
+	mutex_unlock(&zcrypt_rng_mutex);
+}
+
+/**
+ * zcrypt_api_init(): Module initialization.
+ *
+ * The module initialization code.
+ */
+int __init zcrypt_api_init(void)
+{
+	int rc;
+
+	/* Register the request sprayer. */
+	rc = misc_register(&zcrypt_misc_device);
+	if (rc < 0)
+		goto out;
+
+	/* Set up the proc file system */
+	zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops);
+	if (!zcrypt_entry) {
+		rc = -ENOMEM;
+		goto out_misc;
+	}
+
+	return 0;
+
+out_misc:
+	misc_deregister(&zcrypt_misc_device);
+out:
+	return rc;
+}
+
+/**
+ * zcrypt_api_exit(): Module termination.
+ *
+ * The module termination code.
+ */
+void zcrypt_api_exit(void)
+{
+	remove_proc_entry("driver/z90crypt", NULL);
+	misc_deregister(&zcrypt_misc_device);
+}
+
+module_init(zcrypt_api_init);
+module_exit(zcrypt_api_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_api.h b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_api.h
new file mode 100644
index 0000000..9688f39
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_api.h
@@ -0,0 +1,124 @@
+/*
+ *  linux/drivers/s390/crypto/zcrypt_api.h
+ *
+ *  zcrypt 2.1.0
+ *
+ *  Copyright (C)  2001, 2006 IBM Corporation
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *	       Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_API_H_
+#define _ZCRYPT_API_H_
+
+#include "ap_bus.h"
+#include <asm/zcrypt.h>
+
+/* deprecated status calls */
+#define ICAZ90STATUS		_IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status)
+#define Z90STAT_PCIXCCCOUNT	_IOR(ZCRYPT_IOCTL_MAGIC, 0x43, int)
+
+/**
+ * This structure is deprecated and the corresponding ioctl() has been
+ * replaced with individual ioctl()s for each piece of data!
+ */
+struct ica_z90_status {
+	int totalcount;
+	int leedslitecount; // PCICA
+	int leeds2count;    // PCICC
+	// int PCIXCCCount; is not in struct for backward compatibility
+	int requestqWaitCount;
+	int pendingqWaitCount;
+	int totalOpenCount;
+	int cryptoDomain;
+	// status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
+	//	   5=CEX2C
+	unsigned char status[64];
+	// qdepth: # work elements waiting for each device
+	unsigned char qdepth[64];
+};
+
+/**
+ * device type for an actual device is either PCICA, PCICC, PCIXCC_MCL2,
+ * PCIXCC_MCL3, CEX2C, or CEX2A
+ *
+ * NOTE: PCIXCC_MCL3 refers to a PCIXCC with May 2004 version of Licensed
+ *	 Internal Code (LIC) (EC J12220 level 29).
+ *	 PCIXCC_MCL2 refers to any LIC before this level.
+ */
+#define ZCRYPT_PCICA		1
+#define ZCRYPT_PCICC		2
+#define ZCRYPT_PCIXCC_MCL2	3
+#define ZCRYPT_PCIXCC_MCL3	4
+#define ZCRYPT_CEX2C		5
+#define ZCRYPT_CEX2A		6
+#define ZCRYPT_CEX3C		7
+#define ZCRYPT_CEX3A		8
+
+/**
+ * Large random numbers are pulled in 4096 byte chunks from the crypto cards
+ * and stored in a page. Be careful when increasing this buffer due to size
+ * limitations for AP requests.
+ */
+#define ZCRYPT_RNG_BUFFER_SIZE	4096
+
+struct zcrypt_device;
+
+struct zcrypt_ops {
+	long (*rsa_modexpo)(struct zcrypt_device *, struct ica_rsa_modexpo *);
+	long (*rsa_modexpo_crt)(struct zcrypt_device *,
+				struct ica_rsa_modexpo_crt *);
+	long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
+	long (*rng)(struct zcrypt_device *, char *);
+};
+
+struct zcrypt_device {
+	struct list_head list;		/* Device list. */
+	spinlock_t lock;		/* Per device lock. */
+	struct kref refcount;		/* device refcounting */
+	struct ap_device *ap_dev;	/* The "real" ap device. */
+	struct zcrypt_ops *ops;		/* Crypto operations. */
+	int online;			/* User online/offline */
+
+	int user_space_type;		/* User space device id. */
+	char *type_string;		/* User space device name. */
+	int min_mod_size;		/* Min number of bits. */
+	int max_mod_size;		/* Max number of bits. */
+	int short_crt;			/* Card has crt length restriction. */
+	int speed_rating;		/* Speed of the crypto device. */
+
+	int request_count;		/* # current requests. */
+
+	struct ap_message reply;	/* Per-device reply structure. */
+	int max_exp_bit_length;
+};
+
+struct zcrypt_device *zcrypt_device_alloc(size_t);
+void zcrypt_device_free(struct zcrypt_device *);
+void zcrypt_device_get(struct zcrypt_device *);
+int zcrypt_device_put(struct zcrypt_device *);
+int zcrypt_device_register(struct zcrypt_device *);
+void zcrypt_device_unregister(struct zcrypt_device *);
+int zcrypt_api_init(void);
+void zcrypt_api_exit(void);
+
+#endif /* _ZCRYPT_API_H_ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_cca_key.h b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_cca_key.h
new file mode 100644
index 0000000..ed82f2f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_cca_key.h
@@ -0,0 +1,350 @@
+/*
+ *  linux/drivers/s390/crypto/zcrypt_cca_key.h
+ *
+ *  zcrypt 2.1.0
+ *
+ *  Copyright (C)  2001, 2006 IBM Corporation
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_CCA_KEY_H_
+#define _ZCRYPT_CCA_KEY_H_
+
+struct T6_keyBlock_hdr {
+	unsigned short blen;
+	unsigned short ulen;
+	unsigned short flags;
+};
+
+/**
+ * mapping for the cca private ME key token.
+ * Three parts of interest here: the header, the private section and
+ * the public section.
+ *
+ * mapping for the cca key token header
+ */
+struct cca_token_hdr {
+	unsigned char  token_identifier;
+	unsigned char  version;
+	unsigned short token_length;
+	unsigned char  reserved[4];
+} __attribute__((packed));
+
+#define CCA_TKN_HDR_ID_EXT 0x1E
+
+/**
+ * mapping for the cca private ME section
+ */
+struct cca_private_ext_ME_sec {
+	unsigned char  section_identifier;
+	unsigned char  version;
+	unsigned short section_length;
+	unsigned char  private_key_hash[20];
+	unsigned char  reserved1[4];
+	unsigned char  key_format;
+	unsigned char  reserved2;
+	unsigned char  key_name_hash[20];
+	unsigned char  key_use_flags[4];
+	unsigned char  reserved3[6];
+	unsigned char  reserved4[24];
+	unsigned char  confounder[24];
+	unsigned char  exponent[128];
+	unsigned char  modulus[128];
+} __attribute__((packed));
+
+#define CCA_PVT_USAGE_ALL 0x80
+
+/**
+ * mapping for the cca public section
+ * In a private key, the modulus doesn't appear in the public
+ * section, so an arbitrary public exponent of 0x010001 is used
+ * instead and the section length is always 0x0F.
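+ * (0x0F is the 12 byte fixed part of struct cca_public_sec plus the
+ * 3 exponent bytes that follow it.)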
+ */
+struct cca_public_sec {
+	unsigned char  section_identifier;
+	unsigned char  version;
+	unsigned short section_length;
+	unsigned char  reserved[2];
+	unsigned short exponent_len;
+	unsigned short modulus_bit_len;
+	unsigned short modulus_byte_len;    /* In a private key, this is 0 */
+} __attribute__((packed));
+
+/**
+ * mapping for the cca private CRT key 'token'
+ * The first three parts (the only parts considered in this release)
+ * are: the header, the private section and the public section.
+ * The header and public section are the same as for the
+ * private ME key token described above.
+ *
+ * Following the structure are the quantities p, q, dp, dq, u, pad,
+ * and modulus, in that order, where pad_len is the modulo 8
+ * complement of the residue modulo 8 of the sum of
+ * (p_len + q_len + dp_len + dq_len + u_len).
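+ *
+ * For example, with inputdatalength = 100 the code below uses
+ * p_len = dp_len = u_len = 58 and q_len = dq_len = 50; the sum is
+ * 274, so pad_len is 6 (274 + 6 is a multiple of 8).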
+ */
+struct cca_pvt_ext_CRT_sec {
+	unsigned char  section_identifier;
+	unsigned char  version;
+	unsigned short section_length;
+	unsigned char  private_key_hash[20];
+	unsigned char  reserved1[4];
+	unsigned char  key_format;
+	unsigned char  reserved2;
+	unsigned char  key_name_hash[20];
+	unsigned char  key_use_flags[4];
+	unsigned short p_len;
+	unsigned short q_len;
+	unsigned short dp_len;
+	unsigned short dq_len;
+	unsigned short u_len;
+	unsigned short mod_len;
+	unsigned char  reserved3[4];
+	unsigned short pad_len;
+	unsigned char  reserved4[52];
+	unsigned char  confounder[8];
+} __attribute__((packed));
+
+#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08
+#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
+
+/**
+ * Set up private key fields of a type6 MEX message.
+ * Note that all numerics in the key token are big-endian,
+ * while the entries in the key block header are little-endian.
+ *
+ * @mex: pointer to user input data
+ * @p: pointer to memory area for the key
+ *
+ * Returns the size of the key area or -EFAULT
+ */
+static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex,
+					  void *p, int big_endian)
+{
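+	/*
+	 * Fixed sizes used below: 0x0183 = 8 byte token header + 0x016C
+	 * private ME section + 0x000F public section; the 0x189 in the
+	 * key block header adds the 6 byte T6_keyBlock_hdr on top.
+	 */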
+	static struct cca_token_hdr static_pvt_me_hdr = {
+		.token_identifier	=  0x1E,
+		.token_length		=  0x0183,
+	};
+	static struct cca_private_ext_ME_sec static_pvt_me_sec = {
+		.section_identifier	=  0x02,
+		.section_length		=  0x016C,
+		.key_use_flags		= {0x80,0x00,0x00,0x00},
+	};
+	static struct cca_public_sec static_pub_me_sec = {
+		.section_identifier	=  0x04,
+		.section_length		=  0x000F,
+		.exponent_len		=  0x0003,
+	};
+	static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
+	struct {
+		struct T6_keyBlock_hdr t6_hdr;
+		struct cca_token_hdr pvtMeHdr;
+		struct cca_private_ext_ME_sec pvtMeSec;
+		struct cca_public_sec pubMeSec;
+		char exponent[3];
+	} __attribute__((packed)) *key = p;
+	unsigned char *temp;
+
+	memset(key, 0, sizeof(*key));
+
+	if (big_endian) {
+		key->t6_hdr.blen = cpu_to_be16(0x189);
+		key->t6_hdr.ulen = cpu_to_be16(0x189 - 2);
+	} else {
+		key->t6_hdr.blen = cpu_to_le16(0x189);
+		key->t6_hdr.ulen = cpu_to_le16(0x189 - 2);
+	}
+	key->pvtMeHdr = static_pvt_me_hdr;
+	key->pvtMeSec = static_pvt_me_sec;
+	key->pubMeSec = static_pub_me_sec;
+	/*
+	 * In a private key, the modulus doesn't appear in the public
+	 * section. So, an arbitrary public exponent of 0x010001 will be
+	 * used.
+	 */
+	memcpy(key->exponent, pk_exponent, 3);
+
+	/* key parameter block */
+	temp = key->pvtMeSec.exponent +
+		sizeof(key->pvtMeSec.exponent) - mex->inputdatalength;
+	if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
+		return -EFAULT;
+
+	/* modulus */
+	temp = key->pvtMeSec.modulus +
+		sizeof(key->pvtMeSec.modulus) - mex->inputdatalength;
+	if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
+		return -EFAULT;
+	key->pubMeSec.modulus_bit_len = 8 * mex->inputdatalength;
+	return sizeof(*key);
+}
+
+/**
+ * Set up public key fields of a type6 MEX message. Leading zeroes
+ * are stripped from the b_key.
+ * Note that all numerics in the key token are big-endian,
+ * while the entries in the key block header are little-endian.
+ *
+ * @mex: pointer to user input data
+ * @p: pointer to memory area for the key
+ *
+ * Returns the size of the key area or -EFAULT
+ */
+static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex,
+					  void *p, int big_endian)
+{
+	static struct cca_token_hdr static_pub_hdr = {
+		.token_identifier	=  0x1E,
+	};
+	static struct cca_public_sec static_pub_sec = {
+		.section_identifier	=  0x04,
+	};
+	struct {
+		struct T6_keyBlock_hdr t6_hdr;
+		struct cca_token_hdr pubHdr;
+		struct cca_public_sec pubSec;
+		char exponent[0];
+	} __attribute__((packed)) *key = p;
+	unsigned char *temp;
+	int i;
+
+	memset(key, 0, sizeof(*key));
+
+	key->pubHdr = static_pub_hdr;
+	key->pubSec = static_pub_sec;
+
+	/* key parameter block */
+	temp = key->exponent;
+	if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
+		return -EFAULT;
+	/* Strip leading zeroes from b_key. */
+	for (i = 0; i < mex->inputdatalength; i++)
+		if (temp[i])
+			break;
+	if (i >= mex->inputdatalength)
+		return -EINVAL;
+	memmove(temp, temp + i, mex->inputdatalength - i);
+	temp += mex->inputdatalength - i;
+	/* modulus */
+	if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
+		return -EFAULT;
+
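+	/* The variable part of the public section is the stripped
+	 * exponent (inputdatalength - i bytes) followed by the modulus. */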
+	key->pubSec.modulus_bit_len = 8 * mex->inputdatalength;
+	key->pubSec.modulus_byte_len = mex->inputdatalength;
+	key->pubSec.exponent_len = mex->inputdatalength - i;
+	key->pubSec.section_length = sizeof(key->pubSec) +
+					2*mex->inputdatalength - i;
+	key->pubHdr.token_length =
+		key->pubSec.section_length + sizeof(key->pubHdr);
+	if (big_endian) {
+		key->t6_hdr.ulen = cpu_to_be16(key->pubHdr.token_length + 4);
+		key->t6_hdr.blen = cpu_to_be16(key->pubHdr.token_length + 6);
+	} else {
+		key->t6_hdr.ulen = cpu_to_le16(key->pubHdr.token_length + 4);
+		key->t6_hdr.blen = cpu_to_le16(key->pubHdr.token_length + 6);
+	}
+	return sizeof(*key) + 2*mex->inputdatalength - i;
+}
+
+/**
+ * Set up private key fields of a type6 CRT message.
+ * Note that all numerics in the key token are big-endian,
+ * while the entries in the key block header are little-endian.
+ *
+ * @crt: pointer to user input data
+ * @p: pointer to memory area for the key
+ *
+ * Returns the size of the key area or -EFAULT
+ */
+static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt,
+				       void *p, int big_endian)
+{
+	static struct cca_public_sec static_cca_pub_sec = {
+		.section_identifier = 4,
+		.section_length = 0x000f,
+		.exponent_len = 0x0003,
+	};
+	static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
+	struct {
+		struct T6_keyBlock_hdr t6_hdr;
+		struct cca_token_hdr token;
+		struct cca_pvt_ext_CRT_sec pvt;
+		char key_parts[0];
+	} __attribute__((packed)) *key = p;
+	struct cca_public_sec *pub;
+	int short_len, long_len, pad_len, key_len, size;
+
+	memset(key, 0, sizeof(*key));
+
+	short_len = crt->inputdatalength / 2;
+	long_len = short_len + 8;
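+	/* Pad the key part area up to a multiple of 8; for x >= 0,
+	 * -x & 7 equals (8 - x % 8) % 8. */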
+	pad_len = -(3*long_len + 2*short_len) & 7;
+	key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength;
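+	/* Total size: the fixed headers and private section, the key
+	 * parts, the trailing public section and its 3 byte exponent. */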
+	size = sizeof(*key) + key_len + sizeof(*pub) + 3;
+
+	/* parameter block.key block */
+	if (big_endian) {
+		key->t6_hdr.blen = cpu_to_be16(size);
+		key->t6_hdr.ulen = cpu_to_be16(size - 2);
+	} else {
+		key->t6_hdr.blen = cpu_to_le16(size);
+		key->t6_hdr.ulen = cpu_to_le16(size - 2);
+	}
+
+	/* key token header */
+	key->token.token_identifier = CCA_TKN_HDR_ID_EXT;
+	key->token.token_length = size - 6;
+
+	/* private section */
+	key->pvt.section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
+	key->pvt.section_length = sizeof(key->pvt) + key_len;
+	key->pvt.key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
+	key->pvt.key_use_flags[0] = CCA_PVT_USAGE_ALL;
+	key->pvt.p_len = key->pvt.dp_len = key->pvt.u_len = long_len;
+	key->pvt.q_len = key->pvt.dq_len = short_len;
+	key->pvt.mod_len = crt->inputdatalength;
+	key->pvt.pad_len = pad_len;
+
+	/* key parts */
+	if (copy_from_user(key->key_parts, crt->np_prime, long_len) ||
+	    copy_from_user(key->key_parts + long_len,
+					crt->nq_prime, short_len) ||
+	    copy_from_user(key->key_parts + long_len + short_len,
+					crt->bp_key, long_len) ||
+	    copy_from_user(key->key_parts + 2*long_len + short_len,
+					crt->bq_key, short_len) ||
+	    copy_from_user(key->key_parts + 2*long_len + 2*short_len,
+					crt->u_mult_inv, long_len))
+		return -EFAULT;
+	memset(key->key_parts + 3*long_len + 2*short_len + pad_len,
+	       0xff, crt->inputdatalength);
+	pub = (struct cca_public_sec *)(key->key_parts + key_len);
+	*pub = static_cca_pub_sec;
+	pub->modulus_bit_len = 8 * crt->inputdatalength;
+	/*
+	 * In a private key, the modulus doesn't appear in the public
+	 * section. So, an arbitrary public exponent of 0x010001 will be
+	 * used.
+	 */
+	memcpy((char *) (pub + 1), pk_exponent, 3);
+	return size;
+}
+
+#endif /* _ZCRYPT_CCA_KEY_H_ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_cex2a.c b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_cex2a.c
new file mode 100644
index 0000000..0842867
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_cex2a.c
@@ -0,0 +1,498 @@
+/*
+ *  linux/drivers/s390/crypto/zcrypt_cex2a.c
+ *
+ *  zcrypt 2.1.0
+ *
+ *  Copyright (C)  2001, 2006 IBM Corporation
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/atomic.h>
+#include <asm/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_cex2a.h"
+
+#define CEX2A_MIN_MOD_SIZE	  1	/*    8 bits	*/
+#define CEX2A_MAX_MOD_SIZE	256	/* 2048 bits	*/
+#define CEX3A_MIN_MOD_SIZE	CEX2A_MIN_MOD_SIZE
+#define CEX3A_MAX_MOD_SIZE	512	/* 4096 bits	*/
+
+#define CEX2A_SPEED_RATING	970
+#define CEX3A_SPEED_RATING	900 /* Fixme: Needs finetuning */
+
+#define CEX2A_MAX_MESSAGE_SIZE	0x390	/* sizeof(struct type50_crb2_msg)    */
+#define CEX2A_MAX_RESPONSE_SIZE 0x110	/* max outputdatalength + type80_hdr */
+
+#define CEX3A_MAX_RESPONSE_SIZE	0x210	/* 512 byte (4096 bit) modulus
+					 * (max outputdatalength) +
+					 * type80_hdr */
+#define CEX3A_MAX_MESSAGE_SIZE	sizeof(struct type50_crb3_msg)
+
+#define CEX2A_CLEANUP_TIME	(15*HZ)
+#define CEX3A_CLEANUP_TIME	CEX2A_CLEANUP_TIME
+
+static struct ap_device_id zcrypt_cex2a_ids[] = {
+	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
+	{ AP_DEVICE(AP_DEVICE_TYPE_CEX3A) },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, "
+		   "Copyright 2001, 2006 IBM Corporation");
+MODULE_LICENSE("GPL");
+
+static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
+static void zcrypt_cex2a_remove(struct ap_device *ap_dev);
+static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
+				 struct ap_message *);
+
+static struct ap_driver zcrypt_cex2a_driver = {
+	.probe = zcrypt_cex2a_probe,
+	.remove = zcrypt_cex2a_remove,
+	.receive = zcrypt_cex2a_receive,
+	.ids = zcrypt_cex2a_ids,
+	.request_timeout = CEX2A_CLEANUP_TIME,
+};
+
+/**
+ * Convert an ICAMEX message to a type50 MEX message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to the AP message to be filled in
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
+				       struct ap_message *ap_msg,
+				       struct ica_rsa_modexpo *mex)
+{
+	unsigned char *mod, *exp, *inp;
+	int mod_len;
+
+	mod_len = mex->inputdatalength;
+
+	if (mod_len <= 128) {
+		struct type50_meb1_msg *meb1 = ap_msg->message;
+		memset(meb1, 0, sizeof(*meb1));
+		ap_msg->length = sizeof(*meb1);
+		meb1->header.msg_type_code = TYPE50_TYPE_CODE;
+		meb1->header.msg_len = sizeof(*meb1);
+		meb1->keyblock_type = TYPE50_MEB1_FMT;
+		mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
+		exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
+		inp = meb1->message + sizeof(meb1->message) - mod_len;
+	} else if (mod_len <= 256) {
+		struct type50_meb2_msg *meb2 = ap_msg->message;
+		memset(meb2, 0, sizeof(*meb2));
+		ap_msg->length = sizeof(*meb2);
+		meb2->header.msg_type_code = TYPE50_TYPE_CODE;
+		meb2->header.msg_len = sizeof(*meb2);
+		meb2->keyblock_type = TYPE50_MEB2_FMT;
+		mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
+		exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
+		inp = meb2->message + sizeof(meb2->message) - mod_len;
+	} else {
+		/* mod_len > 256 = 4096 bit RSA Key */
+		struct type50_meb3_msg *meb3 = ap_msg->message;
+		memset(meb3, 0, sizeof(*meb3));
+		ap_msg->length = sizeof(*meb3);
+		meb3->header.msg_type_code = TYPE50_TYPE_CODE;
+		meb3->header.msg_len = sizeof(*meb3);
+		meb3->keyblock_type = TYPE50_MEB3_FMT;
+		mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
+		exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
+		inp = meb3->message + sizeof(meb3->message) - mod_len;
+	}
+
+	if (copy_from_user(mod, mex->n_modulus, mod_len) ||
+	    copy_from_user(exp, mex->b_key, mod_len) ||
+	    copy_from_user(inp, mex->inputdata, mod_len))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type50 CRT message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to the AP message to be filled in
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
+				       struct ap_message *ap_msg,
+				       struct ica_rsa_modexpo_crt *crt)
+{
+	int mod_len, short_len, long_len, long_offset, limit;
+	unsigned char *p, *q, *dp, *dq, *u, *inp;
+
+	mod_len = crt->inputdatalength;
+	short_len = mod_len / 2;
+	long_len = mod_len / 2 + 8;
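+	/* np_prime, bp_key and u_mult_inv are passed in with 8 extra
+	 * leading bytes by the ica_rsa_modexpo_crt interface, hence the
+	 * longer length for p, dp and u. */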
+
+	/*
+	 * CEX2A cannot handle p, dp, or U > 128 bytes.
+	 * If we have one of these, we need to do extra checking.
+	 * For CEX3A the limit is 256 bytes.
+	 */
+	if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)
+		limit = 256;
+	else
+		limit = 128;
+
+	if (long_len > limit) {
+		/*
+		 * zcrypt_rsa_crt already checked for the leading
+		 * zeroes of np_prime, bp_key and u_mult_inv.
+		 */
+		long_offset = long_len - limit;
+		long_len = limit;
+	} else
+		long_offset = 0;
+
+	/*
+	 * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
+	 * the larger message structure.
+	 */
+	if (long_len <= 64) {
+		struct type50_crb1_msg *crb1 = ap_msg->message;
+		memset(crb1, 0, sizeof(*crb1));
+		ap_msg->length = sizeof(*crb1);
+		crb1->header.msg_type_code = TYPE50_TYPE_CODE;
+		crb1->header.msg_len = sizeof(*crb1);
+		crb1->keyblock_type = TYPE50_CRB1_FMT;
+		p = crb1->p + sizeof(crb1->p) - long_len;
+		q = crb1->q + sizeof(crb1->q) - short_len;
+		dp = crb1->dp + sizeof(crb1->dp) - long_len;
+		dq = crb1->dq + sizeof(crb1->dq) - short_len;
+		u = crb1->u + sizeof(crb1->u) - long_len;
+		inp = crb1->message + sizeof(crb1->message) - mod_len;
+	} else if (long_len <= 128) {
+		struct type50_crb2_msg *crb2 = ap_msg->message;
+		memset(crb2, 0, sizeof(*crb2));
+		ap_msg->length = sizeof(*crb2);
+		crb2->header.msg_type_code = TYPE50_TYPE_CODE;
+		crb2->header.msg_len = sizeof(*crb2);
+		crb2->keyblock_type = TYPE50_CRB2_FMT;
+		p = crb2->p + sizeof(crb2->p) - long_len;
+		q = crb2->q + sizeof(crb2->q) - short_len;
+		dp = crb2->dp + sizeof(crb2->dp) - long_len;
+		dq = crb2->dq + sizeof(crb2->dq) - short_len;
+		u = crb2->u + sizeof(crb2->u) - long_len;
+		inp = crb2->message + sizeof(crb2->message) - mod_len;
+	} else {
+		/* long_len > 128 */
+		struct type50_crb3_msg *crb3 = ap_msg->message;
+		memset(crb3, 0, sizeof(*crb3));
+		ap_msg->length = sizeof(*crb3);
+		crb3->header.msg_type_code = TYPE50_TYPE_CODE;
+		crb3->header.msg_len = sizeof(*crb3);
+		crb3->keyblock_type = TYPE50_CRB3_FMT;
+		p = crb3->p + sizeof(crb3->p) - long_len;
+		q = crb3->q + sizeof(crb3->q) - short_len;
+		dp = crb3->dp + sizeof(crb3->dp) - long_len;
+		dq = crb3->dq + sizeof(crb3->dq) - short_len;
+		u = crb3->u + sizeof(crb3->u) - long_len;
+		inp = crb3->message + sizeof(crb3->message) - mod_len;
+	}
+
+	if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
+	    copy_from_user(q, crt->nq_prime, short_len) ||
+	    copy_from_user(dp, crt->bp_key + long_offset, long_len) ||
+	    copy_from_user(dq, crt->bq_key, short_len) ||
+	    copy_from_user(u, crt->u_mult_inv + long_offset, long_len) ||
+	    copy_from_user(inp, crt->inputdata, mod_len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/**
+ * Copy results from a type 80 reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @outputdata: pointer to user output data
+ * @outputdatalength: size of user output data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int convert_type80(struct zcrypt_device *zdev,
+			  struct ap_message *reply,
+			  char __user *outputdata,
+			  unsigned int outputdatalength)
+{
+	struct type80_hdr *t80h = reply->message;
+	unsigned char *data;
+
+	if (t80h->len < sizeof(*t80h) + outputdatalength) {
+		/* The result is too short, the CEX2A card may not do that.. */
+		zdev->online = 0;
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+	if (zdev->user_space_type == ZCRYPT_CEX2A)
+		BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
+	else
+		BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
+	data = reply->message + t80h->len - outputdatalength;
+	if (copy_to_user(outputdata, data, outputdatalength))
+		return -EFAULT;
+	return 0;
+}
+
+static int convert_response(struct zcrypt_device *zdev,
+			    struct ap_message *reply,
+			    char __user *outputdata,
+			    unsigned int outputdatalength)
+{
+	/* Response type byte is the second byte in the response. */
+	switch (((unsigned char *) reply->message)[1]) {
+	case TYPE82_RSP_CODE:
+	case TYPE88_RSP_CODE:
+		return convert_error(zdev, reply);
+	case TYPE80_RSP_CODE:
+		return convert_type80(zdev, reply,
+				      outputdata, outputdatalength);
+	default: /* Unknown response type, this should NEVER EVER happen */
+		zdev->online = 0;
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @ap_dev: pointer to the AP device
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
+				 struct ap_message *msg,
+				 struct ap_message *reply)
+{
+	static struct error_hdr error_reply = {
+		.type = TYPE82_RSP_CODE,
+		.reply_code = REP82_ERROR_MACHINE_FAILURE,
+	};
+	struct type80_hdr *t80h;
+	int length;
+
+	/* Copy the reply message to the request message buffer. */
+	if (IS_ERR(reply)) {
+		memcpy(msg->message, &error_reply, sizeof(error_reply));
+		goto out;
+	}
+	t80h = reply->message;
+	if (t80h->type == TYPE80_RSP_CODE) {
+		if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
+			length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
+		else
+			length = min(CEX3A_MAX_RESPONSE_SIZE, (int) t80h->len);
+		memcpy(msg->message, reply->message, length);
+	} else
+		memcpy(msg->message, reply->message, sizeof error_reply);
+out:
+	complete((struct completion *) msg->private);
+}
+
+static atomic_t zcrypt_step = ATOMIC_INIT(0);
+
+/**
+ * The request distributor calls this function if it picked the CEX2A
+ * device to handle a modexpo request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ *	  CEX2A device to the request distributor
+ * @mex: pointer to the modexpo request buffer
+ */
+static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
+				 struct ica_rsa_modexpo *mex)
+{
+	struct ap_message ap_msg;
+	struct completion work;
+	int rc;
+
+	ap_init_message(&ap_msg);
+	if (zdev->user_space_type == ZCRYPT_CEX2A)
+		ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
+	else
+		ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
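+	/* psmid: caller's pid in the upper 32 bits, a global sequence
+	 * number in the lower 32 bits, used to match replies to requests. */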
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &work;
+	rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex);
+	if (rc)
+		goto out_free;
+	init_completion(&work);
+	ap_queue_message(zdev->ap_dev, &ap_msg);
+	rc = wait_for_completion_interruptible(&work);
+	if (rc == 0)
+		rc = convert_response(zdev, &ap_msg, mex->outputdata,
+				      mex->outputdatalength);
+	else
+		/* Signal pending. */
+		ap_cancel_message(zdev->ap_dev, &ap_msg);
+out_free:
+	kfree(ap_msg.message);
+	return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the CEX2A
+ * device to handle a modexpo_crt request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ *	  CEX2A device to the request distributor
+ * @crt: pointer to the modexpo_crt request buffer
+ */
+static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
+				     struct ica_rsa_modexpo_crt *crt)
+{
+	struct ap_message ap_msg;
+	struct completion work;
+	int rc;
+
+	ap_init_message(&ap_msg);
+	if (zdev->user_space_type == ZCRYPT_CEX2A)
+		ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
+	else
+		ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &work;
+	rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt);
+	if (rc)
+		goto out_free;
+	init_completion(&work);
+	ap_queue_message(zdev->ap_dev, &ap_msg);
+	rc = wait_for_completion_interruptible(&work);
+	if (rc == 0)
+		rc = convert_response(zdev, &ap_msg, crt->outputdata,
+				      crt->outputdatalength);
+	else
+		/* Signal pending. */
+		ap_cancel_message(zdev->ap_dev, &ap_msg);
+out_free:
+	kfree(ap_msg.message);
+	return rc;
+}
+
+/**
+ * The crypto operations for a CEX2A card.
+ */
+static struct zcrypt_ops zcrypt_cex2a_ops = {
+	.rsa_modexpo = zcrypt_cex2a_modexpo,
+	.rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
+};
+
+/**
+ * Probe function for CEX2A cards. It always accepts the AP device
+ * since the bus_match already checked the hardware type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
+{
+	struct zcrypt_device *zdev = NULL;
+	int rc = 0;
+
+	switch (ap_dev->device_type) {
+	case AP_DEVICE_TYPE_CEX2A:
+		zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
+		if (!zdev)
+			return -ENOMEM;
+		zdev->user_space_type = ZCRYPT_CEX2A;
+		zdev->type_string = "CEX2A";
+		zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
+		zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
+		zdev->short_crt = 1;
+		zdev->speed_rating = CEX2A_SPEED_RATING;
+		zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
+		break;
+	case AP_DEVICE_TYPE_CEX3A:
+		zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE);
+		if (!zdev)
+			return -ENOMEM;
+		zdev->user_space_type = ZCRYPT_CEX3A;
+		zdev->type_string = "CEX3A";
+		zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
+		zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
+		zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
+		if (ap_4096_commands_available(ap_dev->qid)) {
+			zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
+			zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
+		}
+		zdev->short_crt = 1;
+		zdev->speed_rating = CEX3A_SPEED_RATING;
+		break;
+	}
+	if (zdev != NULL) {
+		zdev->ap_dev = ap_dev;
+		zdev->ops = &zcrypt_cex2a_ops;
+		zdev->online = 1;
+		ap_dev->reply = &zdev->reply;
+		ap_dev->private = zdev;
+		rc = zcrypt_device_register(zdev);
+	}
+	if (rc) {
+		ap_dev->private = NULL;
+		zcrypt_device_free(zdev);
+	}
+	return rc;
+}
+
+/**
+ * This is called to remove the extended CEX2A driver information
+ * if an AP device is removed.
+ */
+static void zcrypt_cex2a_remove(struct ap_device *ap_dev)
+{
+	struct zcrypt_device *zdev = ap_dev->private;
+
+	zcrypt_device_unregister(zdev);
+}
+
+int __init zcrypt_cex2a_init(void)
+{
+	return ap_driver_register(&zcrypt_cex2a_driver, THIS_MODULE, "cex2a");
+}
+
+void __exit zcrypt_cex2a_exit(void)
+{
+	ap_driver_unregister(&zcrypt_cex2a_driver);
+}
+
+module_init(zcrypt_cex2a_init);
+module_exit(zcrypt_cex2a_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_cex2a.h b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_cex2a.h
new file mode 100644
index 0000000..0350665
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_cex2a.h
@@ -0,0 +1,151 @@
+/*
+ *  linux/drivers/s390/crypto/zcrypt_cex2a.h
+ *
+ *  zcrypt 2.1.0
+ *
+ *  Copyright (C)  2001, 2006 IBM Corporation
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_CEX2A_H_
+#define _ZCRYPT_CEX2A_H_
+
+/**
+ * The type 50 message family is associated with a CEX2A card.
+ *
+ * The six members of the family are described below.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type50_hdr {
+	unsigned char	reserved1;
+	unsigned char	msg_type_code;	/* 0x50 */
+	unsigned short	msg_len;
+	unsigned char	reserved2;
+	unsigned char	ignored;
+	unsigned short	reserved3;
+} __attribute__((packed));
+
+#define TYPE50_TYPE_CODE	0x50
+
+#define TYPE50_MEB1_FMT		0x0001
+#define TYPE50_MEB2_FMT		0x0002
+#define TYPE50_MEB3_FMT		0x0003
+#define TYPE50_CRB1_FMT		0x0011
+#define TYPE50_CRB2_FMT		0x0012
+#define TYPE50_CRB3_FMT		0x0013
+
+/* Mod-Exp, with a small modulus */
+struct type50_meb1_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0001 */
+	unsigned char	reserved[6];
+	unsigned char	exponent[128];
+	unsigned char	modulus[128];
+	unsigned char	message[128];
+} __attribute__((packed));
+
+/* Mod-Exp, with a large modulus */
+struct type50_meb2_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0002 */
+	unsigned char	reserved[6];
+	unsigned char	exponent[256];
+	unsigned char	modulus[256];
+	unsigned char	message[256];
+} __attribute__((packed));
+
+/* Mod-Exp, with a larger modulus */
+struct type50_meb3_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0003 */
+	unsigned char	reserved[6];
+	unsigned char	exponent[512];
+	unsigned char	modulus[512];
+	unsigned char	message[512];
+} __attribute__((packed));
+
+/* CRT, with a small modulus */
+struct type50_crb1_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0011 */
+	unsigned char	reserved[6];
+	unsigned char	p[64];
+	unsigned char	q[64];
+	unsigned char	dp[64];
+	unsigned char	dq[64];
+	unsigned char	u[64];
+	unsigned char	message[128];
+} __attribute__((packed));
+
+/* CRT, with a large modulus */
+struct type50_crb2_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0012 */
+	unsigned char	reserved[6];
+	unsigned char	p[128];
+	unsigned char	q[128];
+	unsigned char	dp[128];
+	unsigned char	dq[128];
+	unsigned char	u[128];
+	unsigned char	message[256];
+} __attribute__((packed));
+
+/* CRT, with a larger modulus */
+struct type50_crb3_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0013 */
+	unsigned char	reserved[6];
+	unsigned char	p[256];
+	unsigned char	q[256];
+	unsigned char	dp[256];
+	unsigned char	dq[256];
+	unsigned char	u[256];
+	unsigned char	message[512];
+} __attribute__((packed));
+
+/**
+ * The type 80 response family is associated with a CEX2A card.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+
+#define TYPE80_RSP_CODE 0x80
+
+struct type80_hdr {
+	unsigned char	reserved1;
+	unsigned char	type;		/* 0x80 */
+	unsigned short	len;
+	unsigned char	code;		/* 0x00 */
+	unsigned char	reserved2[3];
+	unsigned char	reserved3[8];
+} __attribute__((packed));
+
+int zcrypt_cex2a_init(void);
+void zcrypt_cex2a_exit(void);
+
+#endif /* _ZCRYPT_CEX2A_H_ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_error.h b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_error.h
new file mode 100644
index 0000000..03ba27f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_error.h
@@ -0,0 +1,127 @@
+/*
+ *  linux/drivers/s390/crypto/zcrypt_error.h
+ *
+ *  zcrypt 2.1.0
+ *
+ *  Copyright (C)  2001, 2006 IBM Corporation
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_ERROR_H_
+#define _ZCRYPT_ERROR_H_
+
+#include "zcrypt_api.h"
+
+/**
+ * Reply Messages
+ *
+ * Error reply messages are of two types:
+ *    82:  Error (see below)
+ *    88:  Error (see below)
+ * Both type 82 and type 88 have the same structure in the header.
+ *
+ * Request reply messages are of three known types:
+ *    80:  Reply from a Type 50 Request (see CEX2A-RELATED STRUCTS)
+ *    84:  Reply from a Type 4 Request (see PCICA-RELATED STRUCTS)
+ *    86:  Reply from a Type 6 Request (see PCICC/PCIXCC/CEX2C-RELATED STRUCTS)
+ *
+ */
+struct error_hdr {
+	unsigned char reserved1;	/* 0x00			*/
+	unsigned char type;		/* 0x82 or 0x88		*/
+	unsigned char reserved2[2];	/* 0x0000		*/
+	unsigned char reply_code;	/* reply code		*/
+	unsigned char reserved3[3];	/* 0x000000		*/
+};
+
+#define TYPE82_RSP_CODE 0x82
+#define TYPE88_RSP_CODE 0x88
+
+#define REP82_ERROR_MACHINE_FAILURE  0x10
+#define REP82_ERROR_PREEMPT_FAILURE  0x12
+#define REP82_ERROR_CHECKPT_FAILURE  0x14
+#define REP82_ERROR_MESSAGE_TYPE     0x20
+#define REP82_ERROR_INVALID_COMM_CD  0x21	/* Type 84	*/
+#define REP82_ERROR_INVALID_MSG_LEN  0x23
+#define REP82_ERROR_RESERVD_FIELD    0x24	/* was 0x50	*/
+#define REP82_ERROR_FORMAT_FIELD     0x29
+#define REP82_ERROR_INVALID_COMMAND  0x30
+#define REP82_ERROR_MALFORMED_MSG    0x40
+#define REP82_ERROR_RESERVED_FIELDO  0x50	/* old value	*/
+#define REP82_ERROR_WORD_ALIGNMENT   0x60
+#define REP82_ERROR_MESSAGE_LENGTH   0x80
+#define REP82_ERROR_OPERAND_INVALID  0x82
+#define REP82_ERROR_OPERAND_SIZE     0x84
+#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
+#define REP82_ERROR_RESERVED_FIELD   0x88
+#define REP82_ERROR_TRANSPORT_FAIL   0x90
+#define REP82_ERROR_PACKET_TRUNCATED 0xA0
+#define REP82_ERROR_ZERO_BUFFER_LEN  0xB0
+
+#define REP88_ERROR_MODULE_FAILURE   0x10
+
+#define REP88_ERROR_MESSAGE_TYPE     0x20
+#define REP88_ERROR_MESSAGE_MALFORMD 0x22
+#define REP88_ERROR_MESSAGE_LENGTH   0x23
+#define REP88_ERROR_RESERVED_FIELD   0x24
+#define REP88_ERROR_KEY_TYPE	     0x34
+#define REP88_ERROR_INVALID_KEY      0x82	/* CEX2A	*/
+#define REP88_ERROR_OPERAND	     0x84	/* CEX2A	*/
+#define REP88_ERROR_OPERAND_EVEN_MOD 0x85	/* CEX2A	*/
+
+static inline int convert_error(struct zcrypt_device *zdev,
+				struct ap_message *reply)
+{
+	struct error_hdr *ehdr = reply->message;
+
+	switch (ehdr->reply_code) {
+	case REP82_ERROR_OPERAND_INVALID:
+	case REP82_ERROR_OPERAND_SIZE:
+	case REP82_ERROR_EVEN_MOD_IN_OPND:
+	case REP88_ERROR_MESSAGE_MALFORMD:
+	//   REP88_ERROR_INVALID_KEY		// '82' CEX2A
+	//   REP88_ERROR_OPERAND		// '84' CEX2A
+	//   REP88_ERROR_OPERAND_EVEN_MOD	// '85' CEX2A
+		/* Invalid input data. */
+		return -EINVAL;
+	case REP82_ERROR_MESSAGE_TYPE:
+	//   REP88_ERROR_MESSAGE_TYPE		// '20' CEX2A
+		/*
+		 * Sending a message of the wrong type is a bug in the
+		 * device driver. Warn about it, disable the device
+		 * and then repeat the request.
+		 */
+		WARN_ON(1);
+		zdev->online = 0;
+		return -EAGAIN;
+	case REP82_ERROR_TRANSPORT_FAIL:
+	case REP82_ERROR_MACHINE_FAILURE:
+	//   REP88_ERROR_MODULE_FAILURE		// '10' CEX2A
+		/* If a card fails disable it and repeat the request. */
+		zdev->online = 0;
+		return -EAGAIN;
+	default:
+		zdev->online = 0;
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+}
+
+#endif /* _ZCRYPT_ERROR_H_ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcica.c b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcica.c
new file mode 100644
index 0000000..0effca9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcica.c
@@ -0,0 +1,410 @@
+/*
+ *  linux/drivers/s390/crypto/zcrypt_pcica.c
+ *
+ *  zcrypt 2.1.0
+ *
+ *  Copyright (C)  2001, 2006 IBM Corporation
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/atomic.h>
+#include <asm/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_pcica.h"
+
+#define PCICA_MIN_MOD_SIZE	  1	/*    8 bits	*/
+#define PCICA_MAX_MOD_SIZE	256	/* 2048 bits	*/
+
+#define PCICA_SPEED_RATING	2800
+
+#define PCICA_MAX_MESSAGE_SIZE	0x3a0	/* sizeof(struct type4_lcr)	     */
+#define PCICA_MAX_RESPONSE_SIZE 0x110	/* max outputdatalength + type80_hdr */
+
+#define PCICA_CLEANUP_TIME	(15*HZ)
+
+static struct ap_device_id zcrypt_pcica_ids[] = {
+	{ AP_DEVICE(AP_DEVICE_TYPE_PCICA) },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids);
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, "
+		   "Copyright 2001, 2006 IBM Corporation");
+MODULE_LICENSE("GPL");
+
+static int zcrypt_pcica_probe(struct ap_device *ap_dev);
+static void zcrypt_pcica_remove(struct ap_device *ap_dev);
+static void zcrypt_pcica_receive(struct ap_device *, struct ap_message *,
+				 struct ap_message *);
+
+static struct ap_driver zcrypt_pcica_driver = {
+	.probe = zcrypt_pcica_probe,
+	.remove = zcrypt_pcica_remove,
+	.receive = zcrypt_pcica_receive,
+	.ids = zcrypt_pcica_ids,
+	.request_timeout = PCICA_CLEANUP_TIME,
+};
+
+/**
+ * Convert an ICAMEX message to a type4 MEX message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to the AP message to be filled in
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICAMEX_msg_to_type4MEX_msg(struct zcrypt_device *zdev,
+				      struct ap_message *ap_msg,
+				      struct ica_rsa_modexpo *mex)
+{
+	unsigned char *modulus, *exponent, *message;
+	int mod_len;
+
+	mod_len = mex->inputdatalength;
+
+	if (mod_len <= 128) {
+		struct type4_sme *sme = ap_msg->message;
+		memset(sme, 0, sizeof(*sme));
+		ap_msg->length = sizeof(*sme);
+		sme->header.msg_fmt = TYPE4_SME_FMT;
+		sme->header.msg_len = sizeof(*sme);
+		sme->header.msg_type_code = TYPE4_TYPE_CODE;
+		sme->header.request_code = TYPE4_REQU_CODE;
+		modulus = sme->modulus + sizeof(sme->modulus) - mod_len;
+		exponent = sme->exponent + sizeof(sme->exponent) - mod_len;
+		message = sme->message + sizeof(sme->message) - mod_len;
+	} else {
+		struct type4_lme *lme = ap_msg->message;
+		memset(lme, 0, sizeof(*lme));
+		ap_msg->length = sizeof(*lme);
+		lme->header.msg_fmt = TYPE4_LME_FMT;
+		lme->header.msg_len = sizeof(*lme);
+		lme->header.msg_type_code = TYPE4_TYPE_CODE;
+		lme->header.request_code = TYPE4_REQU_CODE;
+		modulus = lme->modulus + sizeof(lme->modulus) - mod_len;
+		exponent = lme->exponent + sizeof(lme->exponent) - mod_len;
+		message = lme->message + sizeof(lme->message) - mod_len;
+	}
+
+	if (copy_from_user(modulus, mex->n_modulus, mod_len) ||
+	    copy_from_user(exponent, mex->b_key, mod_len) ||
+	    copy_from_user(message, mex->inputdata, mod_len))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type4 CRT message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to the AP message to be filled in
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev,
+				      struct ap_message *ap_msg,
+				      struct ica_rsa_modexpo_crt *crt)
+{
+	unsigned char *p, *q, *dp, *dq, *u, *inp;
+	int mod_len, short_len, long_len;
+
+	mod_len = crt->inputdatalength;
+	short_len = mod_len / 2;
+	long_len = mod_len / 2 + 8;
+
+	if (mod_len <= 128) {
+		struct type4_scr *scr = ap_msg->message;
+		memset(scr, 0, sizeof(*scr));
+		ap_msg->length = sizeof(*scr);
+		scr->header.msg_type_code = TYPE4_TYPE_CODE;
+		scr->header.request_code = TYPE4_REQU_CODE;
+		scr->header.msg_fmt = TYPE4_SCR_FMT;
+		scr->header.msg_len = sizeof(*scr);
+		p = scr->p + sizeof(scr->p) - long_len;
+		q = scr->q + sizeof(scr->q) - short_len;
+		dp = scr->dp + sizeof(scr->dp) - long_len;
+		dq = scr->dq + sizeof(scr->dq) - short_len;
+		u = scr->u + sizeof(scr->u) - long_len;
+		inp = scr->message + sizeof(scr->message) - mod_len;
+	} else {
+		struct type4_lcr *lcr = ap_msg->message;
+		memset(lcr, 0, sizeof(*lcr));
+		ap_msg->length = sizeof(*lcr);
+		lcr->header.msg_type_code = TYPE4_TYPE_CODE;
+		lcr->header.request_code = TYPE4_REQU_CODE;
+		lcr->header.msg_fmt = TYPE4_LCR_FMT;
+		lcr->header.msg_len = sizeof(*lcr);
+		p = lcr->p + sizeof(lcr->p) - long_len;
+		q = lcr->q + sizeof(lcr->q) - short_len;
+		dp = lcr->dp + sizeof(lcr->dp) - long_len;
+		dq = lcr->dq + sizeof(lcr->dq) - short_len;
+		u = lcr->u + sizeof(lcr->u) - long_len;
+		inp = lcr->message + sizeof(lcr->message) - mod_len;
+	}
+
+	if (copy_from_user(p, crt->np_prime, long_len) ||
+	    copy_from_user(q, crt->nq_prime, short_len) ||
+	    copy_from_user(dp, crt->bp_key, long_len) ||
+	    copy_from_user(dq, crt->bq_key, short_len) ||
+	    copy_from_user(u, crt->u_mult_inv, long_len) ||
+	    copy_from_user(inp, crt->inputdata, mod_len))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * Copy results from a type 84 reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @outputdata: pointer to user output data
+ * @outputdatalength: size of user output data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int convert_type84(struct zcrypt_device *zdev,
+			  struct ap_message *reply,
+			  char __user *outputdata,
+			  unsigned int outputdatalength)
+{
+	struct type84_hdr *t84h = reply->message;
+	char *data;
+
+	if (t84h->len < sizeof(*t84h) + outputdatalength) {
+		/* The result is too short, the PCICA card may not do that.. */
+		zdev->online = 0;
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+	BUG_ON(t84h->len > PCICA_MAX_RESPONSE_SIZE);
+	data = reply->message + t84h->len - outputdatalength;
+	if (copy_to_user(outputdata, data, outputdatalength))
+		return -EFAULT;
+	return 0;
+}
+
+static int convert_response(struct zcrypt_device *zdev,
+			    struct ap_message *reply,
+			    char __user *outputdata,
+			    unsigned int outputdatalength)
+{
+	/* Response type byte is the second byte in the response. */
+	switch (((unsigned char *) reply->message)[1]) {
+	case TYPE82_RSP_CODE:
+	case TYPE88_RSP_CODE:
+		return convert_error(zdev, reply);
+	case TYPE84_RSP_CODE:
+		return convert_type84(zdev, reply,
+				      outputdata, outputdatalength);
+	default: /* Unknown response type, this should NEVER EVER happen */
+		zdev->online = 0;
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @ap_dev: pointer to the AP device
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_pcica_receive(struct ap_device *ap_dev,
+				 struct ap_message *msg,
+				 struct ap_message *reply)
+{
+	static struct error_hdr error_reply = {
+		.type = TYPE82_RSP_CODE,
+		.reply_code = REP82_ERROR_MACHINE_FAILURE,
+	};
+	struct type84_hdr *t84h;
+	int length;
+
+	/* Copy the reply message to the request message buffer. */
+	if (IS_ERR(reply)) {
+		memcpy(msg->message, &error_reply, sizeof(error_reply));
+		goto out;
+	}
+	t84h = reply->message;
+	if (t84h->code == TYPE84_RSP_CODE) {
+		length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len);
+		memcpy(msg->message, reply->message, length);
+	} else
+		memcpy(msg->message, reply->message, sizeof error_reply);
+out:
+	complete((struct completion *) msg->private);
+}
+
+static atomic_t zcrypt_step = ATOMIC_INIT(0);
+
+/**
+ * The request distributor calls this function if it picked the PCICA
+ * device to handle a modexpo request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ *	  PCICA device to the request distributor
+ * @mex: pointer to the modexpo request buffer
+ */
+static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev,
+				 struct ica_rsa_modexpo *mex)
+{
+	struct ap_message ap_msg;
+	struct completion work;
+	int rc;
+
+	ap_init_message(&ap_msg);
+	ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &work;
+	rc = ICAMEX_msg_to_type4MEX_msg(zdev, &ap_msg, mex);
+	if (rc)
+		goto out_free;
+	init_completion(&work);
+	ap_queue_message(zdev->ap_dev, &ap_msg);
+	rc = wait_for_completion_interruptible(&work);
+	if (rc == 0)
+		rc = convert_response(zdev, &ap_msg, mex->outputdata,
+				      mex->outputdatalength);
+	else
+		/* Signal pending. */
+		ap_cancel_message(zdev->ap_dev, &ap_msg);
+out_free:
+	kfree(ap_msg.message);
+	return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCICA
+ * device to handle a modexpo_crt request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ *	  PCICA device to the request distributor
+ * @crt: pointer to the modexpo_crt request buffer
+ */
+static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev,
+				     struct ica_rsa_modexpo_crt *crt)
+{
+	struct ap_message ap_msg;
+	struct completion work;
+	int rc;
+
+	ap_init_message(&ap_msg);
+	ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &work;
+	rc = ICACRT_msg_to_type4CRT_msg(zdev, &ap_msg, crt);
+	if (rc)
+		goto out_free;
+	init_completion(&work);
+	ap_queue_message(zdev->ap_dev, &ap_msg);
+	rc = wait_for_completion_interruptible(&work);
+	if (rc == 0)
+		rc = convert_response(zdev, &ap_msg, crt->outputdata,
+				      crt->outputdatalength);
+	else
+		/* Signal pending. */
+		ap_cancel_message(zdev->ap_dev, &ap_msg);
+out_free:
+	kfree(ap_msg.message);
+	return rc;
+}
+
+/**
+ * The crypto operations for a PCICA card.
+ */
+static struct zcrypt_ops zcrypt_pcica_ops = {
+	.rsa_modexpo = zcrypt_pcica_modexpo,
+	.rsa_modexpo_crt = zcrypt_pcica_modexpo_crt,
+};
+
+/**
+ * Probe function for PCICA cards. It always accepts the AP device
+ * since the bus_match already checked the hardware type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_pcica_probe(struct ap_device *ap_dev)
+{
+	struct zcrypt_device *zdev;
+	int rc;
+
+	zdev = zcrypt_device_alloc(PCICA_MAX_RESPONSE_SIZE);
+	if (!zdev)
+		return -ENOMEM;
+	zdev->ap_dev = ap_dev;
+	zdev->ops = &zcrypt_pcica_ops;
+	zdev->online = 1;
+	zdev->user_space_type = ZCRYPT_PCICA;
+	zdev->type_string = "PCICA";
+	zdev->min_mod_size = PCICA_MIN_MOD_SIZE;
+	zdev->max_mod_size = PCICA_MAX_MOD_SIZE;
+	zdev->speed_rating = PCICA_SPEED_RATING;
+	zdev->max_exp_bit_length = PCICA_MAX_MOD_SIZE;
+	ap_dev->reply = &zdev->reply;
+	ap_dev->private = zdev;
+	rc = zcrypt_device_register(zdev);
+	if (rc)
+		goto out_free;
+	return 0;
+
+out_free:
+	ap_dev->private = NULL;
+	zcrypt_device_free(zdev);
+	return rc;
+}
+
+/**
+ * This is called to remove the extended PCICA driver information
+ * if an AP device is removed.
+ */
+static void zcrypt_pcica_remove(struct ap_device *ap_dev)
+{
+	struct zcrypt_device *zdev = ap_dev->private;
+
+	zcrypt_device_unregister(zdev);
+}
+
+int __init zcrypt_pcica_init(void)
+{
+	return ap_driver_register(&zcrypt_pcica_driver, THIS_MODULE, "pcica");
+}
+
+void zcrypt_pcica_exit(void)
+{
+	ap_driver_unregister(&zcrypt_pcica_driver);
+}
+
+module_init(zcrypt_pcica_init);
+module_exit(zcrypt_pcica_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcica.h b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcica.h
new file mode 100644
index 0000000..3be1118
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcica.h
@@ -0,0 +1,117 @@
+/*
+ *  linux/drivers/s390/crypto/zcrypt_pcica.h
+ *
+ *  zcrypt 2.1.0
+ *
+ *  Copyright (C)  2001, 2006 IBM Corporation
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_PCICA_H_
+#define _ZCRYPT_PCICA_H_
+
+/**
+ * The type 4 message family is associated with a PCICA card.
+ *
+ * The four members of the family are described below.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type4_hdr {
+	unsigned char  reserved1;
+	unsigned char  msg_type_code;	/* 0x04 */
+	unsigned short msg_len;
+	unsigned char  request_code;	/* 0x40 */
+	unsigned char  msg_fmt;
+	unsigned short reserved2;
+} __attribute__((packed));
+
+#define TYPE4_TYPE_CODE 0x04
+#define TYPE4_REQU_CODE 0x40
+
+#define TYPE4_SME_FMT 0x00
+#define TYPE4_LME_FMT 0x10
+#define TYPE4_SCR_FMT 0x40
+#define TYPE4_LCR_FMT 0x50
+
+/* Mod-Exp, with a small modulus */
+struct type4_sme {
+	struct type4_hdr header;
+	unsigned char	 message[128];
+	unsigned char	 exponent[128];
+	unsigned char	 modulus[128];
+} __attribute__((packed));
+
+/* Mod-Exp, with a large modulus */
+struct type4_lme {
+	struct type4_hdr header;
+	unsigned char	 message[256];
+	unsigned char	 exponent[256];
+	unsigned char	 modulus[256];
+} __attribute__((packed));
+
+/* CRT, with a small modulus */
+struct type4_scr {
+	struct type4_hdr header;
+	unsigned char	 message[128];
+	unsigned char	 dp[72];
+	unsigned char	 dq[64];
+	unsigned char	 p[72];
+	unsigned char	 q[64];
+	unsigned char	 u[72];
+} __attribute__((packed));
+
+/* CRT, with a large modulus */
+struct type4_lcr {
+	struct type4_hdr header;
+	unsigned char	 message[256];
+	unsigned char	 dp[136];
+	unsigned char	 dq[128];
+	unsigned char	 p[136];
+	unsigned char	 q[128];
+	unsigned char	 u[136];
+} __attribute__((packed));
+
+/**
+ * The type 84 response family is associated with a PCICA card.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+
+struct type84_hdr {
+	unsigned char  reserved1;
+	unsigned char  code;
+	unsigned short len;
+	unsigned char  reserved2[4];
+} __attribute__((packed));
+
+#define TYPE84_RSP_CODE 0x84
+
+int zcrypt_pcica_init(void);
+void zcrypt_pcica_exit(void);
+
+#endif /* _ZCRYPT_PCICA_H_ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcicc.c b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcicc.c
new file mode 100644
index 0000000..f9523c0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcicc.c
@@ -0,0 +1,616 @@
+/*
+ *  linux/drivers/s390/crypto/zcrypt_pcicc.c
+ *
+ *  zcrypt 2.1.0
+ *
+ *  Copyright (C)  2001, 2006 IBM Corporation
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+#include <linux/atomic.h>
+#include <asm/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_pcicc.h"
+#include "zcrypt_cca_key.h"
+
+#define PCICC_MIN_MOD_SIZE	 64	/*  512 bits */
+#define PCICC_MAX_MOD_SIZE_OLD	128	/* 1024 bits */
+#define PCICC_MAX_MOD_SIZE	256	/* 2048 bits */
+
+/*
+ * PCICC cards need a speed rating of 0. This keeps them at the end of
+ * the zcrypt device list (see zcrypt_api.c). PCICC cards are only
+ * used if no other cards are present because they are slow and can only
+ * cope with PKCS12 padded requests. The logic is somewhat odd: PKCS11 padded
+ * requests are rejected. The modexpo function encrypts PKCS12 padded data
+ * and decrypts any non-PKCS12 padded data (except PKCS11) on the assumption
+ * that it is encrypted PKCS12 data. The modexpo_crt function always decrypts
+ * the data on the assumption that it is PKCS12 encrypted data.
+ */
+#define PCICC_SPEED_RATING	0
+
+#define PCICC_MAX_MESSAGE_SIZE 0x710	/* max size type6 v1 crt message */
+#define PCICC_MAX_RESPONSE_SIZE 0x710	/* max size type86 v1 reply	 */
+
+#define PCICC_CLEANUP_TIME	(15*HZ)
+
+static struct ap_device_id zcrypt_pcicc_ids[] = {
+	{ AP_DEVICE(AP_DEVICE_TYPE_PCICC) },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids);
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, "
+		   "Copyright 2001, 2006 IBM Corporation");
+MODULE_LICENSE("GPL");
+
+static int zcrypt_pcicc_probe(struct ap_device *ap_dev);
+static void zcrypt_pcicc_remove(struct ap_device *ap_dev);
+static void zcrypt_pcicc_receive(struct ap_device *, struct ap_message *,
+				 struct ap_message *);
+
+static struct ap_driver zcrypt_pcicc_driver = {
+	.probe = zcrypt_pcicc_probe,
+	.remove = zcrypt_pcicc_remove,
+	.receive = zcrypt_pcicc_receive,
+	.ids = zcrypt_pcicc_ids,
+	.request_timeout = PCICC_CLEANUP_TIME,
+};
+
+/**
+ * The following is used to initialize the CPRB passed to the PCICC card
+ * in a type6 message. The 3 fields that must be filled in at execution
+ * time are  req_parml, rpl_parml and usage_domain. Note that all three
+ * fields are *little*-endian. Actually, everything about this interface
+ * is ascii/little-endian, since the device has 'Intel inside'.
+ *
+ * The CPRB is followed immediately by the parm block.
+ * The parm block contains:
+ * - function code ('PD' 0x5044 or 'PK' 0x504B)
+ * - rule block (0x0A00 'PKCS-1.2' or 0x0A00 'ZERO-PAD')
+ * - VUD block
+ */
+static struct CPRB static_cprb = {
+	.cprb_len	= __constant_cpu_to_le16(0x0070),
+	.cprb_ver_id	=  0x41,
+	.func_id	= {0x54,0x32},
+	.checkpoint_flag=  0x01,
+	.svr_namel	= __constant_cpu_to_le16(0x0008),
+	.svr_name	= {'I','C','S','F',' ',' ',' ',' '}
+};
+
+/**
+ * Check the message for PKCS11 padding.
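+ * ("PKCS11" here means PKCS #1 block type 1 padding: 0x00 0x01,
+ * at least eight 0xFF bytes, a 0x00 separator, then the data.)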
+ */
+static inline int is_PKCS11_padded(unsigned char *buffer, int length)
+{
+	int i;
+	if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
+		return 0;
+	for (i = 2; i < length; i++)
+		if (buffer[i] != 0xFF)
+			break;
+	if (i < 10 || i == length)
+		return 0;
+	if (buffer[i] != 0x00)
+		return 0;
+	return 1;
+}
+
+/**
+ * Check the message for PKCS12 padding.
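+ * ("PKCS12" here means PKCS #1 block type 2 padding: 0x00 0x02,
+ * at least eight non-zero padding bytes, a 0x00 separator, then
+ * the data.)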
+ */
+static inline int is_PKCS12_padded(unsigned char *buffer, int length)
+{
+	int i;
+	if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
+		return 0;
+	for (i = 2; i < length; i++)
+		if (buffer[i] == 0x00)
+			break;
+	if ((i < 10) || (i == length))
+		return 0;
+	if (buffer[i] != 0x00)
+		return 0;
+	return 1;
+}
+
+/**
+ * Convert an ICAMEX message to a type6 MEX message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to the AP message to be filled in
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICAMEX_msg_to_type6MEX_msg(struct zcrypt_device *zdev,
+				      struct ap_message *ap_msg,
+				      struct ica_rsa_modexpo *mex)
+{
+	static struct type6_hdr static_type6_hdr = {
+		.type		=  0x06,
+		.offset1	=  0x00000058,
+		.agent_id	= {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
+				   0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
+		.function_code	= {'P','K'},
+	};
+	static struct function_and_rules_block static_pke_function_and_rules ={
+		.function_code	= {'P','K'},
+		.ulen		= __constant_cpu_to_le16(10),
+		.only_rule	= {'P','K','C','S','-','1','.','2'}
+	};
+	struct {
+		struct type6_hdr hdr;
+		struct CPRB cprb;
+		struct function_and_rules_block fr;
+		unsigned short length;
+		char text[0];
+	} __attribute__((packed)) *msg = ap_msg->message;
+	int vud_len, pad_len, size;
+
+	/* VUD.ciphertext */
+	if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
+		return -EFAULT;
+
+	if (is_PKCS11_padded(msg->text, mex->inputdatalength))
+		return -EINVAL;
+
+	/* static message header and f&r */
+	msg->hdr = static_type6_hdr;
+	msg->fr = static_pke_function_and_rules;
+
+	if (is_PKCS12_padded(msg->text, mex->inputdatalength)) {
+		/* strip the padding and adjust the data length */
+		pad_len = strnlen(msg->text + 2, mex->inputdatalength - 2) + 3;
+		if (pad_len <= 9 || pad_len >= mex->inputdatalength)
+			return -ENODEV;
+		vud_len = mex->inputdatalength - pad_len;
+		memmove(msg->text, msg->text + pad_len, vud_len);
+		msg->length = cpu_to_le16(vud_len + 2);
+
+		/* Set up key after the variable length text. */
+		size = zcrypt_type6_mex_key_en(mex, msg->text + vud_len, 0);
+		if (size < 0)
+			return size;
+		size += sizeof(*msg) + vud_len;	/* total size of msg */
+	} else {
+		vud_len = mex->inputdatalength;
+		msg->length = cpu_to_le16(2 + vud_len);
+
+		msg->hdr.function_code[1] = 'D';
+		msg->fr.function_code[1] = 'D';
+
+		/* Set up key after the variable length text. */
+		size = zcrypt_type6_mex_key_de(mex, msg->text + vud_len, 0);
+		if (size < 0)
+			return size;
+		size += sizeof(*msg) + vud_len;	/* total size of msg */
+	}
+
+	/* message header, cprb and f&r */
+	msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4;
+	msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);
+
+	msg->cprb = static_cprb;
+	msg->cprb.usage_domain[0]= AP_QID_QUEUE(zdev->ap_dev->qid);
+	msg->cprb.req_parml = cpu_to_le16(size - sizeof(msg->hdr) -
+					   sizeof(msg->cprb));
+	msg->cprb.rpl_parml = cpu_to_le16(msg->hdr.FromCardLen1);
+
+	ap_msg->length = (size + 3) & -4;
+	return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type6 CRT message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICACRT_msg_to_type6CRT_msg(struct zcrypt_device *zdev,
+				      struct ap_message *ap_msg,
+				      struct ica_rsa_modexpo_crt *crt)
+{
+	static struct type6_hdr static_type6_hdr = {
+		.type		=  0x06,
+		.offset1	=  0x00000058,
+		.agent_id	= {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
+				   0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
+		.function_code	= {'P','D'},
+	};
+	static struct function_and_rules_block static_pkd_function_and_rules ={
+		.function_code	= {'P','D'},
+		.ulen		= __constant_cpu_to_le16(10),
+		.only_rule	= {'P','K','C','S','-','1','.','2'}
+	};
+	struct {
+		struct type6_hdr hdr;
+		struct CPRB cprb;
+		struct function_and_rules_block fr;
+		unsigned short length;
+		char text[0];
+	} __attribute__((packed)) *msg = ap_msg->message;
+	int size;
+
+	/* VUD.ciphertext */
+	msg->length = cpu_to_le16(2 + crt->inputdatalength);
+	if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
+		return -EFAULT;
+
+	if (is_PKCS11_padded(msg->text, crt->inputdatalength))
+		return -EINVAL;
+
+	/* Set up key after the variable length text. */
+	size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 0);
+	if (size < 0)
+		return size;
+	size += sizeof(*msg) + crt->inputdatalength;	/* total size of msg */
+
+	/* message header, cprb and f&r */
+	msg->hdr = static_type6_hdr;
+	msg->hdr.ToCardLen1 = (size -  sizeof(msg->hdr) + 3) & -4;
+	msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);
+
+	msg->cprb = static_cprb;
+	msg->cprb.usage_domain[0] = AP_QID_QUEUE(zdev->ap_dev->qid);
+	msg->cprb.req_parml = msg->cprb.rpl_parml =
+		cpu_to_le16(size - sizeof(msg->hdr) - sizeof(msg->cprb));
+
+	msg->fr = static_pkd_function_and_rules;
+
+	ap_msg->length = (size + 3) & -4;
+	return 0;
+}
+
+/**
+ * Copy results from a type 86 reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @outputdata: pointer to user output data
+ * @outputdatalength: size of user output data
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+struct type86_reply {
+	struct type86_hdr hdr;
+	struct type86_fmt2_ext fmt2;
+	struct CPRB cprb;
+	unsigned char pad[4];	/* 4 byte function code/rules block ? */
+	unsigned short length;
+	char text[0];
+} __attribute__((packed));
+
+static int convert_type86(struct zcrypt_device *zdev,
+			  struct ap_message *reply,
+			  char __user *outputdata,
+			  unsigned int outputdatalength)
+{
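+	/*
+	 * Pre-built block type 02 style padding (0x00 0x02 followed by fixed
+	 * non-zero filler), used below to recreate the padding that the card
+	 * stripped from the deciphered data.
+	 */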
+	static unsigned char static_pad[] = {
+		0x00,0x02,
+		0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
+		0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
+		0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
+		0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
+		0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
+		0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
+		0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
+		0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
+		0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
+		0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
+		0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
+		0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
+		0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
+		0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
+		0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
+		0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
+		0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
+		0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
+		0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
+		0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
+		0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
+		0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
+		0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
+		0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
+		0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
+		0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
+		0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
+		0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
+		0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
+		0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
+		0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
+		0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
+	};
+	struct type86_reply *msg = reply->message;
+	unsigned short service_rc, service_rs;
+	unsigned int reply_len, pad_len;
+	char *data;
+
+	service_rc = le16_to_cpu(msg->cprb.ccp_rtcode);
+	if (unlikely(service_rc != 0)) {
+		service_rs = le16_to_cpu(msg->cprb.ccp_rscode);
+		if (service_rc == 8 && service_rs == 66)
+			return -EINVAL;
+		if (service_rc == 8 && service_rs == 65)
+			return -EINVAL;
+		if (service_rc == 8 && service_rs == 770) {
+			zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
+			return -EAGAIN;
+		}
+		if (service_rc == 8 && service_rs == 783) {
+			zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
+			return -EAGAIN;
+		}
+		if (service_rc == 8 && service_rs == 72)
+			return -EINVAL;
+		zdev->online = 0;
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+	data = msg->text;
+	reply_len = le16_to_cpu(msg->length) - 2;
+	if (reply_len > outputdatalength)
+		return -EINVAL;
+	/*
+	 * For all encipher requests, the length of the ciphertext (reply_len)
+	 * will always equal the modulus length. For MEX decipher requests
+	 * the output needs to get padded. Minimum pad size is 10.
+	 *
+	 * Currently, the cases where padding will be added are:
+	 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
+	 *   ZERO-PAD and CRT is only supported for PKD requests)
+	 * - PCICC, always
+	 */
+	pad_len = outputdatalength - reply_len;
+	if (pad_len > 0) {
+		if (pad_len < 10)
+			return -EINVAL;
+		/* 'restore' padding left in the PCICC/PCIXCC card. */
+		if (copy_to_user(outputdata, static_pad, pad_len - 1))
+			return -EFAULT;
+		if (put_user(0, outputdata + pad_len - 1))
+			return -EFAULT;
+	}
+	/* Copy the crypto response to user space. */
+	if (copy_to_user(outputdata + pad_len, data, reply_len))
+		return -EFAULT;
+	return 0;
+}
+
+static int convert_response(struct zcrypt_device *zdev,
+			    struct ap_message *reply,
+			    char __user *outputdata,
+			    unsigned int outputdatalength)
+{
+	struct type86_reply *msg = reply->message;
+
+	/* Response type byte is the second byte in the response. */
+	switch (msg->hdr.type) {
+	case TYPE82_RSP_CODE:
+	case TYPE88_RSP_CODE:
+		return convert_error(zdev, reply);
+	case TYPE86_RSP_CODE:
+		if (msg->hdr.reply_code)
+			return convert_error(zdev, reply);
+		if (msg->cprb.cprb_ver_id == 0x01)
+			return convert_type86(zdev, reply,
+					      outputdata, outputdatalength);
+		/* no break, incorrect cprb version is an unknown response */
+	default: /* Unknown response type, this should NEVER EVER happen */
+		zdev->online = 0;
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @ap_dev: pointer to the AP device
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_pcicc_receive(struct ap_device *ap_dev,
+				 struct ap_message *msg,
+				 struct ap_message *reply)
+{
+	static struct error_hdr error_reply = {
+		.type = TYPE82_RSP_CODE,
+		.reply_code = REP82_ERROR_MACHINE_FAILURE,
+	};
+	struct type86_reply *t86r;
+	int length;
+
+	/* Copy the reply message to the request message buffer. */
+	if (IS_ERR(reply)) {
+		memcpy(msg->message, &error_reply, sizeof(error_reply));
+		goto out;
+	}
+	t86r = reply->message;
+	if (t86r->hdr.type == TYPE86_RSP_CODE &&
+		 t86r->cprb.cprb_ver_id == 0x01) {
+		length = sizeof(struct type86_reply) + t86r->length - 2;
+		length = min(PCICC_MAX_RESPONSE_SIZE, length);
+		memcpy(msg->message, reply->message, length);
+	} else
+		memcpy(msg->message, reply->message, sizeof error_reply);
+out:
+	complete((struct completion *) msg->private);
+}
+
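+/*
+ * Per-module request counter: every request gets a unique psmid built from
+ * the caller's PID (upper 32 bits) and this counter (lower 32 bits).
+ */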
+static atomic_t zcrypt_step = ATOMIC_INIT(0);
+
+/**
+ * The request distributor calls this function if it picked the PCICC
+ * device to handle a modexpo request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ *	  PCICC device to the request distributor
+ * @mex: pointer to the modexpo request buffer
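+ *
+ * The ICA request is converted into a type6 message in a freshly allocated
+ * page, queued on the AP device, and the caller sleeps until the reply
+ * arrives or a pending signal cancels the request.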
+ */
+static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev,
+				 struct ica_rsa_modexpo *mex)
+{
+	struct ap_message ap_msg;
+	struct completion work;
+	int rc;
+
+	ap_init_message(&ap_msg);
+	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.length = PAGE_SIZE;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &work;
+	rc = ICAMEX_msg_to_type6MEX_msg(zdev, &ap_msg, mex);
+	if (rc)
+		goto out_free;
+	init_completion(&work);
+	ap_queue_message(zdev->ap_dev, &ap_msg);
+	rc = wait_for_completion_interruptible(&work);
+	if (rc == 0)
+		rc = convert_response(zdev, &ap_msg, mex->outputdata,
+				      mex->outputdatalength);
+	else
+		/* Signal pending. */
+		ap_cancel_message(zdev->ap_dev, &ap_msg);
+out_free:
+	free_page((unsigned long) ap_msg.message);
+	return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCICC
+ * device to handle a modexpo_crt request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ *	  PCICC device to the request distributor
+ * @crt: pointer to the modexpo_crt request buffer
+ */
+static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
+				     struct ica_rsa_modexpo_crt *crt)
+{
+	struct ap_message ap_msg;
+	struct completion work;
+	int rc;
+
+	ap_init_message(&ap_msg);
+	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.length = PAGE_SIZE;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &work;
+	rc = ICACRT_msg_to_type6CRT_msg(zdev, &ap_msg, crt);
+	if (rc)
+		goto out_free;
+	init_completion(&work);
+	ap_queue_message(zdev->ap_dev, &ap_msg);
+	rc = wait_for_completion_interruptible(&work);
+	if (rc == 0)
+		rc = convert_response(zdev, &ap_msg, crt->outputdata,
+				      crt->outputdatalength);
+	else
+		/* Signal pending. */
+		ap_cancel_message(zdev->ap_dev, &ap_msg);
+out_free:
+	free_page((unsigned long) ap_msg.message);
+	return rc;
+}
+
+/**
+ * The crypto operations for a PCICC card.
+ */
+static struct zcrypt_ops zcrypt_pcicc_ops = {
+	.rsa_modexpo = zcrypt_pcicc_modexpo,
+	.rsa_modexpo_crt = zcrypt_pcicc_modexpo_crt,
+};
+
+/**
+ * Probe function for PCICC cards. It always accepts the AP device
+ * since the bus_match already checked the hardware type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_pcicc_probe(struct ap_device *ap_dev)
+{
+	struct zcrypt_device *zdev;
+	int rc;
+
+	zdev = zcrypt_device_alloc(PCICC_MAX_RESPONSE_SIZE);
+	if (!zdev)
+		return -ENOMEM;
+	zdev->ap_dev = ap_dev;
+	zdev->ops = &zcrypt_pcicc_ops;
+	zdev->online = 1;
+	zdev->user_space_type = ZCRYPT_PCICC;
+	zdev->type_string = "PCICC";
+	zdev->min_mod_size = PCICC_MIN_MOD_SIZE;
+	zdev->max_mod_size = PCICC_MAX_MOD_SIZE;
+	zdev->speed_rating = PCICC_SPEED_RATING;
+	zdev->max_exp_bit_length = PCICC_MAX_MOD_SIZE;
+	ap_dev->reply = &zdev->reply;
+	ap_dev->private = zdev;
+	rc = zcrypt_device_register(zdev);
+	if (rc)
+		goto out_free;
+	return 0;
+
+ out_free:
+	ap_dev->private = NULL;
+	zcrypt_device_free(zdev);
+	return rc;
+}
+
+/**
+ * This is called to remove the extended PCICC driver information
+ * if an AP device is removed.
+ */
+static void zcrypt_pcicc_remove(struct ap_device *ap_dev)
+{
+	struct zcrypt_device *zdev = ap_dev->private;
+
+	zcrypt_device_unregister(zdev);
+}
+
+int __init zcrypt_pcicc_init(void)
+{
+	return ap_driver_register(&zcrypt_pcicc_driver, THIS_MODULE, "pcicc");
+}
+
+void zcrypt_pcicc_exit(void)
+{
+	ap_driver_unregister(&zcrypt_pcicc_driver);
+}
+
+module_init(zcrypt_pcicc_init);
+module_exit(zcrypt_pcicc_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcicc.h b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcicc.h
new file mode 100644
index 0000000..6d44548
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcicc.h
@@ -0,0 +1,176 @@
+/*
+ *  linux/drivers/s390/crypto/zcrypt_pcicc.h
+ *
+ *  zcrypt 2.1.0
+ *
+ *  Copyright (C)  2001, 2006 IBM Corporation
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_PCICC_H_
+#define _ZCRYPT_PCICC_H_
+
+/**
+ * The type 6 message family is associated with PCICC or PCIXCC cards.
+ *
+ * It contains a message header followed by a CPRB, both of which
+ * are described below.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type6_hdr {
+	unsigned char reserved1;	/* 0x00				*/
+	unsigned char type;		/* 0x06				*/
+	unsigned char reserved2[2];	/* 0x0000			*/
+	unsigned char right[4];		/* 0x00000000			*/
+	unsigned char reserved3[2];	/* 0x0000			*/
+	unsigned char reserved4[2];	/* 0x0000			*/
+	unsigned char apfs[4];		/* 0x00000000			*/
+	unsigned int  offset1;		/* 0x00000058 (offset to CPRB)	*/
+	unsigned int  offset2;		/* 0x00000000			*/
+	unsigned int  offset3;		/* 0x00000000			*/
+	unsigned int  offset4;		/* 0x00000000			*/
+	unsigned char agent_id[16];	/* PCICC:			*/
+					/*    0x0100			*/
+					/*    0x4343412d4150504c202020	*/
+					/*    0x010101			*/
+					/* PCIXCC:			*/
+					/*    0x4341000000000000	*/
+					/*    0x0000000000000000	*/
+	unsigned char rqid[2];		/* rqid.  internal to 603	*/
+	unsigned char reserved5[2];	/* 0x0000			*/
+	unsigned char function_code[2];	/* for PKD, 0x5044 (ascii 'PD')	*/
+	unsigned char reserved6[2];	/* 0x0000			*/
+	unsigned int  ToCardLen1;	/* (request CPRB len + 3) & -4	*/
+	unsigned int  ToCardLen2;	/* db len 0x00000000 for PKD	*/
+	unsigned int  ToCardLen3;	/* 0x00000000			*/
+	unsigned int  ToCardLen4;	/* 0x00000000			*/
+	unsigned int  FromCardLen1;	/* response buffer length	*/
+	unsigned int  FromCardLen2;	/* db len 0x00000000 for PKD	*/
+	unsigned int  FromCardLen3;	/* 0x00000000			*/
+	unsigned int  FromCardLen4;	/* 0x00000000			*/
+} __attribute__((packed));
+
+/**
+ * CPRB
+ *	  Note that all shorts, ints and longs are little-endian.
+ *	  All pointer fields are 32-bits long, and mean nothing
+ *
+ *	  A request CPRB is followed by a request_parameter_block.
+ *
+ *	  The request (or reply) parameter block is organized thus:
+ *	    function code
+ *	    VUD block
+ *	    key block
+ */
+struct CPRB {
+	unsigned short cprb_len;	/* CPRB length			 */
+	unsigned char cprb_ver_id;	/* CPRB version id.		 */
+	unsigned char pad_000;		/* Alignment pad byte.		 */
+	unsigned char srpi_rtcode[4];	/* SRPI return code LELONG	 */
+	unsigned char srpi_verb;	/* SRPI verb type		 */
+	unsigned char flags;		/* flags			 */
+	unsigned char func_id[2];	/* function id			 */
+	unsigned char checkpoint_flag;	/*				 */
+	unsigned char resv2;		/* reserved			 */
+	unsigned short req_parml;	/* request parameter buffer	 */
+					/* length 16-bit little endian	 */
+	unsigned char req_parmp[4];	/* request parameter buffer	 *
+					 * pointer (means nothing: the	 *
+					 * parameter buffer follows	 *
+					 * the CPRB).			 */
+	unsigned char req_datal[4];	/* request data buffer		 */
+					/* length	  ULELONG	 */
+	unsigned char req_datap[4];	/* request data buffer		 */
+					/* pointer			 */
+	unsigned short rpl_parml;	/* reply  parameter buffer	 */
+					/* length 16-bit little endian	 */
+	unsigned char pad_001[2];	/* Alignment pad bytes. ULESHORT */
+	unsigned char rpl_parmp[4];	/* reply parameter buffer	 *
+					 * pointer (means nothing: the	 *
+					 * parameter buffer follows	 *
+					 * the CPRB).			 */
+	unsigned char rpl_datal[4];	/* reply data buffer len ULELONG */
+	unsigned char rpl_datap[4];	/* reply data buffer		 */
+					/* pointer			 */
+	unsigned short ccp_rscode;	/* server reason code	ULESHORT */
+	unsigned short ccp_rtcode;	/* server return code	ULESHORT */
+	unsigned char repd_parml[2];	/* replied parameter len ULESHORT*/
+	unsigned char mac_data_len[2];	/* Mac Data Length	ULESHORT */
+	unsigned char repd_datal[4];	/* replied data length	ULELONG	 */
+	unsigned char req_pc[2];	/* PC identifier		 */
+	unsigned char res_origin[8];	/* resource origin		 */
+	unsigned char mac_value[8];	/* Mac Value			 */
+	unsigned char logon_id[8];	/* Logon Identifier		 */
+	unsigned char usage_domain[2];	/* cdx				 */
+	unsigned char resv3[18];	/* reserved for requestor	 */
+	unsigned short svr_namel;	/* server name length  ULESHORT	 */
+	unsigned char svr_name[8];	/* server name			 */
+} __attribute__((packed));
+
+/**
+ * The type 86 message family is associated with PCICC and PCIXCC cards.
+ *
+ * It contains a message header followed by a CPRB.  The CPRB is
+ * the same as the request CPRB, which is described above.
+ *
+ * If format is 1, an error condition exists and no data beyond
+ * the 8-byte message header is of interest.
+ *
+ * The non-error message is shown below.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type86_hdr {
+	unsigned char reserved1;	/* 0x00				*/
+	unsigned char type;		/* 0x86				*/
+	unsigned char format;		/* 0x01 (error) or 0x02 (ok)	*/
+	unsigned char reserved2;	/* 0x00				*/
+	unsigned char reply_code;	/* reply code (see above)	*/
+	unsigned char reserved3[3];	/* 0x000000			*/
+} __attribute__((packed));
+
+#define TYPE86_RSP_CODE 0x86
+#define TYPE86_FMT2	0x02
+
+struct type86_fmt2_ext {
+	unsigned char	  reserved[4];	/* 0x00000000			*/
+	unsigned char	  apfs[4];	/* final status			*/
+	unsigned int	  count1;	/* length of CPRB + parameters	*/
+	unsigned int	  offset1;	/* offset to CPRB		*/
+	unsigned int	  count2;	/* 0x00000000			*/
+	unsigned int	  offset2;	/* db offset 0x00000000 for PKD	*/
+	unsigned int	  count3;	/* 0x00000000			*/
+	unsigned int	  offset3;	/* 0x00000000			*/
+	unsigned int	  count4;	/* 0x00000000			*/
+	unsigned int	  offset4;	/* 0x00000000			*/
+} __attribute__((packed));
+
+struct function_and_rules_block {
+	unsigned char function_code[2];
+	unsigned short ulen;
+	unsigned char only_rule[8];
+} __attribute__((packed));
+
+int zcrypt_pcicc_init(void);
+void zcrypt_pcicc_exit(void);
+
+#endif /* _ZCRYPT_PCICC_H_ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcixcc.c b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcixcc.c
new file mode 100644
index 0000000..cf1cbd4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -0,0 +1,1123 @@
+/*
+ *  linux/drivers/s390/crypto/zcrypt_pcixcc.c
+ *
+ *  zcrypt 2.1.0
+ *
+ *  Copyright (C)  2001, 2006 IBM Corporation
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <asm/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_pcicc.h"
+#include "zcrypt_pcixcc.h"
+#include "zcrypt_cca_key.h"
+
+#define PCIXCC_MIN_MOD_SIZE	 16	/*  128 bits	*/
+#define PCIXCC_MIN_MOD_SIZE_OLD	 64	/*  512 bits	*/
+#define PCIXCC_MAX_MOD_SIZE	256	/* 2048 bits	*/
+#define CEX3C_MIN_MOD_SIZE	PCIXCC_MIN_MOD_SIZE
+#define CEX3C_MAX_MOD_SIZE	512	/* 4096 bits	*/
+
+#define PCIXCC_MCL2_SPEED_RATING	7870
+#define PCIXCC_MCL3_SPEED_RATING	7870
+#define CEX2C_SPEED_RATING		7000
+#define CEX3C_SPEED_RATING		6500
+
+#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c  /* max size type6 v2 crt message */
+#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply	    */
+
+#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)
+
+#define PCIXCC_CLEANUP_TIME	(15*HZ)
+
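+/* Round up to the next multiple of four, e.g. CEIL4(5) == 8 and CEIL4(8) == 8. */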
+#define CEIL4(x) ((((x)+3)/4)*4)
+
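+/*
+ * Per-request state shared with the receive callback: the completion the
+ * caller waits on and the reply flavour (ICA or XCRB), which determines
+ * how much of the reply gets copied back.
+ */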
+struct response_type {
+	struct completion work;
+	int type;
+};
+#define PCIXCC_RESPONSE_TYPE_ICA  0
+#define PCIXCC_RESPONSE_TYPE_XCRB 1
+
+static struct ap_device_id zcrypt_pcixcc_ids[] = {
+	{ AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
+	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
+	{ AP_DEVICE(AP_DEVICE_TYPE_CEX3C) },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, "
+		   "Copyright 2001, 2006 IBM Corporation");
+MODULE_LICENSE("GPL");
+
+static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
+static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);
+static void zcrypt_pcixcc_receive(struct ap_device *, struct ap_message *,
+				 struct ap_message *);
+
+static struct ap_driver zcrypt_pcixcc_driver = {
+	.probe = zcrypt_pcixcc_probe,
+	.remove = zcrypt_pcixcc_remove,
+	.receive = zcrypt_pcixcc_receive,
+	.ids = zcrypt_pcixcc_ids,
+	.request_timeout = PCIXCC_CLEANUP_TIME,
+};
+
+/**
+ * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C
+ * card in a type6 message. The 3 fields that must be filled in at execution
+ * time are  req_parml, rpl_parml and usage_domain.
+ * Everything about this interface is ascii/big-endian, since the
+ * device does *not* have 'Intel inside'.
+ *
+ * The CPRBX is followed immediately by the parm block.
+ * The parm block contains:
+ * - function code ('PD' 0x5044 or 'PK' 0x504B)
+ * - rule block (one of:)
+ *   + 0x000A 'PKCS-1.2' (MCL2 'PD')
+ *   + 0x000A 'ZERO-PAD' (MCL2 'PK')
+ *   + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
+ *   + 0x000A 'MRP     ' (MCL3 'PK' or CEX2C 'PK')
+ * - VUD block
+ */
+static struct CPRBX static_cprbx = {
+	.cprb_len	=  0x00DC,
+	.cprb_ver_id	=  0x02,
+	.func_id	= {0x54,0x32},
+};
+
+/**
+ * Convert an ICAMEX message to a type6 MEX message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
+				       struct ap_message *ap_msg,
+				       struct ica_rsa_modexpo *mex)
+{
+	static struct type6_hdr static_type6_hdrX = {
+		.type		=  0x06,
+		.offset1	=  0x00000058,
+		.agent_id	= {'C','A',},
+		.function_code	= {'P','K'},
+	};
+	static struct function_and_rules_block static_pke_fnr = {
+		.function_code	= {'P','K'},
+		.ulen		= 10,
+		.only_rule	= {'M','R','P',' ',' ',' ',' ',' '}
+	};
+	static struct function_and_rules_block static_pke_fnr_MCL2 = {
+		.function_code	= {'P','K'},
+		.ulen		= 10,
+		.only_rule	= {'Z','E','R','O','-','P','A','D'}
+	};
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		struct function_and_rules_block fr;
+		unsigned short length;
+		char text[0];
+	} __attribute__((packed)) *msg = ap_msg->message;
+	int size;
+
+	/* VUD.ciphertext */
+	msg->length = mex->inputdatalength + 2;
+	if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
+		return -EFAULT;
+
+	/* Set up key which is located after the variable length text. */
+	size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1);
+	if (size < 0)
+		return size;
+	size += sizeof(*msg) + mex->inputdatalength;
+
+	/* message header, cprbx and f&r */
+	msg->hdr = static_type6_hdrX;
+	msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
+	msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+
+	msg->cprbx = static_cprbx;
+	msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
+	msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
+
+	msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
+		static_pke_fnr_MCL2 : static_pke_fnr;
+
+	msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
+
+	ap_msg->length = size;
+	return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type6 CRT message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
+				       struct ap_message *ap_msg,
+				       struct ica_rsa_modexpo_crt *crt)
+{
+	static struct type6_hdr static_type6_hdrX = {
+		.type		=  0x06,
+		.offset1	=  0x00000058,
+		.agent_id	= {'C','A',},
+		.function_code	= {'P','D'},
+	};
+	static struct function_and_rules_block static_pkd_fnr = {
+		.function_code	= {'P','D'},
+		.ulen		= 10,
+		.only_rule	= {'Z','E','R','O','-','P','A','D'}
+	};
+
+	static struct function_and_rules_block static_pkd_fnr_MCL2 = {
+		.function_code	= {'P','D'},
+		.ulen		= 10,
+		.only_rule	= {'P','K','C','S','-','1','.','2'}
+	};
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		struct function_and_rules_block fr;
+		unsigned short length;
+		char text[0];
+	} __attribute__((packed)) *msg = ap_msg->message;
+	int size;
+
+	/* VUD.ciphertext */
+	msg->length = crt->inputdatalength + 2;
+	if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
+		return -EFAULT;
+
+	/* Set up key which is located after the variable length text. */
+	size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1);
+	if (size < 0)
+		return size;
+	size += sizeof(*msg) + crt->inputdatalength;	/* total size of msg */
+
+	/* message header, cprbx and f&r */
+	msg->hdr = static_type6_hdrX;
+	msg->hdr.ToCardLen1 = size -  sizeof(msg->hdr);
+	msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+
+	msg->cprbx = static_cprbx;
+	msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
+	msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
+		size - sizeof(msg->hdr) - sizeof(msg->cprbx);
+
+	msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
+		static_pkd_fnr_MCL2 : static_pkd_fnr;
+
+	ap_msg->length = size;
+	return 0;
+}
+
+/**
+ * Convert an XCRB message to a type6 CPRB message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @xcRB: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT, -EINVAL.
+ */
+struct type86_fmt2_msg {
+	struct type86_hdr hdr;
+	struct type86_fmt2_ext fmt2;
+} __attribute__((packed));
+
+static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
+				       struct ap_message *ap_msg,
+				       struct ica_xcRB *xcRB)
+{
+	static struct type6_hdr static_type6_hdrX = {
+		.type		=  0x06,
+		.offset1	=  0x00000058,
+	};
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+	} __attribute__((packed)) *msg = ap_msg->message;
+
+	int rcblen = CEIL4(xcRB->request_control_blk_length);
+	int replylen;
+	char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
+	char *function_code;
+
+	/* length checks */
+	ap_msg->length = sizeof(struct type6_hdr) +
+		CEIL4(xcRB->request_control_blk_length) +
+		xcRB->request_data_length;
+	if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
+		return -EINVAL;
+	replylen = sizeof(struct type86_fmt2_msg) +
+		CEIL4(xcRB->reply_control_blk_length) +
+		xcRB->reply_data_length;
+	if (replylen > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
+		return -EINVAL;
+
+	/* prepare type6 header */
+	msg->hdr = static_type6_hdrX;
+	memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
+	msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
+	if (xcRB->request_data_length) {
+		msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
+		msg->hdr.ToCardLen2 = xcRB->request_data_length;
+	}
+	msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
+	msg->hdr.FromCardLen2 = xcRB->reply_data_length;
+
+	/* prepare CPRB */
+	if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
+		    xcRB->request_control_blk_length))
+		return -EFAULT;
+	if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
+	    xcRB->request_control_blk_length)
+		return -EINVAL;
+	function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
+	memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
+
+	if (memcmp(function_code, "US", 2) == 0)
+		ap_msg->special = 1;
+	else
+		ap_msg->special = 0;
+
+	/* copy data block */
+	if (xcRB->request_data_length &&
+	    copy_from_user(req_data, xcRB->request_data_address,
+		xcRB->request_data_length))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * Prepare a type6 CPRB message for random number generation
+ *
+ * @ap_dev: AP device pointer
+ * @ap_msg: pointer to AP message
+ * @random_number_length: number of random bytes to request from the card
+ */
+static void rng_type6CPRB_msgX(struct ap_device *ap_dev,
+			       struct ap_message *ap_msg,
+			       unsigned random_number_length)
+{
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		char function_code[2];
+		short int rule_length;
+		char rule[8];
+		short int verb_length;
+		short int key_length;
+	} __attribute__((packed)) *msg = ap_msg->message;
+	static struct type6_hdr static_type6_hdrX = {
+		.type		= 0x06,
+		.offset1	= 0x00000058,
+		.agent_id	= {'C', 'A'},
+		.function_code	= {'R', 'L'},
+		.ToCardLen1	= sizeof *msg - sizeof(msg->hdr),
+		.FromCardLen1	= sizeof *msg - sizeof(msg->hdr),
+	};
+	static struct CPRBX local_cprbx = {
+		.cprb_len	= 0x00dc,
+		.cprb_ver_id	= 0x02,
+		.func_id	= {0x54, 0x32},
+		.req_parml	= sizeof *msg - sizeof(msg->hdr) -
+				  sizeof(msg->cprbx),
+		.rpl_msgbl	= sizeof *msg - sizeof(msg->hdr),
+	};
+
+	msg->hdr = static_type6_hdrX;
+	msg->hdr.FromCardLen2 = random_number_length;
+	msg->cprbx = local_cprbx;
+	msg->cprbx.rpl_datal = random_number_length;
+	msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
+	memcpy(msg->function_code, msg->hdr.function_code, 0x02);
+	msg->rule_length = 0x0a;
+	memcpy(msg->rule, "RANDOM  ", 8);
+	msg->verb_length = 0x02;
+	msg->key_length = 0x02;
+	ap_msg->length = sizeof *msg;
+}
+
+/**
+ * Copy results from a type 86 ICA reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @outputdata: pointer to user output data
+ * @outputdatalength: size of user output data
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+struct type86x_reply {
+	struct type86_hdr hdr;
+	struct type86_fmt2_ext fmt2;
+	struct CPRBX cprbx;
+	unsigned char pad[4];	/* 4 byte function code/rules block ? */
+	unsigned short length;
+	char text[0];
+} __attribute__((packed));
+
+static int convert_type86_ica(struct zcrypt_device *zdev,
+			  struct ap_message *reply,
+			  char __user *outputdata,
+			  unsigned int outputdatalength)
+{
+	static unsigned char static_pad[] = {
+		0x00,0x02,
+		0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
+		0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
+		0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
+		0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
+		0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
+		0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
+		0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
+		0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
+		0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
+		0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
+		0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
+		0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
+		0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
+		0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
+		0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
+		0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
+		0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
+		0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
+		0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
+		0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
+		0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
+		0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
+		0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
+		0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
+		0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
+		0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
+		0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
+		0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
+		0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
+		0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
+		0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
+		0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
+	};
+	struct type86x_reply *msg = reply->message;
+	unsigned short service_rc, service_rs;
+	unsigned int reply_len, pad_len;
+	char *data;
+
+	service_rc = msg->cprbx.ccp_rtcode;
+	if (unlikely(service_rc != 0)) {
+		service_rs = msg->cprbx.ccp_rscode;
+		if (service_rc == 8 && service_rs == 66)
+			return -EINVAL;
+		if (service_rc == 8 && service_rs == 65)
+			return -EINVAL;
+		if (service_rc == 8 && service_rs == 770)
+			return -EINVAL;
+		if (service_rc == 8 && service_rs == 783) {
+			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
+			return -EAGAIN;
+		}
+		if (service_rc == 12 && service_rs == 769)
+			return -EINVAL;
+		if (service_rc == 8 && service_rs == 72)
+			return -EINVAL;
+		zdev->online = 0;
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+	data = msg->text;
+	reply_len = msg->length - 2;
+	if (reply_len > outputdatalength)
+		return -EINVAL;
+	/*
+	 * For all encipher requests, the length of the ciphertext (reply_len)
+	 * will always equal the modulus length. For MEX decipher requests
+	 * the output needs to get padded. Minimum pad size is 10.
+	 *
+	 * Currently, the cases where padding will be added are:
+	 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
+	 *   ZERO-PAD and CRT is only supported for PKD requests)
+	 * - PCICC, always
+	 */
+	pad_len = outputdatalength - reply_len;
+	if (pad_len > 0) {
+		if (pad_len < 10)
+			return -EINVAL;
+		/* 'restore' padding left in the PCICC/PCIXCC card. */
+		if (copy_to_user(outputdata, static_pad, pad_len - 1))
+			return -EFAULT;
+		if (put_user(0, outputdata + pad_len - 1))
+			return -EFAULT;
+	}
+	/* Copy the crypto response to user space. */
+	if (copy_to_user(outputdata + pad_len, data, reply_len))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * Copy results from a type 86 XCRB reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @xcRB: pointer to XCRB
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+static int convert_type86_xcrb(struct zcrypt_device *zdev,
+			       struct ap_message *reply,
+			       struct ica_xcRB *xcRB)
+{
+	struct type86_fmt2_msg *msg = reply->message;
+	char *data = reply->message;
+
+	/* Copy CPRB to user */
+	if (copy_to_user(xcRB->reply_control_blk_addr,
+		data + msg->fmt2.offset1, msg->fmt2.count1))
+		return -EFAULT;
+	xcRB->reply_control_blk_length = msg->fmt2.count1;
+
+	/* Copy data buffer to user */
+	if (msg->fmt2.count2)
+		if (copy_to_user(xcRB->reply_data_addr,
+			data + msg->fmt2.offset2, msg->fmt2.count2))
+			return -EFAULT;
+	xcRB->reply_data_length = msg->fmt2.count2;
+	return 0;
+}
+
+static int convert_type86_rng(struct zcrypt_device *zdev,
+			  struct ap_message *reply,
+			  char *buffer)
+{
+	struct {
+		struct type86_hdr hdr;
+		struct type86_fmt2_ext fmt2;
+		struct CPRBX cprbx;
+	} __attribute__((packed)) *msg = reply->message;
+	char *data = reply->message;
+
+	if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
+		return -EINVAL;
+	memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
+	return msg->fmt2.count2;
+}
+
+static int convert_response_ica(struct zcrypt_device *zdev,
+			    struct ap_message *reply,
+			    char __user *outputdata,
+			    unsigned int outputdatalength)
+{
+	struct type86x_reply *msg = reply->message;
+
+	/* Response type byte is the second byte in the response. */
+	switch (((unsigned char *) reply->message)[1]) {
+	case TYPE82_RSP_CODE:
+	case TYPE88_RSP_CODE:
+		return convert_error(zdev, reply);
+	case TYPE86_RSP_CODE:
+		if (msg->cprbx.ccp_rtcode &&
+		   (msg->cprbx.ccp_rscode == 0x14f) &&
+		   (outputdatalength > 256)) {
+			if (zdev->max_exp_bit_length <= 17) {
+				zdev->max_exp_bit_length = 17;
+				return -EAGAIN;
+			} else
+				return -EINVAL;
+		}
+		if (msg->hdr.reply_code)
+			return convert_error(zdev, reply);
+		if (msg->cprbx.cprb_ver_id == 0x02)
+			return convert_type86_ica(zdev, reply,
+						  outputdata, outputdatalength);
+		/* Fall through, no break, incorrect cprb version is an unknown
+		 * response */
+	default: /* Unknown response type, this should NEVER EVER happen */
+		zdev->online = 0;
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+}
+
+static int convert_response_xcrb(struct zcrypt_device *zdev,
+			    struct ap_message *reply,
+			    struct ica_xcRB *xcRB)
+{
+	struct type86x_reply *msg = reply->message;
+
+	/* Response type byte is the second byte in the response. */
+	switch (((unsigned char *) reply->message)[1]) {
+	case TYPE82_RSP_CODE:
+	case TYPE88_RSP_CODE:
+		xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+		return convert_error(zdev, reply);
+	case TYPE86_RSP_CODE:
+		if (msg->hdr.reply_code) {
+			memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
+			return convert_error(zdev, reply);
+		}
+		if (msg->cprbx.cprb_ver_id == 0x02)
+			return convert_type86_xcrb(zdev, reply, xcRB);
+		/* Fall through, no break, incorrect cprb version is an unknown
+		 * response */
+	default: /* Unknown response type, this should NEVER EVER happen */
+		xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+		zdev->online = 0;
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+}
+
+static int convert_response_rng(struct zcrypt_device *zdev,
+				 struct ap_message *reply,
+				 char *data)
+{
+	struct type86x_reply *msg = reply->message;
+
+	switch (msg->hdr.type) {
+	case TYPE82_RSP_CODE:
+	case TYPE88_RSP_CODE:
+		return -EINVAL;
+	case TYPE86_RSP_CODE:
+		if (msg->hdr.reply_code)
+			return -EINVAL;
+		if (msg->cprbx.cprb_ver_id == 0x02)
+			return convert_type86_rng(zdev, reply, data);
+		/* Fall through, no break, incorrect cprb version is an unknown
+		 * response */
+	default: /* Unknown response type, this should NEVER EVER happen */
+		zdev->online = 0;
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @ap_dev: pointer to the AP device
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
+				  struct ap_message *msg,
+				  struct ap_message *reply)
+{
+	static struct error_hdr error_reply = {
+		.type = TYPE82_RSP_CODE,
+		.reply_code = REP82_ERROR_MACHINE_FAILURE,
+	};
+	struct response_type *resp_type =
+		(struct response_type *) msg->private;
+	struct type86x_reply *t86r;
+	int length;
+
+	/* Copy the reply message to the request message buffer. */
+	if (IS_ERR(reply)) {
+		memcpy(msg->message, &error_reply, sizeof(error_reply));
+		goto out;
+	}
+	t86r = reply->message;
+	if (t86r->hdr.type == TYPE86_RSP_CODE &&
+		 t86r->cprbx.cprb_ver_id == 0x02) {
+		switch (resp_type->type) {
+		case PCIXCC_RESPONSE_TYPE_ICA:
+			length = sizeof(struct type86x_reply)
+				+ t86r->length - 2;
+			length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length);
+			memcpy(msg->message, reply->message, length);
+			break;
+		case PCIXCC_RESPONSE_TYPE_XCRB:
+			length = t86r->fmt2.offset2 + t86r->fmt2.count2;
+			length = min(PCIXCC_MAX_XCRB_MESSAGE_SIZE, length);
+			memcpy(msg->message, reply->message, length);
+			break;
+		default:
+			memcpy(msg->message, &error_reply, sizeof error_reply);
+		}
+	} else
+		memcpy(msg->message, reply->message, sizeof error_reply);
+out:
+	complete(&(resp_type->work));
+}
+
+static atomic_t zcrypt_step = ATOMIC_INIT(0);
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a modexpo request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ *	  PCIXCC/CEX2C device to the request distributor
+ * @mex: pointer to the modexpo request buffer
+ */
+static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
+				  struct ica_rsa_modexpo *mex)
+{
+	struct ap_message ap_msg;
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_ICA,
+	};
+	int rc;
+
+	ap_init_message(&ap_msg);
+	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &resp_type;
+	rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
+	if (rc)
+		goto out_free;
+	init_completion(&resp_type.work);
+	ap_queue_message(zdev->ap_dev, &ap_msg);
+	rc = wait_for_completion_interruptible(&resp_type.work);
+	if (rc == 0)
+		rc = convert_response_ica(zdev, &ap_msg, mex->outputdata,
+					  mex->outputdatalength);
+	else
+		/* Signal pending. */
+		ap_cancel_message(zdev->ap_dev, &ap_msg);
+out_free:
+	free_page((unsigned long) ap_msg.message);
+	return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a modexpo_crt request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ *	  PCIXCC/CEX2C device to the request distributor
+ * @crt: pointer to the modexpo_crt request buffer
+ */
+static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
+				      struct ica_rsa_modexpo_crt *crt)
+{
+	struct ap_message ap_msg;
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_ICA,
+	};
+	int rc;
+
+	ap_init_message(&ap_msg);
+	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &resp_type;
+	rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
+	if (rc)
+		goto out_free;
+	init_completion(&resp_type.work);
+	ap_queue_message(zdev->ap_dev, &ap_msg);
+	rc = wait_for_completion_interruptible(&resp_type.work);
+	if (rc == 0)
+		rc = convert_response_ica(zdev, &ap_msg, crt->outputdata,
+					  crt->outputdatalength);
+	else
+		/* Signal pending. */
+		ap_cancel_message(zdev->ap_dev, &ap_msg);
+out_free:
+	free_page((unsigned long) ap_msg.message);
+	return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a send_cprb request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ *	  PCIXCC/CEX2C device to the request distributor
+ * @xcRB: pointer to the send_cprb request buffer
+ */
+static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
+				    struct ica_xcRB *xcRB)
+{
+	struct ap_message ap_msg;
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_XCRB,
+	};
+	int rc;
+
+	ap_init_message(&ap_msg);
+	ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &resp_type;
+	rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
+	if (rc)
+		goto out_free;
+	init_completion(&resp_type.work);
+	ap_queue_message(zdev->ap_dev, &ap_msg);
+	rc = wait_for_completion_interruptible(&resp_type.work);
+	if (rc == 0)
+		rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
+	else
+		/* Signal pending. */
+		ap_cancel_message(zdev->ap_dev, &ap_msg);
+out_free:
+	kzfree(ap_msg.message);
+	return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to generate random data.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ *	  PCIXCC/CEX2C device to the request distributor
+ * @buffer: pointer to a memory page to return random data
+ */
+
+static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
+				    char *buffer)
+{
+	struct ap_message ap_msg;
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_XCRB,
+	};
+	int rc;
+
+	ap_init_message(&ap_msg);
+	ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &resp_type;
+	rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
+	init_completion(&resp_type.work);
+	ap_queue_message(zdev->ap_dev, &ap_msg);
+	rc = wait_for_completion_interruptible(&resp_type.work);
+	if (rc == 0)
+		rc = convert_response_rng(zdev, &ap_msg, buffer);
+	else
+		/* Signal pending. */
+		ap_cancel_message(zdev->ap_dev, &ap_msg);
+	kfree(ap_msg.message);
+	return rc;
+}
+
+/**
+ * The crypto operations for a PCIXCC/CEX2C card.
+ */
+static struct zcrypt_ops zcrypt_pcixcc_ops = {
+	.rsa_modexpo = zcrypt_pcixcc_modexpo,
+	.rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
+	.send_cprb = zcrypt_pcixcc_send_cprb,
+};
+
+static struct zcrypt_ops zcrypt_pcixcc_with_rng_ops = {
+	.rsa_modexpo = zcrypt_pcixcc_modexpo,
+	.rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
+	.send_cprb = zcrypt_pcixcc_send_cprb,
+	.rng = zcrypt_pcixcc_rng,
+};
+
+/**
+ * Micro-code detection function. It sends a message to a pcixcc card
+ * to find out the microcode level.
+ * @ap_dev: pointer to the AP device.
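+ *
+ * Returns ZCRYPT_PCIXCC_MCL2 or ZCRYPT_PCIXCC_MCL3 on success, or a
+ * negative error code.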
+ */
+static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev)
+{
+	static unsigned char msg[] = {
+		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,
+		0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+		0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,
+		0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
+		0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,
+		0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
+		0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,
+		0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
+		0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,
+		0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
+		0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,
+		0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
+		0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,
+		0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
+		0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,
+		0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
+		0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,
+		0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
+		0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,
+		0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
+		0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,
+		0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
+		0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,
+		0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
+		0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,
+		0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
+		0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,
+		0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
+		0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,
+		0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
+		0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,
+		0xF1,0x3D,0x93,0x53
+	};
+	unsigned long long psmid;
+	struct CPRBX *cprbx;
+	char *reply;
+	int rc, i;
+
+	reply = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!reply)
+		return -ENOMEM;
+
+	rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg));
+	if (rc)
+		goto out_free;
+
+	/* Wait for the test message to complete. */
+	for (i = 0; i < 6; i++) {
+		mdelay(300);
+		rc = ap_recv(ap_dev->qid, &psmid, reply, 4096);
+		if (rc == 0 && psmid == 0x0102030405060708ULL)
+			break;
+	}
+
+	if (i >= 6) {
+		/* Got no answer. */
+		rc = -ENODEV;
+		goto out_free;
+	}
+
+	cprbx = (struct CPRBX *) (reply + 48);
+	if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33)
+		rc = ZCRYPT_PCIXCC_MCL2;
+	else
+		rc = ZCRYPT_PCIXCC_MCL3;
+out_free:
+	free_page((unsigned long) reply);
+	return rc;
+}
+
+/**
+ * Large random number detection function. It sends a message to a pcixcc
+ * card to find out if large random numbers are supported.
+ * @ap_dev: pointer to the AP device.
+ *
+ * Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
+ */
+static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
+{
+	struct ap_message ap_msg;
+	unsigned long long psmid;
+	struct {
+		struct type86_hdr hdr;
+		struct type86_fmt2_ext fmt2;
+		struct CPRBX cprbx;
+	} __attribute__((packed)) *reply;
+	int rc, i;
+
+	ap_init_message(&ap_msg);
+	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+
+	rng_type6CPRB_msgX(ap_dev, &ap_msg, 4);
+	rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message,
+		     ap_msg.length);
+	if (rc)
+		goto out_free;
+
+	/* Wait for the test message to complete. */
+	for (i = 0; i < 2 * HZ; i++) {
+		msleep(1000 / HZ);
+		rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096);
+		if (rc == 0 && psmid == 0x0102030405060708ULL)
+			break;
+	}
+
+	if (i >= 2 * HZ) {
+		/* Got no answer. */
+		rc = -ENODEV;
+		goto out_free;
+	}
+
+	reply = ap_msg.message;
+	if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
+		rc = 1;
+	else
+		rc = 0;
+out_free:
+	free_page((unsigned long) ap_msg.message);
+	return rc;
+}
+
+/**
+ * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device
+ * since the bus_match already checked the hardware type. The PCIXCC
+ * cards come in two flavours: micro code level 2 and micro code level 3.
+ * This is checked by sending a test message to the device.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
+{
+	struct zcrypt_device *zdev;
+	int rc = 0;
+
+	zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
+	if (!zdev)
+		return -ENOMEM;
+	zdev->ap_dev = ap_dev;
+	zdev->online = 1;
+	switch (ap_dev->device_type) {
+	case AP_DEVICE_TYPE_PCIXCC:
+		rc = zcrypt_pcixcc_mcl(ap_dev);
+		if (rc < 0) {
+			zcrypt_device_free(zdev);
+			return rc;
+		}
+		zdev->user_space_type = rc;
+		if (rc == ZCRYPT_PCIXCC_MCL2) {
+			zdev->type_string = "PCIXCC_MCL2";
+			zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
+			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
+			zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+			zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
+		} else {
+			zdev->type_string = "PCIXCC_MCL3";
+			zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
+			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
+			zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+			zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
+		}
+		break;
+	case AP_DEVICE_TYPE_CEX2C:
+		zdev->user_space_type = ZCRYPT_CEX2C;
+		zdev->type_string = "CEX2C";
+		zdev->speed_rating = CEX2C_SPEED_RATING;
+		zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
+		zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+		zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
+		break;
+	case AP_DEVICE_TYPE_CEX3C:
+		zdev->user_space_type = ZCRYPT_CEX3C;
+		zdev->type_string = "CEX3C";
+		zdev->speed_rating = CEX3C_SPEED_RATING;
+		zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
+		zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
+		zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
+		break;
+	default:
+		goto out_free;
+	}
+
+	rc = zcrypt_pcixcc_rng_supported(ap_dev);
+	if (rc < 0) {
+		zcrypt_device_free(zdev);
+		return rc;
+	}
+	if (rc)
+		zdev->ops = &zcrypt_pcixcc_with_rng_ops;
+	else
+		zdev->ops = &zcrypt_pcixcc_ops;
+	ap_dev->reply = &zdev->reply;
+	ap_dev->private = zdev;
+	rc = zcrypt_device_register(zdev);
+	if (rc)
+		goto out_free;
+	return 0;
+
+ out_free:
+	ap_dev->private = NULL;
+	zcrypt_device_free(zdev);
+	return rc;
+}
+
+/**
+ * This is called to remove the extended PCIXCC/CEX2C driver information
+ * if an AP device is removed.
+ */
+static void zcrypt_pcixcc_remove(struct ap_device *ap_dev)
+{
+	struct zcrypt_device *zdev = ap_dev->private;
+
+	zcrypt_device_unregister(zdev);
+}
+
+int __init zcrypt_pcixcc_init(void)
+{
+	return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc");
+}
+
+void zcrypt_pcixcc_exit(void)
+{
+	ap_driver_unregister(&zcrypt_pcixcc_driver);
+}
+
+module_init(zcrypt_pcixcc_init);
+module_exit(zcrypt_pcixcc_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcixcc.h b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcixcc.h
new file mode 100644
index 0000000..8cb7d7a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -0,0 +1,34 @@
+/*
+ *  linux/drivers/s390/crypto/zcrypt_pcixcc.h
+ *
+ *  zcrypt 2.1.0
+ *
+ *  Copyright (C)  2001, 2006 IBM Corporation
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_PCIXCC_H_
+#define _ZCRYPT_PCIXCC_H_
+
+int zcrypt_pcixcc_init(void);
+void zcrypt_pcixcc_exit(void);
+
+#endif /* _ZCRYPT_PCIXCC_H_ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/kvm/Makefile b/ap/os/linux/linux-3.4.x/drivers/s390/kvm/Makefile
new file mode 100644
index 0000000..0815690
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/kvm/Makefile
@@ -0,0 +1,9 @@
+# Makefile for kvm guest drivers on s390
+#
+# Copyright IBM Corp. 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (version 2 only)
+# as published by the Free Software Foundation.
+
+obj-$(CONFIG_S390_GUEST) += kvm_virtio.o
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/kvm/kvm_virtio.c b/ap/os/linux/linux-3.4.x/drivers/s390/kvm/kvm_virtio.c
new file mode 100644
index 0000000..f97b2aa
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/kvm/kvm_virtio.c
@@ -0,0 +1,503 @@
+/*
+ * kvm_virtio.c - virtio for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/err.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/slab.h>
+#include <linux/virtio_console.h>
+#include <linux/interrupt.h>
+#include <linux/virtio_ring.h>
+#include <linux/export.h>
+#include <linux/pfn.h>
+#include <asm/io.h>
+#include <asm/kvm_para.h>
+#include <asm/kvm_virtio.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+
+#define VIRTIO_SUBCODE_64 0x0D00
+
+/*
+ * The pointer to our page of device descriptions.
+ */
+static void *kvm_devices;
+static struct work_struct hotplug_work;
+
+struct kvm_device {
+	struct virtio_device vdev;
+	struct kvm_device_desc *desc;
+};
+
+#define to_kvmdev(vd) container_of(vd, struct kvm_device, vdev)
+
+/*
+ * memory layout:
+ * - kvm_device_descriptor
+ *        struct kvm_device_desc
+ * - configuration
+ *        struct kvm_vqconfig
+ * - feature bits
+ * - config space
+ */
+static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc)
+{
+	return (struct kvm_vqconfig *)(desc + 1);
+}
+
+static u8 *kvm_vq_features(const struct kvm_device_desc *desc)
+{
+	return (u8 *)(kvm_vq_config(desc) + desc->num_vq);
+}
+
+static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc)
+{
+	return kvm_vq_features(desc) + desc->feature_len * 2;
+}
+
+/*
+ * The total size of the config page used by this device (incl. desc)
+ */
+static unsigned desc_size(const struct kvm_device_desc *desc)
+{
+	return sizeof(*desc)
+		+ desc->num_vq * sizeof(struct kvm_vqconfig)
+		+ desc->feature_len * 2
+		+ desc->config_len;
+}
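
A worked sizing example may help here; the field values below are hypothetical and only illustrate how desc_size() locates the next descriptor (they are not taken from the patch):

/*
 * For a descriptor with num_vq = 2, feature_len = 4 and config_len = 8:
 *
 *   desc_size(desc) = sizeof(struct kvm_device_desc)
 *                   + 2 * sizeof(struct kvm_vqconfig)   (virtqueue configs)
 *                   + 4 * 2                             (host + guest feature bitmaps)
 *                   + 8                                 (config space)
 *
 * which is exactly the offset at which the next device descriptor starts
 * within the page (see scan_devices() below).
 */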
+
+/* This gets the device's feature bits. */
+static u32 kvm_get_features(struct virtio_device *vdev)
+{
+	unsigned int i;
+	u32 features = 0;
+	struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+	u8 *in_features = kvm_vq_features(desc);
+
+	for (i = 0; i < min(desc->feature_len * 8, 32); i++)
+		if (in_features[i / 8] & (1 << (i % 8)))
+			features |= (1 << i);
+	return features;
+}
+
+static void kvm_finalize_features(struct virtio_device *vdev)
+{
+	unsigned int i, bits;
+	struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+	/* Second half of bitmap is features we accept. */
+	u8 *out_features = kvm_vq_features(desc) + desc->feature_len;
+
+	/* Give virtio_ring a chance to accept features. */
+	vring_transport_features(vdev);
+
+	memset(out_features, 0, desc->feature_len);
+	bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
+	for (i = 0; i < bits; i++) {
+		if (test_bit(i, vdev->features))
+			out_features[i / 8] |= (1 << (i % 8));
+	}
+}
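
As a worked example of the byte/bit arithmetic used by kvm_get_features() and kvm_finalize_features() above (the bit number is hypothetical):

/*
 * Feature bit 17: byte index 17 / 8 = 2, mask 1 << (17 % 8) = 0x02.
 * The host offers the feature when (kvm_vq_features(desc)[2] & 0x02) is set;
 * the guest accepts it by setting the same bit in the second bitmap,
 * i.e. in (kvm_vq_features(desc) + desc->feature_len)[2].
 */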
+
+/*
+ * Reading and writing elements in config space
+ */
+static void kvm_get(struct virtio_device *vdev, unsigned int offset,
+		   void *buf, unsigned len)
+{
+	struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+
+	BUG_ON(offset + len > desc->config_len);
+	memcpy(buf, kvm_vq_configspace(desc) + offset, len);
+}
+
+static void kvm_set(struct virtio_device *vdev, unsigned int offset,
+		   const void *buf, unsigned len)
+{
+	struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+
+	BUG_ON(offset + len > desc->config_len);
+	memcpy(kvm_vq_configspace(desc) + offset, buf, len);
+}
+
+/*
+ * The operations to get and set the status word just access
+ * the status field of the device descriptor. set_status will also
+ * make a hypercall to the host, to tell about status changes
+ */
+static u8 kvm_get_status(struct virtio_device *vdev)
+{
+	return to_kvmdev(vdev)->desc->status;
+}
+
+static void kvm_set_status(struct virtio_device *vdev, u8 status)
+{
+	BUG_ON(!status);
+	to_kvmdev(vdev)->desc->status = status;
+	kvm_hypercall1(KVM_S390_VIRTIO_SET_STATUS,
+		       (unsigned long) to_kvmdev(vdev)->desc);
+}
+
+/*
+ * To reset the device, we use the KVM_VIRTIO_RESET hypercall, using the
+ * descriptor address. The Host will zero the status and all the
+ * features.
+ */
+static void kvm_reset(struct virtio_device *vdev)
+{
+	kvm_hypercall1(KVM_S390_VIRTIO_RESET,
+		       (unsigned long) to_kvmdev(vdev)->desc);
+}
+
+/*
+ * When the virtio_ring code wants to notify the Host, it calls us here and we
+ * make a hypercall.  We hand the address of the virtqueue so the Host
+ * knows which virtqueue we're talking about.
+ */
+static void kvm_notify(struct virtqueue *vq)
+{
+	struct kvm_vqconfig *config = vq->priv;
+
+	kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
+}
+
+/*
+ * This routine finds the virtqueue with the given index in the configuration
+ * of this device and sets it up.
+ */
+static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
+				     unsigned index,
+				     void (*callback)(struct virtqueue *vq),
+				     const char *name)
+{
+	struct kvm_device *kdev = to_kvmdev(vdev);
+	struct kvm_vqconfig *config;
+	struct virtqueue *vq;
+	int err;
+
+	if (index >= kdev->desc->num_vq)
+		return ERR_PTR(-ENOENT);
+
+	config = kvm_vq_config(kdev->desc)+index;
+
+	err = vmem_add_mapping(config->address,
+			       vring_size(config->num,
+					  KVM_S390_VIRTIO_RING_ALIGN));
+	if (err)
+		goto out;
+
+	vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN,
+				 vdev, true, (void *) config->address,
+				 kvm_notify, callback, name);
+	if (!vq) {
+		err = -ENOMEM;
+		goto unmap;
+	}
+
+	/*
+	 * register a callback token
+	 * The host will send this via the external interrupt parameter
+	 */
+	config->token = (u64) vq;
+
+	vq->priv = config;
+	return vq;
+unmap:
+	vmem_remove_mapping(config->address,
+			    vring_size(config->num,
+				       KVM_S390_VIRTIO_RING_ALIGN));
+out:
+	return ERR_PTR(err);
+}
+
+static void kvm_del_vq(struct virtqueue *vq)
+{
+	struct kvm_vqconfig *config = vq->priv;
+
+	vring_del_virtqueue(vq);
+	vmem_remove_mapping(config->address,
+			    vring_size(config->num,
+				       KVM_S390_VIRTIO_RING_ALIGN));
+}
+
+static void kvm_del_vqs(struct virtio_device *vdev)
+{
+	struct virtqueue *vq, *n;
+
+	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+		kvm_del_vq(vq);
+}
+
+static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+			struct virtqueue *vqs[],
+			vq_callback_t *callbacks[],
+			const char *names[])
+{
+	struct kvm_device *kdev = to_kvmdev(vdev);
+	int i;
+
+	/* We must have this many virtqueues. */
+	if (nvqs > kdev->desc->num_vq)
+		return -ENOENT;
+
+	for (i = 0; i < nvqs; ++i) {
+		vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]);
+		if (IS_ERR(vqs[i]))
+			goto error;
+	}
+	return 0;
+
+error:
+	kvm_del_vqs(vdev);
+	return PTR_ERR(vqs[i]);
+}
+
+static const char *kvm_bus_name(struct virtio_device *vdev)
+{
+	return "";
+}
+
+/*
+ * The config ops structure as defined by virtio config
+ */
+static struct virtio_config_ops kvm_vq_configspace_ops = {
+	.get_features = kvm_get_features,
+	.finalize_features = kvm_finalize_features,
+	.get = kvm_get,
+	.set = kvm_set,
+	.get_status = kvm_get_status,
+	.set_status = kvm_set_status,
+	.reset = kvm_reset,
+	.find_vqs = kvm_find_vqs,
+	.del_vqs = kvm_del_vqs,
+	.bus_name = kvm_bus_name,
+};
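
For context, a minimal sketch of how a virtio driver would reach these ops; the probe function and queue name are hypothetical, and on an s390 KVM guest the find_vqs call below ends up in kvm_find_vqs():

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static int example_probe(struct virtio_device *vdev)
{
	struct virtqueue *vq;
	vq_callback_t *cb = NULL;	/* no completion callback in this sketch */
	const char *name = "requests";	/* hypothetical queue name */

	/* Dispatches through vdev->config, i.e. kvm_vq_configspace_ops here. */
	return vdev->config->find_vqs(vdev, 1, &vq, &cb, &name);
}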
+
+/*
+ * The root device for the kvm virtio devices.
+ * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2.
+ */
+static struct device *kvm_root;
+
+/*
+ * adds a new device and registers it with virtio;
+ * appropriate drivers are loaded by the device model
+ */
+static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset)
+{
+	struct kvm_device *kdev;
+
+	kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+	if (!kdev) {
+		printk(KERN_EMERG "Cannot allocate kvm dev %u type %u\n",
+		       offset, d->type);
+		return;
+	}
+
+	kdev->vdev.dev.parent = kvm_root;
+	kdev->vdev.id.device = d->type;
+	kdev->vdev.config = &kvm_vq_configspace_ops;
+	kdev->desc = d;
+
+	if (register_virtio_device(&kdev->vdev) != 0) {
+		printk(KERN_ERR "Failed to register kvm device %u type %u\n",
+		       offset, d->type);
+		kfree(kdev);
+	}
+}
+
+/*
+ * scan_devices() simply iterates through the device page.
+ * The type 0 is reserved to mean "end of devices".
+ */
+static void scan_devices(void)
+{
+	unsigned int i;
+	struct kvm_device_desc *d;
+
+	for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
+		d = kvm_devices + i;
+
+		if (d->type == 0)
+			break;
+
+		add_kvm_device(d, i);
+	}
+}
+
+/*
+ * match for a kvm device with a specific desc pointer
+ */
+static int match_desc(struct device *dev, void *data)
+{
+	struct virtio_device *vdev = dev_to_virtio(dev);
+	struct kvm_device *kdev = to_kvmdev(vdev);
+
+	return kdev->desc == data;
+}
+
+/*
+ * hotplug_devices() tries to find changes in the device page.
+ */
+static void hotplug_devices(struct work_struct *dummy)
+{
+	unsigned int i;
+	struct kvm_device_desc *d;
+	struct device *dev;
+
+	for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
+		d = kvm_devices + i;
+
+		/* end of list */
+		if (d->type == 0)
+			break;
+
+		/* device already exists */
+		dev = device_find_child(kvm_root, d, match_desc);
+		if (dev) {
+			/* XXX check for hotplug remove */
+			put_device(dev);
+			continue;
+		}
+
+		/* new device */
+		printk(KERN_INFO "Adding new virtio device %p\n", d);
+		add_kvm_device(d, i);
+	}
+}
+
+/*
+ * we emulate the request_irq behaviour on top of s390 extints
+ */
+static void kvm_extint_handler(struct ext_code ext_code,
+			       unsigned int param32, unsigned long param64)
+{
+	struct virtqueue *vq;
+	u32 param;
+
+	if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64)
+		return;
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
+
+	/* The LSB might be overloaded, we have to mask it */
+	vq = (struct virtqueue *)(param64 & ~1UL);
+
+	/* We use ext_params to decide what this interrupt means */
+	param = param32 & VIRTIO_PARAM_MASK;
+
+	switch (param) {
+	case VIRTIO_PARAM_CONFIG_CHANGED:
+	{
+		struct virtio_driver *drv;
+		drv = container_of(vq->vdev->dev.driver,
+				   struct virtio_driver, driver);
+		if (drv->config_changed)
+			drv->config_changed(vq->vdev);
+
+		break;
+	}
+	case VIRTIO_PARAM_DEV_ADD:
+		schedule_work(&hotplug_work);
+		break;
+	case VIRTIO_PARAM_VRING_INTERRUPT:
+	default:
+		vring_interrupt(0, vq);
+		break;
+	}
+}
+
+/*
+ * For s390-virtio, we expect a page above main storage containing
+ * the virtio configuration. Try to actually load from this area
+ * in order to figure out if the host provides this page.
+ */
+static int __init test_devices_support(unsigned long addr)
+{
+	int ret = -EIO;
+
+	asm volatile(
+		"0:	lura	0,%1\n"
+		"1:	xgr	%0,%0\n"
+		"2:\n"
+		EX_TABLE(0b,2b)
+		EX_TABLE(1b,2b)
+		: "+d" (ret)
+		: "a" (addr)
+		: "0", "cc");
+	return ret;
+}
+/*
+ * Init function for virtio
+ * devices are in a single page above top of "normal" mem
+ */
+static int __init kvm_devices_init(void)
+{
+	int rc;
+
+	if (!MACHINE_IS_KVM)
+		return -ENODEV;
+
+	if (test_devices_support(real_memory_size) < 0)
+		return -ENODEV;
+
+	rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
+	if (rc)
+		return rc;
+
+	kvm_devices = (void *) real_memory_size;
+
+	kvm_root = root_device_register("kvm_s390");
+	if (IS_ERR(kvm_root)) {
+		rc = PTR_ERR(kvm_root);
+		printk(KERN_ERR "Could not register kvm_s390 root device\n");
+		vmem_remove_mapping(real_memory_size, PAGE_SIZE);
+		return rc;
+	}
+
+	INIT_WORK(&hotplug_work, hotplug_devices);
+
+	service_subclass_irq_register();
+	register_external_interrupt(0x2603, kvm_extint_handler);
+
+	scan_devices();
+	return 0;
+}
+
+/* code for early console output with virtio_console */
+static __init int early_put_chars(u32 vtermno, const char *buf, int count)
+{
+	char scratch[17];
+	unsigned int len = count;
+
+	if (len > sizeof(scratch) - 1)
+		len = sizeof(scratch) - 1;
+	scratch[len] = '\0';
+	memcpy(scratch, buf, len);
+	kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, __pa(scratch));
+	return len;
+}
+
+static int __init s390_virtio_console_init(void)
+{
+	if (!MACHINE_IS_KVM)
+		return -ENODEV;
+	return virtio_cons_early_init(early_put_chars);
+}
+console_initcall(s390_virtio_console_init);
+
+
+/*
+ * We do this after core stuff, but before the drivers.
+ */
+postcore_initcall(kvm_devices_init);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/Kconfig b/ap/os/linux/linux-3.4.x/drivers/s390/net/Kconfig
new file mode 100644
index 0000000..9b66d2d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/Kconfig
@@ -0,0 +1,112 @@
+menu "S/390 network device drivers"
+	depends on NETDEVICES && S390
+
+config LCS
+	def_tristate m
+	prompt "Lan Channel Station Interface"
+	depends on CCW && NETDEVICES && (ETHERNET || TR || FDDI)
+	help
+	   Select this option if you want to use LCS networking on IBM System z.
+	   This device driver supports Token Ring (IEEE 802.5),
+	   FDDI (IEEE 802.7) and Ethernet.
+	   To compile as a module, choose M. The module name is lcs.
+	   If you do not know what it is, it's safe to choose Y.
+
+config CTCM
+	def_tristate m
+	prompt "CTC and MPC SNA device support"
+	depends on CCW && NETDEVICES
+	help
+	  Select this option if you want to use channel-to-channel
+	  point-to-point networking on IBM System z.
+	  This device driver supports real CTC coupling using ESCON.
+	  It also supports virtual CTCs when running under VM.
+	  This driver also supports channel-to-channel MPC SNA devices.
+	  MPC is an SNA protocol device used by Communication Server for Linux.
+	  To compile as a module, choose M. The module name is ctcm.
+	  To compile into the kernel, choose Y.
+	  If you do not need any channel-to-channel connection, choose N.
+
+config NETIUCV
+	def_tristate m
+	prompt "IUCV network device support (VM only)"
+	depends on IUCV && NETDEVICES
+	help
+	  Select this option if you want to use inter-user communication
+	  vehicle networking under VM or VIF. It enables a fast communication
+	  link between VM guests. Using ifconfig a point-to-point connection
+	  can be established to the Linux on IBM System z
+	  running on the other VM guest. To compile as a module, choose M.
+	  The module name is netiucv. If unsure, choose Y.
+
+config SMSGIUCV
+	def_tristate m
+	prompt "IUCV special message support (VM only)"
+	depends on IUCV
+	help
+	  Select this option if you want to be able to receive SMSG messages
+	  from other VM guest systems.
+
+config SMSGIUCV_EVENT
+	def_tristate m
+	prompt "Deliver IUCV special messages as uevents (VM only)"
+	depends on SMSGIUCV
+	help
+	  Select this option to deliver CP special messages (SMSGs) as
+	  uevents.  The driver handles only those special messages that
+	  start with "APP".
+
+	  To compile as a module, choose M. The module name is "smsgiucv_app".
+
+config CLAW
+	def_tristate m
+	prompt "CLAW device support"
+	depends on CCW && NETDEVICES
+	help
+	  This driver supports channel attached CLAW devices.
+	  CLAW is Common Link Access for Workstation.  Common devices
+          that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices.
+	  To compile as a module, choose M. The module name is claw.
+	  To compile into the kernel, choose Y.
+
+config QETH
+	def_tristate y
+	prompt "Gigabit Ethernet device support"
+	depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
+	help
+	  This driver supports the IBM System z OSA Express adapters
+	  in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN
+	  interfaces in QDIO and HIPER mode.
+	
+	  For details please refer to the documentation provided by IBM at
+	  <http://www.ibm.com/developerworks/linux/linux390>
+
+	  To compile this driver as a module, choose M.
+	  The module name is qeth.
+
+config QETH_L2
+	def_tristate y
+	prompt "qeth layer 2 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 2 mode.
+	  To compile as a module, choose M. The module name is qeth_l2.
+	  If unsure, choose Y.
+
+config QETH_L3
+	def_tristate y
+	prompt "qeth layer 3 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 3 mode.
+	  To compile as a module choose M. The module name is qeth_l3.
+	  If unsure, choose Y.
+
+config QETH_IPV6
+	def_bool y if (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
+
+config CCWGROUP
+	tristate
+	default (LCS || CTCM || QETH || CLAW)
+
+endmenu
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/Makefile b/ap/os/linux/linux-3.4.x/drivers/s390/net/Makefile
new file mode 100644
index 0000000..4dfe8c1
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/Makefile
@@ -0,0 +1,17 @@
+#
+# S/390 network devices
+#
+
+ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o
+obj-$(CONFIG_CTCM) += ctcm.o fsm.o
+obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
+obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
+obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o
+obj-$(CONFIG_LCS) += lcs.o
+obj-$(CONFIG_CLAW) += claw.o
+qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
+obj-$(CONFIG_QETH) += qeth.o
+qeth_l2-y += qeth_l2_main.o
+obj-$(CONFIG_QETH_L2) += qeth_l2.o
+qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o
+obj-$(CONFIG_QETH_L3) += qeth_l3.o
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/claw.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/claw.c
new file mode 100644
index 0000000..b41fae3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/claw.c
@@ -0,0 +1,3421 @@
+/*
+ *  drivers/s390/net/claw.c
+ *    ESCON CLAW network driver
+ *
+ *  Linux for zSeries version
+ *    Copyright IBM Corp. 2002, 2009
+ *  Author(s) Original code written by:
+ *		Kazuo Iimura <iimura@jp.ibm.com>
+ *   	      Rewritten by
+ *		Andy Richter <richtera@us.ibm.com>
+ *		Marc Price <mwprice@us.ibm.com>
+ *
+ *    sysfs parms:
+ *   group x.x.rrrr,x.x.wwww
+ *   read_buffer nnnnnnn
+ *   write_buffer nnnnnn
+ *   host_name  aaaaaaaa
+ *   adapter_name aaaaaaaa
+ *   api_type    aaaaaaaa
+ *
+ *  eg.
+ *   group  0.0.0200 0.0.0201
+ *   read_buffer 25
+ *   write_buffer 20
+ *   host_name LINUX390
+ *   adapter_name RS6K
+ *   api_type     TCPIP
+ *
+ *  where
+ *
+ *   The device id is decided by the order in which entries
+ *   are added to the group: the first is claw0, the second claw1,
+ *   and so on up to CLAW_MAX_DEV
+ *
+ *   rrrr     -	the first of 2 consecutive device addresses used for the
+ *		CLAW protocol.
+ *		The specified address is always used as the input (Read)
+ *		channel and the next address is used as the output channel.
+ *
+ *   wwww     -	the second of 2 consecutive device addresses used for
+ *		the CLAW protocol.
+ *              The specified address is always used as the output
+ *		channel and the previous address is used as the input channel.
+ *
+ *   read_buffer	-       specifies number of input buffers to allocate.
+ *   write_buffer       -       specifies number of output buffers to allocate.
+ *   host_name          -       host name
+ *   adapter_name       -       adapter name
+ *   api_type           -       API type TCPIP or API will be sent and expected
+ *				as ws_name
+ *
+ *   Note the following requirements:
+ *   1)  host_name must match the configured adapter_name on the remote side
+ *   2)  adapter_name must match the configured host name on the remote side
+ *
+ *  Change History
+ *    1.00  Initial release shipped
+ *    1.10  Changes for Buffer allocation
+ *    1.15  Changed for 2.6 Kernel  No longer compiles on 2.4 or lower
+ *    1.25  Added Packing support
+ *    1.5
+ */
+
+#define KMSG_COMPONENT "claw"
+
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <asm/io.h>
+#include <linux/bitops.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include "claw.h"
+
+/*
+   CLAW uses the s390dbf file system; see claw_trace and claw_setup
+*/
+
+static char version[] __initdata = "CLAW driver";
+static char debug_buffer[255];
+/**
+ * Debug Facility Stuff
+ */
+static debug_info_t *claw_dbf_setup;
+static debug_info_t *claw_dbf_trace;
+
+/**
+ *  CLAW Debug Facility functions
+ */
+static void
+claw_unregister_debug_facility(void)
+{
+	if (claw_dbf_setup)
+		debug_unregister(claw_dbf_setup);
+	if (claw_dbf_trace)
+		debug_unregister(claw_dbf_trace);
+}
+
+static int
+claw_register_debug_facility(void)
+{
+	claw_dbf_setup = debug_register("claw_setup", 2, 1, 8);
+	claw_dbf_trace = debug_register("claw_trace", 2, 2, 8);
+	if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
+		claw_unregister_debug_facility();
+		return -ENOMEM;
+	}
+	debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
+	debug_set_level(claw_dbf_setup, 2);
+	debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
+	debug_set_level(claw_dbf_trace, 2);
+	return 0;
+}
+
+static inline void
+claw_set_busy(struct net_device *dev)
+{
+	((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
+	eieio();
+}
+
+static inline void
+claw_clear_busy(struct net_device *dev)
+{
+	clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
+	netif_wake_queue(dev);
+	eieio();
+}
+
+static inline int
+claw_check_busy(struct net_device *dev)
+{
+	eieio();
+	return ((struct claw_privbk *) dev->ml_priv)->tbusy;
+}
+
+static inline void
+claw_setbit_busy(int nr,struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
+}
+
+static inline void
+claw_clearbit_busy(int nr,struct net_device *dev)
+{
+	clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
+	netif_wake_queue(dev);
+}
+
+static inline int
+claw_test_and_setbit_busy(int nr,struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return test_and_set_bit(nr,
+		(void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
+}
+
+
+/* Functions for the DEV methods */
+
+static int claw_probe(struct ccwgroup_device *cgdev);
+static void claw_remove_device(struct ccwgroup_device *cgdev);
+static void claw_purge_skb_queue(struct sk_buff_head *q);
+static int claw_new_device(struct ccwgroup_device *cgdev);
+static int claw_shutdown_device(struct ccwgroup_device *cgdev);
+static int claw_tx(struct sk_buff *skb, struct net_device *dev);
+static int claw_change_mtu( struct net_device *dev, int new_mtu);
+static int claw_open(struct net_device *dev);
+static void claw_irq_handler(struct ccw_device *cdev,
+	unsigned long intparm, struct irb *irb);
+static void claw_irq_tasklet ( unsigned long data );
+static int claw_release(struct net_device *dev);
+static void claw_write_retry ( struct chbk * p_ch );
+static void claw_write_next ( struct chbk * p_ch );
+static void claw_timer ( struct chbk * p_ch );
+
+/* Functions */
+static int add_claw_reads(struct net_device *dev,
+	struct ccwbk* p_first, struct ccwbk* p_last);
+static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
+static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
+static int find_link(struct net_device *dev, char *host_name, char *ws_name );
+static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
+static int init_ccw_bk(struct net_device *dev);
+static void probe_error( struct ccwgroup_device *cgdev);
+static struct net_device_stats *claw_stats(struct net_device *dev);
+static int pages_to_order_of_mag(int num_of_pages);
+static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
+/* sysfs Functions */
+static ssize_t claw_hname_show(struct device *dev,
+	struct device_attribute *attr, char *buf);
+static ssize_t claw_hname_write(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count);
+static ssize_t claw_adname_show(struct device *dev,
+	struct device_attribute *attr, char *buf);
+static ssize_t claw_adname_write(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count);
+static ssize_t claw_apname_show(struct device *dev,
+	struct device_attribute *attr, char *buf);
+static ssize_t claw_apname_write(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count);
+static ssize_t claw_wbuff_show(struct device *dev,
+	struct device_attribute *attr, char *buf);
+static ssize_t claw_wbuff_write(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count);
+static ssize_t claw_rbuff_show(struct device *dev,
+	struct device_attribute *attr, char *buf);
+static ssize_t claw_rbuff_write(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count);
+static int claw_add_files(struct device *dev);
+static void claw_remove_files(struct device *dev);
+
+/*   Functions for System Validate  */
+static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
+static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
+       __u8 correlator, __u8 rc , char *local_name, char *remote_name);
+static int claw_snd_conn_req(struct net_device *dev, __u8 link);
+static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl);
+static int claw_snd_sys_validate_rsp(struct net_device *dev,
+        struct clawctl * p_ctl, __u32 return_code);
+static int claw_strt_conn_req(struct net_device *dev );
+static void claw_strt_read(struct net_device *dev, int lock);
+static void claw_strt_out_IO(struct net_device *dev);
+static void claw_free_wrt_buf(struct net_device *dev);
+
+/* Functions for unpack reads   */
+static void unpack_read(struct net_device *dev);
+
+static int claw_pm_prepare(struct ccwgroup_device *gdev)
+{
+	return -EPERM;
+}
+
+/* the root device for claw group devices */
+static struct device *claw_root_dev;
+
+/* ccwgroup table  */
+
+static struct ccwgroup_driver claw_group_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "claw",
+	},
+        .max_slaves  = 2,
+        .driver_id   = 0xC3D3C1E6,
+        .probe       = claw_probe,
+        .remove      = claw_remove_device,
+        .set_online  = claw_new_device,
+        .set_offline = claw_shutdown_device,
+	.prepare     = claw_pm_prepare,
+};
+
+static struct ccw_device_id claw_ids[] = {
+	{CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, claw_ids);
+
+static struct ccw_driver claw_ccw_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "claw",
+	},
+	.ids	= claw_ids,
+	.probe	= ccwgroup_probe_ccwdev,
+	.remove	= ccwgroup_remove_ccwdev,
+	.int_class = IOINT_CLW,
+};
+
+static ssize_t
+claw_driver_group_store(struct device_driver *ddrv, const char *buf,
+			size_t count)
+{
+	int err;
+	err = ccwgroup_create_from_string(claw_root_dev,
+					  claw_group_driver.driver_id,
+					  &claw_ccw_driver, 2, buf);
+	return err ? err : count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
+
+static struct attribute *claw_group_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+
+static struct attribute_group claw_group_attr_group = {
+	.attrs = claw_group_attrs,
+};
+
+static const struct attribute_group *claw_group_attr_groups[] = {
+	&claw_group_attr_group,
+	NULL,
+};
+
+/*
+*       Key functions
+*/
+
+/*----------------------------------------------------------------*
+ *   claw_probe                                                   *
+ *      this function is called for each CLAW device.             *
+ *----------------------------------------------------------------*/
+static int
+claw_probe(struct ccwgroup_device *cgdev)
+{
+	int  		rc;
+	struct claw_privbk *privptr=NULL;
+
+	CLAW_DBF_TEXT(2, setup, "probe");
+	if (!get_device(&cgdev->dev))
+		return -ENODEV;
+	privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
+	dev_set_drvdata(&cgdev->dev, privptr);
+	if (privptr == NULL) {
+		probe_error(cgdev);
+		put_device(&cgdev->dev);
+		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
+		return -ENOMEM;
+	}
+	privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
+	privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
+        if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) {
+                probe_error(cgdev);
+		put_device(&cgdev->dev);
+		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
+                return -ENOMEM;
+        }
+	memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8);
+	memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8);
+	memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8);
+	privptr->p_env->packing = 0;
+	privptr->p_env->write_buffers = 5;
+	privptr->p_env->read_buffers = 5;
+	privptr->p_env->read_size = CLAW_FRAME_SIZE;
+	privptr->p_env->write_size = CLAW_FRAME_SIZE;
+	rc = claw_add_files(&cgdev->dev);
+	if (rc) {
+		probe_error(cgdev);
+		put_device(&cgdev->dev);
+		dev_err(&cgdev->dev, "Creating the /proc files for a new"
+		" CLAW device failed\n");
+		CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
+		return rc;
+	}
+	privptr->p_env->p_priv = privptr;
+        cgdev->cdev[0]->handler = claw_irq_handler;
+	cgdev->cdev[1]->handler = claw_irq_handler;
+	CLAW_DBF_TEXT(2, setup, "prbext 0");
+
+        return 0;
+}  /*  end of claw_probe       */
+
+/*-------------------------------------------------------------------*
+ *   claw_tx                                                         *
+ *-------------------------------------------------------------------*/
+
+static int
+claw_tx(struct sk_buff *skb, struct net_device *dev)
+{
+        int             rc;
+	struct claw_privbk *privptr = dev->ml_priv;
+	unsigned long saveflags;
+        struct chbk *p_ch;
+
+	CLAW_DBF_TEXT(4, trace, "claw_tx");
+	p_ch = &privptr->channel[WRITE_CHANNEL];
+        spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
+        rc=claw_hw_tx( skb, dev, 1 );
+        spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
+	CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
+	if (rc)
+		rc = NETDEV_TX_BUSY;
+	else
+		rc = NETDEV_TX_OK;
+        return rc;
+}   /*  end of claw_tx */
+
+/*------------------------------------------------------------------*
+ *  pack the collect queue into an skb and return it                *
+ *   If not packing just return the top skb from the queue          *
+ *------------------------------------------------------------------*/
+
+static struct sk_buff *
+claw_pack_skb(struct claw_privbk *privptr)
+{
+	struct sk_buff *new_skb,*held_skb;
+	struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
+	struct claw_env  *p_env = privptr->p_env;
+	int	pkt_cnt,pk_ind,so_far;
+
+	new_skb = NULL;		/* assume no dice */
+	pkt_cnt = 0;
+	CLAW_DBF_TEXT(4, trace, "PackSKBe");
+	if (!skb_queue_empty(&p_ch->collect_queue)) {
+	/* some data */
+		held_skb = skb_dequeue(&p_ch->collect_queue);
+		if (held_skb)
+			dev_kfree_skb_any(held_skb);
+		else
+			return NULL;
+		if (p_env->packing != DO_PACKED)
+			return held_skb;
+		/* get a new SKB; we will pack at least one */
+		new_skb = dev_alloc_skb(p_env->write_size);
+		if (new_skb == NULL) {
+			atomic_inc(&held_skb->users);
+			skb_queue_head(&p_ch->collect_queue,held_skb);
+			return NULL;
+		}
+		/* we have a packed packet and a place to put it */
+		pk_ind = 1;
+		so_far = 0;
+		new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
+		while ((pk_ind) && (held_skb != NULL)) {
+			if (held_skb->len+so_far <= p_env->write_size-8) {
+				memcpy(skb_put(new_skb,held_skb->len),
+					held_skb->data,held_skb->len);
+				privptr->stats.tx_packets++;
+				so_far += held_skb->len;
+				pkt_cnt++;
+				dev_kfree_skb_any(held_skb);
+				held_skb = skb_dequeue(&p_ch->collect_queue);
+				if (held_skb)
+					atomic_dec(&held_skb->users);
+			} else {
+				pk_ind = 0;
+				atomic_inc(&held_skb->users);
+				skb_queue_head(&p_ch->collect_queue,held_skb);
+			}
+		}
+	}
+	CLAW_DBF_TEXT(4, trace, "PackSKBx");
+	return new_skb;
+}
+
+/*-------------------------------------------------------------------*
+ *   claw_change_mtu                                                 *
+ *                                                                   *
+ *-------------------------------------------------------------------*/
+
+static int
+claw_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct claw_privbk *privptr = dev->ml_priv;
+	int buff_size;
+	CLAW_DBF_TEXT(4, trace, "setmtu");
+	buff_size = privptr->p_env->write_size;
+        if ((new_mtu < 60) || (new_mtu > buff_size)) {
+                return -EINVAL;
+        }
+        dev->mtu = new_mtu;
+        return 0;
+}  /*   end of claw_change_mtu */
+
+
+/*-------------------------------------------------------------------*
+ *   claw_open                                                       *
+ *                                                                   *
+ *-------------------------------------------------------------------*/
+static int
+claw_open(struct net_device *dev)
+{
+
+        int     rc;
+        int     i;
+        unsigned long       saveflags=0;
+        unsigned long       parm;
+        struct claw_privbk  *privptr;
+	DECLARE_WAITQUEUE(wait, current);
+        struct timer_list  timer;
+        struct ccwbk *p_buf;
+
+	CLAW_DBF_TEXT(4, trace, "open");
+	privptr = (struct claw_privbk *)dev->ml_priv;
+        /*   allocate and initialize CCW blocks */
+	if (privptr->buffs_alloc == 0) {
+	        rc=init_ccw_bk(dev);
+        	if (rc) {
+			CLAW_DBF_TEXT(2, trace, "openmem");
+                	return -ENOMEM;
+        	}
+	}
+        privptr->system_validate_comp=0;
+        privptr->release_pend=0;
+	if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
+		privptr->p_env->read_size=DEF_PACK_BUFSIZE;
+		privptr->p_env->write_size=DEF_PACK_BUFSIZE;
+		privptr->p_env->packing=PACKING_ASK;
+	} else {
+		privptr->p_env->packing=0;
+		privptr->p_env->read_size=CLAW_FRAME_SIZE;
+		privptr->p_env->write_size=CLAW_FRAME_SIZE;
+	}
+        claw_set_busy(dev);
+	tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
+		(unsigned long) &privptr->channel[READ_CHANNEL]);
+        for ( i = 0; i < 2;  i++) {
+		CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
+                init_waitqueue_head(&privptr->channel[i].wait);
+		/* skb_queue_head_init(&p_ch->io_queue); */
+		if (i == WRITE_CHANNEL)
+			skb_queue_head_init(
+				&privptr->channel[WRITE_CHANNEL].collect_queue);
+                privptr->channel[i].flag_a = 0;
+                privptr->channel[i].IO_active = 0;
+                privptr->channel[i].flag  &= ~CLAW_TIMER;
+                init_timer(&timer);
+                timer.function = (void *)claw_timer;
+                timer.data = (unsigned long)(&privptr->channel[i]);
+                timer.expires = jiffies + 15*HZ;
+                add_timer(&timer);
+                spin_lock_irqsave(get_ccwdev_lock(
+			privptr->channel[i].cdev), saveflags);
+                parm = (unsigned long) &privptr->channel[i];
+                privptr->channel[i].claw_state = CLAW_START_HALT_IO;
+		rc = 0;
+		add_wait_queue(&privptr->channel[i].wait, &wait);
+                rc = ccw_device_halt(
+			(struct ccw_device *)privptr->channel[i].cdev,parm);
+                set_current_state(TASK_INTERRUPTIBLE);
+                spin_unlock_irqrestore(
+			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
+                schedule();
+		set_current_state(TASK_RUNNING);
+                remove_wait_queue(&privptr->channel[i].wait, &wait);
+                if(rc != 0)
+                        ccw_check_return_code(privptr->channel[i].cdev, rc);
+                if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
+                        del_timer(&timer);
+        }
+	if ((((privptr->channel[READ_CHANNEL].last_dstat |
+		privptr->channel[WRITE_CHANNEL].last_dstat) &
+           ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
+	   (((privptr->channel[READ_CHANNEL].flag |
+		privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
+		dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
+			"%s: remote side is not ready\n", dev->name);
+		CLAW_DBF_TEXT(2, trace, "notrdy");
+
+                for ( i = 0; i < 2;  i++) {
+                        spin_lock_irqsave(
+				get_ccwdev_lock(privptr->channel[i].cdev),
+				saveflags);
+                        parm = (unsigned long) &privptr->channel[i];
+                        privptr->channel[i].claw_state = CLAW_STOP;
+                        rc = ccw_device_halt(
+				(struct ccw_device *)privptr->channel[i].cdev,
+				parm);
+                        spin_unlock_irqrestore(
+				get_ccwdev_lock(privptr->channel[i].cdev),
+				saveflags);
+                        if (rc != 0) {
+                                ccw_check_return_code(
+					privptr->channel[i].cdev, rc);
+                        }
+                }
+                free_pages((unsigned long)privptr->p_buff_ccw,
+			(int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
+                if (privptr->p_env->read_size < PAGE_SIZE) {
+                        free_pages((unsigned long)privptr->p_buff_read,
+			       (int)pages_to_order_of_mag(
+			       		privptr->p_buff_read_num));
+                }
+                else {
+                        p_buf=privptr->p_read_active_first;
+                        while (p_buf!=NULL) {
+                                free_pages((unsigned long)p_buf->p_buffer,
+				      (int)pages_to_order_of_mag(
+				      	privptr->p_buff_pages_perread ));
+                                p_buf=p_buf->next;
+                        }
+                }
+                if (privptr->p_env->write_size < PAGE_SIZE ) {
+                        free_pages((unsigned long)privptr->p_buff_write,
+			     (int)pages_to_order_of_mag(
+			     	privptr->p_buff_write_num));
+                }
+                else {
+                        p_buf=privptr->p_write_active_first;
+                        while (p_buf!=NULL) {
+                                free_pages((unsigned long)p_buf->p_buffer,
+				     (int)pages_to_order_of_mag(
+				     	privptr->p_buff_pages_perwrite ));
+                                p_buf=p_buf->next;
+                        }
+                }
+		privptr->buffs_alloc = 0;
+		privptr->channel[READ_CHANNEL].flag = 0x00;
+		privptr->channel[WRITE_CHANNEL].flag = 0x00;
+                privptr->p_buff_ccw=NULL;
+                privptr->p_buff_read=NULL;
+                privptr->p_buff_write=NULL;
+                claw_clear_busy(dev);
+		CLAW_DBF_TEXT(2, trace, "open EIO");
+                return -EIO;
+        }
+
+        /*   Send SystemValidate command */
+
+        claw_clear_busy(dev);
+	CLAW_DBF_TEXT(4, trace, "openok");
+        return 0;
+}    /*     end of claw_open    */
+
+/*-------------------------------------------------------------------*
+*                                                                    *
+*       claw_irq_handler                                             *
+*                                                                    *
+*--------------------------------------------------------------------*/
+static void
+claw_irq_handler(struct ccw_device *cdev,
+	unsigned long intparm, struct irb *irb)
+{
+        struct chbk *p_ch = NULL;
+        struct claw_privbk *privptr = NULL;
+        struct net_device *dev = NULL;
+        struct claw_env  *p_env;
+        struct chbk *p_ch_r=NULL;
+
+	CLAW_DBF_TEXT(4, trace, "clawirq");
+        /* Bypass all 'unsolicited interrupts' */
+	privptr = dev_get_drvdata(&cdev->dev);
+	if (!privptr) {
+		dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
+			" IRQ, c-%02x d-%02x\n",
+			irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
+		CLAW_DBF_TEXT(2, trace, "badirq");
+                return;
+        }
+
+	/* Try to extract channel from driver data. */
+	if (privptr->channel[READ_CHANNEL].cdev == cdev)
+		p_ch = &privptr->channel[READ_CHANNEL];
+	else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
+		p_ch = &privptr->channel[WRITE_CHANNEL];
+	else {
+		dev_warn(&cdev->dev, "The device is not a CLAW device\n");
+		CLAW_DBF_TEXT(2, trace, "badchan");
+		return;
+	}
+	CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);
+
+	dev = (struct net_device *) (p_ch->ndev);
+        p_env=privptr->p_env;
+
+	/* Copy interruption response block. */
+	memcpy(p_ch->irb, irb, sizeof(struct irb));
+
+	/* Check for good subchannel return code, otherwise info message */
+	if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
+		dev_info(&cdev->dev,
+			"%s: subchannel check for device: %04x -"
+			" Sch Stat %02x  Dev Stat %02x CPA - %04x\n",
+                        dev->name, p_ch->devno,
+			irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
+			irb->scsw.cmd.cpa);
+		CLAW_DBF_TEXT(2, trace, "chanchk");
+                /* return; */
+        }
+
+        /* Check the reason-code of a unit check */
+	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+                ccw_check_unit_check(p_ch, irb->ecw[0]);
+
+        /* State machine to bring the connection up, down and to restart */
+	p_ch->last_dstat = irb->scsw.cmd.dstat;
+
+        switch (p_ch->claw_state) {
+	case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
+		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
+		(p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
+		(p_ch->irb->scsw.cmd.stctl ==
+		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
+			return;
+		wake_up(&p_ch->wait);   /* wake up claw_release */
+		CLAW_DBF_TEXT(4, trace, "stop");
+		return;
+	case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open  */
+		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
+		(p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
+		(p_ch->irb->scsw.cmd.stctl ==
+		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
+			CLAW_DBF_TEXT(4, trace, "haltio");
+			return;
+		}
+		if (p_ch->flag == CLAW_READ) {
+			p_ch->claw_state = CLAW_START_READ;
+			wake_up(&p_ch->wait); /* wake claw_open (READ)*/
+		} else if (p_ch->flag == CLAW_WRITE) {
+			p_ch->claw_state = CLAW_START_WRITE;
+			/*      send SYSTEM_VALIDATE                    */
+			claw_strt_read(dev, LOCK_NO);
+			claw_send_control(dev,
+				SYSTEM_VALIDATE_REQUEST,
+				0, 0, 0,
+				p_env->host_name,
+				p_env->adapter_name);
+		} else {
+			dev_warn(&cdev->dev, "The CLAW device received"
+				" an unexpected IRQ, "
+				"c-%02x d-%02x\n",
+				irb->scsw.cmd.cstat,
+				irb->scsw.cmd.dstat);
+			return;
+			}
+		CLAW_DBF_TEXT(4, trace, "haltio");
+		return;
+	case CLAW_START_READ:
+		CLAW_DBF_TEXT(4, trace, "ReadIRQ");
+		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+			clear_bit(0, (void *)&p_ch->IO_active);
+			if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
+			    (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
+			    (p_ch->irb->ecw[0])        == 0) {
+				privptr->stats.rx_errors++;
+				dev_info(&cdev->dev,
+					"%s: Restart is required after remote "
+					"side recovers \n",
+					dev->name);
+			}
+			CLAW_DBF_TEXT(4, trace, "notrdy");
+			return;
+		}
+		if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
+			(p_ch->irb->scsw.cmd.dstat == 0)) {
+			if (test_and_set_bit(CLAW_BH_ACTIVE,
+				(void *)&p_ch->flag_a) == 0)
+				tasklet_schedule(&p_ch->tasklet);
+			else
+				CLAW_DBF_TEXT(4, trace, "PCINoBH");
+			CLAW_DBF_TEXT(4, trace, "PCI_read");
+			return;
+		}
+		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
+		 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
+		 (p_ch->irb->scsw.cmd.stctl ==
+		 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
+			CLAW_DBF_TEXT(4, trace, "SPend_rd");
+			return;
+		}
+		clear_bit(0, (void *)&p_ch->IO_active);
+		claw_clearbit_busy(TB_RETRY, dev);
+		if (test_and_set_bit(CLAW_BH_ACTIVE,
+			(void *)&p_ch->flag_a) == 0)
+			tasklet_schedule(&p_ch->tasklet);
+		else
+			CLAW_DBF_TEXT(4, trace, "RdBHAct");
+		CLAW_DBF_TEXT(4, trace, "RdIRQXit");
+		return;
+	case CLAW_START_WRITE:
+		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+			dev_info(&cdev->dev,
+				"%s: Unit Check Occurred in "
+				"write channel\n", dev->name);
+			clear_bit(0, (void *)&p_ch->IO_active);
+			if (p_ch->irb->ecw[0] & 0x80) {
+				dev_info(&cdev->dev,
+					"%s: Resetting Event "
+					"occurred:\n", dev->name);
+				init_timer(&p_ch->timer);
+				p_ch->timer.function =
+					(void *)claw_write_retry;
+				p_ch->timer.data = (unsigned long)p_ch;
+				p_ch->timer.expires = jiffies + 10*HZ;
+				add_timer(&p_ch->timer);
+				dev_info(&cdev->dev,
+					"%s: write connection "
+					"restarting\n", dev->name);
+			}
+			CLAW_DBF_TEXT(4, trace, "rstrtwrt");
+			return;
+		}
+		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
+			clear_bit(0, (void *)&p_ch->IO_active);
+			dev_info(&cdev->dev,
+				"%s: Unit Exception "
+				"occurred in write channel\n",
+				dev->name);
+		}
+		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
+		(p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
+		(p_ch->irb->scsw.cmd.stctl ==
+		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
+			CLAW_DBF_TEXT(4, trace, "writeUE");
+			return;
+		}
+		clear_bit(0, (void *)&p_ch->IO_active);
+		if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
+			claw_write_next(p_ch);
+			claw_clearbit_busy(TB_TX, dev);
+			claw_clear_busy(dev);
+		}
+		p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
+		if (test_and_set_bit(CLAW_BH_ACTIVE,
+			(void *)&p_ch_r->flag_a) == 0)
+			tasklet_schedule(&p_ch_r->tasklet);
+		CLAW_DBF_TEXT(4, trace, "StWtExit");
+		return;
+	default:
+		dev_warn(&cdev->dev,
+			"The CLAW device for %s received an unexpected IRQ\n",
+			 dev->name);
+		CLAW_DBF_TEXT(2, trace, "badIRQ");
+		return;
+        }
+
+}       /*   end of claw_irq_handler    */
+
+
+/*-------------------------------------------------------------------*
+*       claw_irq_tasklet                                             *
+*                                                                    *
+*--------------------------------------------------------------------*/
+static void
+claw_irq_tasklet ( unsigned long data )
+{
+	struct chbk * p_ch;
+        struct net_device  *dev;
+
+	p_ch = (struct chbk *) data;
+        dev = (struct net_device *)p_ch->ndev;
+	CLAW_DBF_TEXT(4, trace, "IRQtask");
+        unpack_read(dev);
+        clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
+	CLAW_DBF_TEXT(4, trace, "TskletXt");
+        return;
+}       /*    end of claw_irq_bh    */
+
+/*-------------------------------------------------------------------*
+*       claw_release                                                 *
+*                                                                    *
+*--------------------------------------------------------------------*/
+static int
+claw_release(struct net_device *dev)
+{
+        int                rc;
+        int                i;
+        unsigned long      saveflags;
+        unsigned long      parm;
+        struct claw_privbk *privptr;
+        DECLARE_WAITQUEUE(wait, current);
+        struct ccwbk*             p_this_ccw;
+        struct ccwbk*             p_buf;
+
+	if (!dev)
+                return 0;
+	privptr = (struct claw_privbk *)dev->ml_priv;
+        if (!privptr)
+                return 0;
+	CLAW_DBF_TEXT(4, trace, "release");
+        privptr->release_pend=1;
+        claw_setbit_busy(TB_STOP,dev);
+        for ( i = 1; i >=0 ;  i--) {
+                spin_lock_irqsave(
+			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
+	     /*   del_timer(&privptr->channel[READ_CHANNEL].timer);  */
+ 		privptr->channel[i].claw_state = CLAW_STOP;
+                privptr->channel[i].IO_active = 0;
+                parm = (unsigned long) &privptr->channel[i];
+		if (i == WRITE_CHANNEL)
+			claw_purge_skb_queue(
+				&privptr->channel[WRITE_CHANNEL].collect_queue);
+                rc = ccw_device_halt (privptr->channel[i].cdev, parm);
+	        if (privptr->system_validate_comp==0x00)  /* never opened? */
+                   init_waitqueue_head(&privptr->channel[i].wait);
+                add_wait_queue(&privptr->channel[i].wait, &wait);
+                set_current_state(TASK_INTERRUPTIBLE);
+	        spin_unlock_irqrestore(
+			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
+	        schedule();
+		set_current_state(TASK_RUNNING);
+	        remove_wait_queue(&privptr->channel[i].wait, &wait);
+	        if (rc != 0) {
+                        ccw_check_return_code(privptr->channel[i].cdev, rc);
+                }
+        }
+	if (privptr->pk_skb != NULL) {
+		dev_kfree_skb_any(privptr->pk_skb);
+		privptr->pk_skb = NULL;
+	}
+	if(privptr->buffs_alloc != 1) {
+		CLAW_DBF_TEXT(4, trace, "none2fre");
+		return 0;
+	}
+	CLAW_DBF_TEXT(4, trace, "freebufs");
+	if (privptr->p_buff_ccw != NULL) {
+        	free_pages((unsigned long)privptr->p_buff_ccw,
+	        	(int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
+	}
+	CLAW_DBF_TEXT(4, trace, "freeread");
+        if (privptr->p_env->read_size < PAGE_SIZE) {
+	    if (privptr->p_buff_read != NULL) {
+                free_pages((unsigned long)privptr->p_buff_read,
+		      (int)pages_to_order_of_mag(privptr->p_buff_read_num));
+		}
+        }
+        else {
+                p_buf=privptr->p_read_active_first;
+                while (p_buf!=NULL) {
+                        free_pages((unsigned long)p_buf->p_buffer,
+			     (int)pages_to_order_of_mag(
+			     	privptr->p_buff_pages_perread ));
+                        p_buf=p_buf->next;
+                }
+        }
+	 CLAW_DBF_TEXT(4, trace, "freewrit");
+        if (privptr->p_env->write_size < PAGE_SIZE ) {
+                free_pages((unsigned long)privptr->p_buff_write,
+		      (int)pages_to_order_of_mag(privptr->p_buff_write_num));
+        }
+        else {
+                p_buf=privptr->p_write_active_first;
+                while (p_buf!=NULL) {
+                        free_pages((unsigned long)p_buf->p_buffer,
+			      (int)pages_to_order_of_mag(
+			      privptr->p_buff_pages_perwrite ));
+                        p_buf=p_buf->next;
+                }
+        }
+	 CLAW_DBF_TEXT(4, trace, "clearptr");
+	privptr->buffs_alloc = 0;
+        privptr->p_buff_ccw=NULL;
+        privptr->p_buff_read=NULL;
+        privptr->p_buff_write=NULL;
+        privptr->system_validate_comp=0;
+        privptr->release_pend=0;
+        /*      Remove any writes that were pending and reset all reads   */
+        p_this_ccw=privptr->p_read_active_first;
+        while (p_this_ccw!=NULL) {
+                p_this_ccw->header.length=0xffff;
+                p_this_ccw->header.opcode=0xff;
+                p_this_ccw->header.flag=0x00;
+                p_this_ccw=p_this_ccw->next;
+        }
+
+        while (privptr->p_write_active_first!=NULL) {
+                p_this_ccw=privptr->p_write_active_first;
+                p_this_ccw->header.flag=CLAW_PENDING;
+                privptr->p_write_active_first=p_this_ccw->next;
+                p_this_ccw->next=privptr->p_write_free_chain;
+                privptr->p_write_free_chain=p_this_ccw;
+                ++privptr->write_free_count;
+        }
+        privptr->p_write_active_last=NULL;
+        privptr->mtc_logical_link = -1;
+        privptr->mtc_skipping = 1;
+        privptr->mtc_offset=0;
+
+	if (((privptr->channel[READ_CHANNEL].last_dstat |
+		privptr->channel[WRITE_CHANNEL].last_dstat) &
+		~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
+		dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
+			"Deactivating %s completed with incorrect"
+			" subchannel status "
+			"(read %02x, write %02x)\n",
+                dev->name,
+		privptr->channel[READ_CHANNEL].last_dstat,
+		privptr->channel[WRITE_CHANNEL].last_dstat);
+		 CLAW_DBF_TEXT(2, trace, "badclose");
+        }
+	CLAW_DBF_TEXT(4, trace, "rlsexit");
+        return 0;
+}      /* end of claw_release     */
+
+/*-------------------------------------------------------------------*
+*       claw_write_retry                                             *
+*                                                                    *
+*--------------------------------------------------------------------*/
+
+static void
+claw_write_retry ( struct chbk *p_ch )
+{
+
+        struct net_device  *dev=p_ch->ndev;
+
+	CLAW_DBF_TEXT(4, trace, "w_retry");
+        if (p_ch->claw_state == CLAW_STOP) {
+        	return;
+        }
+	claw_strt_out_IO( dev );
+	CLAW_DBF_TEXT(4, trace, "rtry_xit");
+        return;
+}      /* end of claw_write_retry      */
+
+
+/*-------------------------------------------------------------------*
+*       claw_write_next                                              *
+*                                                                    *
+*--------------------------------------------------------------------*/
+
+static void
+claw_write_next ( struct chbk * p_ch )
+{
+
+        struct net_device  *dev;
+        struct claw_privbk *privptr=NULL;
+	struct sk_buff *pk_skb;
+
+	CLAW_DBF_TEXT(4, trace, "claw_wrt");
+        if (p_ch->claw_state == CLAW_STOP)
+                return;
+        dev = (struct net_device *) p_ch->ndev;
+	privptr = (struct claw_privbk *) dev->ml_priv;
+        claw_free_wrt_buf( dev );
+	if ((privptr->write_free_count > 0) &&
+	    !skb_queue_empty(&p_ch->collect_queue)) {
+	  	pk_skb = claw_pack_skb(privptr);
+		while (pk_skb != NULL) {
+			claw_hw_tx(pk_skb, dev, 1);
+			if (privptr->write_free_count > 0) {
+	   			pk_skb = claw_pack_skb(privptr);
+			} else
+				pk_skb = NULL;
+		}
+	}
+        if (privptr->p_write_active_first!=NULL) {
+                claw_strt_out_IO(dev);
+        }
+        return;
+}      /* end of claw_write_next      */
+
+/*-------------------------------------------------------------------*
+*                                                                    *
+*       claw_timer                                                   *
+*--------------------------------------------------------------------*/
+
+static void
+claw_timer ( struct chbk * p_ch )
+{
+	CLAW_DBF_TEXT(4, trace, "timer");
+        p_ch->flag |= CLAW_TIMER;
+        wake_up(&p_ch->wait);
+        return;
+}      /* end of claw_timer  */
+
+/*
+*
+*       functions
+*/
+
+
+/*-------------------------------------------------------------------*
+*                                                                    *
+*     pages_to_order_of_mag                                          *
+*                                                                    *
+*    takes a number of pages from 1 to 512 and returns the base 2    *
+*    order of magnitude (log2(num_pages)) that get_free_pages()      *
+*    expects; get_free_pages() has an upper order of 9               *
+*--------------------------------------------------------------------*/
+
+static int
+pages_to_order_of_mag(int num_of_pages)
+{
+	int	order_of_mag=1;		/* assume 2 pages */
+	int	nump;
+
+	CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
+	if (num_of_pages == 1)   {return 0; }  /* magnitude of 0 = 1 page */
+	/* 512 pages = 2Meg on 4k page systems */
+	if (num_of_pages >= 512) {return 9; }
+	/* we have two or more pages order is at least 1 */
+	for (nump=2 ;nump <= 512;nump*=2) {
+	  if (num_of_pages <= nump)
+		  break;
+	  order_of_mag +=1;
+	}
+	if (order_of_mag > 9) { order_of_mag = 9; }  /* I know it's paranoid */
+	CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag);
+	return order_of_mag;
+}
+
+/*-------------------------------------------------------------------*
+*                                                                    *
+*     add_claw_reads                                                 *
+*                                                                    *
+*--------------------------------------------------------------------*/
+static int
+add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
+	struct ccwbk* p_last)
+{
+        struct claw_privbk *privptr;
+        struct ccw1  temp_ccw;
+        struct endccw * p_end;
+	CLAW_DBF_TEXT(4, trace, "addreads");
+	privptr = dev->ml_priv;
+        p_end = privptr->p_end_ccw;
+
+        /* first CCW and last CCW contain a new set of read channel programs
+        *       to append to the running channel programs
+        */
+        if ( p_first==NULL) {
+		CLAW_DBF_TEXT(4, trace, "addexit");
+                return 0;
+        }
+
+        /* set up ending CCW sequence for this segment */
+        if (p_end->read1) {
+                p_end->read1=0x00;    /*  second ending CCW is now active */
+                /*      reset ending CCWs and setup TIC CCWs              */
+                p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+                p_end->read2_nop2.flags  = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+                p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1);
+                p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1);
+                p_end->read2_nop2.cda=0;
+                p_end->read2_nop2.count=1;
+        }
+        else {
+                p_end->read1=0x01;  /* first ending CCW is now active */
+                /*      reset ending CCWs and setup TIC CCWs          */
+                p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+                p_end->read1_nop2.flags  = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+                p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
+                p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
+                p_end->read1_nop2.cda=0;
+                p_end->read1_nop2.count=1;
+        }
+
+        if ( privptr-> p_read_active_first ==NULL ) {
+		privptr->p_read_active_first = p_first;  /*  set new first */
+		privptr->p_read_active_last  = p_last;   /*  set new last  */
+        }
+        else {
+
+                /* set up TIC ccw  */
+                temp_ccw.cda= (__u32)__pa(&p_first->read);
+                temp_ccw.count=0;
+                temp_ccw.flags=0;
+                temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;
+
+
+                if (p_end->read1) {
+
+               /* The first set of ending CCWs is chained to the new read    */
+               /* chain, so the second set is chained to the active chain.   */
+               /* Therefore modify the second set (its TIC CCWs) to point    */
+               /* to the new read chain.  Update the CCW in one step so the  */
+               /* channel doesn't fetch it when it's only half done.         */
+                        memcpy( &p_end->read2_nop2, &temp_ccw ,
+				sizeof(struct ccw1));
+                        privptr->p_read_active_last->r_TIC_1.cda=
+				(__u32)__pa(&p_first->read);
+                        privptr->p_read_active_last->r_TIC_2.cda=
+				(__u32)__pa(&p_first->read);
+                }
+                else {
+                        /* make sure we update the CCW so channel doesn't   */
+			/* fetch it when it is only half done               */
+                        memcpy( &p_end->read1_nop2, &temp_ccw ,
+				sizeof(struct ccw1));
+                        privptr->p_read_active_last->r_TIC_1.cda=
+				(__u32)__pa(&p_first->read);
+                        privptr->p_read_active_last->r_TIC_2.cda=
+				(__u32)__pa(&p_first->read);
+                }
+		/*      chain in new set of blocks                         */
+                privptr->p_read_active_last->next = p_first;
+                privptr->p_read_active_last=p_last;
+        } /* end of if ( privptr-> p_read_active_first ==NULL)  */
+	CLAW_DBF_TEXT(4, trace, "addexit");
+        return 0;
+}    /*     end of add_claw_reads   */
+
+/*-------------------------------------------------------------------*
+ *   ccw_check_return_code                                           *
+ *                                                                   *
+ *-------------------------------------------------------------------*/
+
+static void
+ccw_check_return_code(struct ccw_device *cdev, int return_code)
+{
+	CLAW_DBF_TEXT(4, trace, "ccwret");
+        if (return_code != 0) {
+                switch (return_code) {
+		case -EBUSY: /* BUSY is a transient state no action needed */
+			break;
+		case -ENODEV:
+			dev_err(&cdev->dev, "The remote channel adapter is not"
+				" available\n");
+			break;
+		case -EINVAL:
+			dev_err(&cdev->dev,
+				"The status of the remote channel adapter"
+				" is not valid\n");
+			break;
+		default:
+			dev_err(&cdev->dev, "The common device layer"
+				" returned error code %d\n",
+				  return_code);
+		}
+	}
+	CLAW_DBF_TEXT(4, trace, "ccwret");
+}    /*    end of ccw_check_return_code   */
+
+/*-------------------------------------------------------------------*
+*       ccw_check_unit_check                                         *
+*--------------------------------------------------------------------*/
+
+static void
+ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
+{
+	struct net_device *ndev = p_ch->ndev;
+	struct device *dev = &p_ch->cdev->dev;
+
+	CLAW_DBF_TEXT(4, trace, "unitchek");
+	dev_warn(dev, "The communication peer of %s disconnected\n",
+		ndev->name);
+
+	if (sense & 0x40) {
+		if (sense & 0x01) {
+			dev_warn(dev, "The remote channel adapter for"
+				" %s has been reset\n",
+				ndev->name);
+		}
+	} else if (sense & 0x20) {
+		if (sense & 0x04) {
+			dev_warn(dev, "A data streaming timeout occurred"
+				" for %s\n",
+				ndev->name);
+		} else if (sense & 0x10) {
+			dev_warn(dev, "The remote channel adapter for %s"
+				" is faulty\n",
+				ndev->name);
+		} else {
+			dev_warn(dev, "A data transfer parity error occurred"
+				" for %s\n",
+				ndev->name);
+		}
+	} else if (sense & 0x10) {
+		dev_warn(dev, "A read data parity error occurred"
+			" for %s\n",
+			ndev->name);
+	}
+
+}   /*    end of ccw_check_unit_check    */
+
+/*-------------------------------------------------------------------*
+*               find_link                                            *
+*--------------------------------------------------------------------*/
+static int
+find_link(struct net_device *dev, char *host_name, char *ws_name )
+{
+	struct claw_privbk *privptr;
+	struct claw_env *p_env;
+	int    rc=0;
+
+	CLAW_DBF_TEXT(2, setup, "findlink");
+	privptr = dev->ml_priv;
+        p_env=privptr->p_env;
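+	/* Compare the peer's host and workstation application names against
+	 * what the current packing mode expects; any mismatch is reported
+	 * as EINVAL to the caller.
+	 */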
+	switch (p_env->packing)
+	{
+		case  PACKING_ASK:
+			if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) ||
+			    (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 ))
+        	             rc = EINVAL;
+			break;
+		case  DO_PACKED:
+		case  PACK_SEND:
+			if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) ||
+			    (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 ))
+        	        	rc = EINVAL;
+			break;
+		default:
+	       		if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) ||
+		    	    (memcmp(p_env->api_type , ws_name, 8)!=0))
+        	        	rc = EINVAL;
+			break;
+	}
+
+	return rc;
+}    /*    end of find_link    */
+
+/*-------------------------------------------------------------------*
+ *   claw_hw_tx                                                      *
+ *                                                                   *
+ *                                                                   *
+ *-------------------------------------------------------------------*/
+
+static int
+claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
+{
+        int                             rc=0;
+        struct claw_privbk 		*privptr;
+        struct ccwbk           *p_this_ccw;
+        struct ccwbk           *p_first_ccw;
+        struct ccwbk           *p_last_ccw;
+        __u32                           numBuffers;
+        signed long                     len_of_data;
+        unsigned long                   bytesInThisBuffer;
+        unsigned char                   *pDataAddress;
+        struct endccw                   *pEnd;
+        struct ccw1                     tempCCW;
+	struct claw_env			*p_env;
+	struct clawph			*pk_head;
+	struct chbk			*ch;
+
+	CLAW_DBF_TEXT(4, trace, "hw_tx");
+	privptr = (struct claw_privbk *)(dev->ml_priv);
+	p_env =privptr->p_env;
+	claw_free_wrt_buf(dev);	/* Clean up free chain if possible */
+        /*  scan the write queue to free any completed write packets   */
+        p_first_ccw=NULL;
+        p_last_ccw=NULL;
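+	/* If packing is active and this skb has not been packed yet
+	 * (cb[1] != 'P'), prepend a CLAW packing header and round the
+	 * payload length up to a 4-byte boundary.
+	 */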
+	if ((p_env->packing >= PACK_SEND) &&
+       	    (skb->cb[1] != 'P')) {
+		skb_push(skb,sizeof(struct clawph));
+		pk_head=(struct clawph *)skb->data;
+		pk_head->len=skb->len-sizeof(struct clawph);
+		if (pk_head->len%4)  {
+			pk_head->len+= 4-(pk_head->len%4);
+			skb_pad(skb,4-(pk_head->len%4));
+			skb_put(skb,4-(pk_head->len%4));
+		}
+		if (p_env->packing == DO_PACKED)
+			pk_head->link_num = linkid;
+		else
+			pk_head->link_num = 0;
+		pk_head->flag = 0x00;
+		skb_pad(skb,4);
+		skb->cb[1] = 'P';
+	}
+        if (linkid == 0) {
+        	if (claw_check_busy(dev)) {
+                	if (privptr->write_free_count!=0) {
+                                claw_clear_busy(dev);
+                        }
+                        else {
+                                claw_strt_out_IO(dev );
+                                claw_free_wrt_buf( dev );
+                                if (privptr->write_free_count==0) {
+					ch = &privptr->channel[WRITE_CHANNEL];
+					atomic_inc(&skb->users);
+					skb_queue_tail(&ch->collect_queue, skb);
+                                	goto Done;
+                                }
+                                else {
+                                	claw_clear_busy(dev);
+                                }
+                        }
+                }
+                /*  tx lock  */
+                if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
+			ch = &privptr->channel[WRITE_CHANNEL];
+			atomic_inc(&skb->users);
+			skb_queue_tail(&ch->collect_queue, skb);
+                        claw_strt_out_IO(dev );
+                        rc=-EBUSY;
+                        goto Done2;
+                }
+        }
+        /*      See how many write buffers are required to hold this data */
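+        /*      (e.g. a 9000-byte skb with 4096-byte write buffers --
+        *       illustrative sizes -- needs DIV_ROUND_UP(9000, 4096) = 3)   */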
+	numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
+
+        /*      If that number of buffers isn't available, give up for now */
+        if (privptr->write_free_count < numBuffers ||
+            privptr->p_write_free_chain == NULL ) {
+
+                claw_setbit_busy(TB_NOBUFFER,dev);
+		ch = &privptr->channel[WRITE_CHANNEL];
+		atomic_inc(&skb->users);
+		skb_queue_tail(&ch->collect_queue, skb);
+		CLAW_DBF_TEXT(2, trace, "clawbusy");
+                goto Done2;
+        }
+        pDataAddress=skb->data;
+        len_of_data=skb->len;
+
+        while (len_of_data > 0) {
+                p_this_ccw=privptr->p_write_free_chain;  /* get a block */
+		if (p_this_ccw == NULL) { /* lost the race */
+			ch = &privptr->channel[WRITE_CHANNEL];
+			atomic_inc(&skb->users);
+			skb_queue_tail(&ch->collect_queue, skb);
+			goto Done2;
+		}
+                privptr->p_write_free_chain=p_this_ccw->next;
+                p_this_ccw->next=NULL;
+                --privptr->write_free_count; /* -1 */
+		if (len_of_data >= privptr->p_env->write_size)
+			bytesInThisBuffer = privptr->p_env->write_size;
+		else
+			bytesInThisBuffer = len_of_data;
+                memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
+                len_of_data-=bytesInThisBuffer;
+                pDataAddress+=(unsigned long)bytesInThisBuffer;
+                /*      setup write CCW         */
+                p_this_ccw->write.cmd_code = (linkid * 8) +1;
+                if (len_of_data>0) {
+                        p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
+                }
+                p_this_ccw->write.count=bytesInThisBuffer;
+                /*      now add to end of this chain    */
+                if (p_first_ccw==NULL)    {
+                        p_first_ccw=p_this_ccw;
+                }
+                if (p_last_ccw!=NULL) {
+                        p_last_ccw->next=p_this_ccw;
+                        /*      set up TIC ccws         */
+                        p_last_ccw->w_TIC_1.cda=
+				(__u32)__pa(&p_this_ccw->write);
+                }
+                p_last_ccw=p_this_ccw;      /* save new last block */
+        }
+
+        /*      FirstCCW and LastCCW now contain a new set of write channel
+        *       programs to append to the running channel program
+        */
+
+        if (p_first_ccw!=NULL) {
+		/*      setup ending ccw sequence for this segment           */
+                pEnd=privptr->p_end_ccw;
+                if (pEnd->write1) {
+                        pEnd->write1=0x00;   /* second end ccw is now active */
+                        /*      set up Tic CCWs         */
+                        p_last_ccw->w_TIC_1.cda=
+				(__u32)__pa(&pEnd->write2_nop1);
+                        pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+                        pEnd->write2_nop2.flags    =
+				CCW_FLAG_SLI | CCW_FLAG_SKIP;
+                        pEnd->write2_nop2.cda=0;
+                        pEnd->write2_nop2.count=1;
+                }
+                else {  /*  end of if (pEnd->write1)*/
+                        pEnd->write1=0x01;   /* first end ccw is now active */
+                        /*      set up Tic CCWs         */
+                        p_last_ccw->w_TIC_1.cda=
+				(__u32)__pa(&pEnd->write1_nop1);
+                        pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+                        pEnd->write1_nop2.flags    =
+				CCW_FLAG_SLI | CCW_FLAG_SKIP;
+                        pEnd->write1_nop2.cda=0;
+                        pEnd->write1_nop2.count=1;
+                }  /* end of if (pEnd->write1) */
+
+                if (privptr->p_write_active_first==NULL ) {
+                        privptr->p_write_active_first=p_first_ccw;
+                        privptr->p_write_active_last=p_last_ccw;
+                }
+                else {
+                        /*      set up Tic CCWs         */
+
+                        tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
+                        tempCCW.count=0;
+                        tempCCW.flags=0;
+                        tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
+
+                        if (pEnd->write1) {
+
+                 /*
+                 * first set of ending CCW's is chained to the new write
+                 * chain, so the second set is chained to the active chain
+                 * Therefore modify the second set to point to the new write chain.
+                 * make sure we update the CCW atomically
+                 * so channel does not fetch it when it's only half done
+                 */
+                                memcpy( &pEnd->write2_nop2, &tempCCW ,
+					sizeof(struct ccw1));
+                                privptr->p_write_active_last->w_TIC_1.cda=
+					(__u32)__pa(&p_first_ccw->write);
+                        }
+                        else {
+
+                        /*make sure we update the CCW atomically
+                         *so channel does not fetch it when it's only half done
+                         */
+                                memcpy(&pEnd->write1_nop2, &tempCCW ,
+					sizeof(struct ccw1));
+                                privptr->p_write_active_last->w_TIC_1.cda=
+				        (__u32)__pa(&p_first_ccw->write);
+
+                        } /* end of if (pEnd->write1) */
+
+                        privptr->p_write_active_last->next=p_first_ccw;
+                        privptr->p_write_active_last=p_last_ccw;
+                }
+
+        } /* endif (p_first_ccw!=NULL)  */
+        dev_kfree_skb_any(skb);
+        claw_strt_out_IO(dev );
+        /*      if write free count is zero, set NOBUFFER       */
+	if (privptr->write_free_count==0) {
+		claw_setbit_busy(TB_NOBUFFER,dev);
+        }
+Done2:
+	claw_clearbit_busy(TB_TX,dev);
+Done:
+	return(rc);
+}    /*    end of claw_hw_tx    */
+
+/*-------------------------------------------------------------------*
+*                                                                    *
+*     init_ccw_bk                                                    *
+*                                                                    *
+*--------------------------------------------------------------------*/
+
+static int
+init_ccw_bk(struct net_device *dev)
+{
+
+        __u32   ccw_blocks_required;
+        __u32   ccw_blocks_perpage;
+        __u32   ccw_pages_required;
+        __u32   claw_reads_perpage=1;
+        __u32   claw_read_pages;
+        __u32   claw_writes_perpage=1;
+        __u32   claw_write_pages;
+        void    *p_buff=NULL;
+        struct ccwbk*p_free_chain;
+	struct ccwbk*p_buf;
+	struct ccwbk*p_last_CCWB;
+	struct ccwbk*p_first_CCWB;
+        struct endccw *p_endccw=NULL;
+	addr_t  real_address;
+	struct claw_privbk *privptr = dev->ml_priv;
+        struct clawh *pClawH=NULL;
+        addr_t   real_TIC_address;
+        int i,j;
+	CLAW_DBF_TEXT(4, trace, "init_ccw");
+
+        /*  initialize  statistics field */
+        privptr->active_link_ID=0;
+        /*  initialize  ccwbk pointers  */
+        privptr->p_write_free_chain=NULL;   /* pointer to free ccw chain*/
+        privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
+        privptr->p_write_active_last=NULL;  /* pointer to the last write ccw*/
+        privptr->p_read_active_first=NULL;  /* pointer to the first read ccw*/
+        privptr->p_read_active_last=NULL;   /* pointer to the last read ccw */
+        privptr->p_end_ccw=NULL;            /* pointer to ending ccw        */
+        privptr->p_claw_signal_blk=NULL;    /* pointer to signal block      */
+	privptr->buffs_alloc = 0;
+        memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
+        memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
+        /*  initialize  free write ccwbk counter  */
+        privptr->write_free_count=0;  /* number of free bufs on write chain */
+        p_last_CCWB = NULL;
+        p_first_CCWB= NULL;
+        /*
+        *  We need 1 CCW block for each read buffer, 1 for each
+        *  write buffer, plus 1 for ClawSignalBlock
+        */
+        ccw_blocks_required =
+		privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
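+        /* e.g. 4 read buffers and 4 write buffers (illustrative values)
+        *  require 4 + 4 + 1 = 9 CCW blocks.
+        */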
+        /*
+        * compute number of CCW blocks that will fit in a page
+        */
+        ccw_blocks_perpage= PAGE_SIZE /  CCWBK_SIZE;
+        ccw_pages_required=
+		DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
+
+        /*
+         *  Read and write sizes are set by two constants in claw.h:
+	 *  4k and 32k.  Unpacked values other than 4k will not
+	 *  provide good performance.  With packing support, 32k
+	 *  buffers are used.
+         */
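+	/* Illustrative arithmetic, assuming 4 KB pages: a 4 KB read buffer
+	 * occupies exactly one page (p_buff_pages_perread = 1), while a
+	 * 32 KB packed buffer spans p_buff_pages_perread = 8 pages.
+	 */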
+	if (privptr->p_env->read_size < PAGE_SIZE) {
+		claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
+		claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
+						claw_reads_perpage);
+         }
+         else {       /* > or equal  */
+		privptr->p_buff_pages_perread =
+			DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
+		claw_read_pages = privptr->p_env->read_buffers *
+					privptr->p_buff_pages_perread;
+         }
+        if (privptr->p_env->write_size < PAGE_SIZE) {
+		claw_writes_perpage =
+			PAGE_SIZE / privptr->p_env->write_size;
+		claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
+						claw_writes_perpage);
+
+        }
+        else {      /* >  or equal  */
+		privptr->p_buff_pages_perwrite =
+			DIV_ROUND_UP(privptr->p_env->write_size, PAGE_SIZE);
+		claw_write_pages = privptr->p_env->write_buffers *
+					privptr->p_buff_pages_perwrite;
+        }
+        /*
+        *               allocate ccw_pages_required
+        */
+        if (privptr->p_buff_ccw==NULL) {
+                privptr->p_buff_ccw=
+			(void *)__get_free_pages(__GFP_DMA,
+		        (int)pages_to_order_of_mag(ccw_pages_required ));
+                if (privptr->p_buff_ccw==NULL) {
+                        return -ENOMEM;
+                }
+                privptr->p_buff_ccw_num=ccw_pages_required;
+        }
+        memset(privptr->p_buff_ccw, 0x00,
+		privptr->p_buff_ccw_num * PAGE_SIZE);
+
+        /*
+        *               obtain ending ccw block address
+        *
+        */
+        privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
+        real_address  = (__u32)__pa(privptr->p_end_ccw);
+        /*                              Initialize ending CCW block       */
+        p_endccw=privptr->p_end_ccw;
+        p_endccw->real=real_address;
+        p_endccw->write1=0x00;
+        p_endccw->read1=0x00;
+
+        /*      write1_nop1                                     */
+        p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
+        p_endccw->write1_nop1.flags       = CCW_FLAG_SLI | CCW_FLAG_CC;
+        p_endccw->write1_nop1.count       = 1;
+        p_endccw->write1_nop1.cda         = 0;
+
+        /*      write1_nop2                                     */
+        p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+        p_endccw->write1_nop2.flags        = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+        p_endccw->write1_nop2.count      = 1;
+        p_endccw->write1_nop2.cda        = 0;
+
+        /*      write2_nop1                                     */
+        p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
+        p_endccw->write2_nop1.flags        = CCW_FLAG_SLI | CCW_FLAG_CC;
+        p_endccw->write2_nop1.count        = 1;
+        p_endccw->write2_nop1.cda          = 0;
+
+        /*      write2_nop2                                     */
+        p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+        p_endccw->write2_nop2.flags        = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+        p_endccw->write2_nop2.count        = 1;
+        p_endccw->write2_nop2.cda          = 0;
+
+        /*      read1_nop1                                      */
+        p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
+        p_endccw->read1_nop1.flags        = CCW_FLAG_SLI | CCW_FLAG_CC;
+        p_endccw->read1_nop1.count        = 1;
+        p_endccw->read1_nop1.cda          = 0;
+
+        /*      read1_nop2                                      */
+        p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+        p_endccw->read1_nop2.flags        = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+        p_endccw->read1_nop2.count        = 1;
+        p_endccw->read1_nop2.cda          = 0;
+
+        /*      read2_nop1                                      */
+        p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
+        p_endccw->read2_nop1.flags        = CCW_FLAG_SLI | CCW_FLAG_CC;
+        p_endccw->read2_nop1.count        = 1;
+        p_endccw->read2_nop1.cda          = 0;
+
+        /*      read2_nop2                                      */
+        p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+        p_endccw->read2_nop2.flags        = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+        p_endccw->read2_nop2.count        = 1;
+        p_endccw->read2_nop2.cda          = 0;
+
+        /*
+        *                               Build a chain of CCWs
+        *
+        */
+        p_buff=privptr->p_buff_ccw;
+
+        p_free_chain=NULL;
+        for (i=0 ; i < ccw_pages_required; i++ ) {
+                real_address  = (__u32)__pa(p_buff);
+                p_buf=p_buff;
+                for (j=0 ; j < ccw_blocks_perpage ; j++) {
+                        p_buf->next  = p_free_chain;
+                        p_free_chain = p_buf;
+                        p_buf->real=(__u32)__pa(p_buf);
+                        ++p_buf;
+                }
+                p_buff+=PAGE_SIZE;
+        }
+        /*
+        *                               Initialize ClawSignalBlock
+        *
+        */
+        if (privptr->p_claw_signal_blk==NULL) {
+                privptr->p_claw_signal_blk=p_free_chain;
+                p_free_chain=p_free_chain->next;
+                pClawH=(struct clawh *)privptr->p_claw_signal_blk;
+                pClawH->length=0xffff;
+                pClawH->opcode=0xff;
+                pClawH->flag=CLAW_BUSY;
+        }
+
+        /*
+        *               allocate write_pages_required and add to free chain
+        */
+        if (privptr->p_buff_write==NULL) {
+            if (privptr->p_env->write_size < PAGE_SIZE) {
+                privptr->p_buff_write=
+			(void *)__get_free_pages(__GFP_DMA,
+			(int)pages_to_order_of_mag(claw_write_pages ));
+                if (privptr->p_buff_write==NULL) {
+                        privptr->p_buff_ccw=NULL;
+                        return -ENOMEM;
+                }
+                /*
+                *                               Build CLAW write free chain
+                *
+                */
+
+                memset(privptr->p_buff_write, 0x00,
+			claw_write_pages * PAGE_SIZE);
+                privptr->p_write_free_chain=NULL;
+
+                p_buff=privptr->p_buff_write;
+
+                for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
+                        p_buf        = p_free_chain;      /*  get a CCW */
+                        p_free_chain = p_buf->next;
+                        p_buf->next  =privptr->p_write_free_chain;
+                        privptr->p_write_free_chain = p_buf;
+                        p_buf-> p_buffer	= (struct clawbuf *)p_buff;
+                        p_buf-> write.cda       = (__u32)__pa(p_buff);
+                        p_buf-> write.flags     = CCW_FLAG_SLI | CCW_FLAG_CC;
+                        p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
+                        p_buf-> w_read_FF.flags   = CCW_FLAG_SLI | CCW_FLAG_CC;
+                        p_buf-> w_read_FF.count   = 1;
+                        p_buf-> w_read_FF.cda     =
+				(__u32)__pa(&p_buf-> header.flag);
+                        p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
+                        p_buf-> w_TIC_1.flags      = 0;
+                        p_buf-> w_TIC_1.count      = 0;
+
+			if (((unsigned long)p_buff +
+					    privptr->p_env->write_size) >=
+			   ((unsigned long)(p_buff+2*
+			    (privptr->p_env->write_size) - 1) & PAGE_MASK)) {
+				p_buff = p_buff+privptr->p_env->write_size;
+                        }
+                }
+           }
+           else      /*  Buffers are >= PAGE_SIZE. 1 buff per get_free_pages */
+           {
+               privptr->p_write_free_chain=NULL;
+               for (i = 0; i< privptr->p_env->write_buffers ; i++) {
+                   p_buff=(void *)__get_free_pages(__GFP_DMA,
+		        (int)pages_to_order_of_mag(
+			privptr->p_buff_pages_perwrite) );
+                   if (p_buff==NULL) {
+                        free_pages((unsigned long)privptr->p_buff_ccw,
+			      (int)pages_to_order_of_mag(
+			      		privptr->p_buff_ccw_num));
+                        privptr->p_buff_ccw=NULL;
+			p_buf=privptr->p_buff_write;
+                        while (p_buf!=NULL) {
+                                free_pages((unsigned long)
+					p_buf->p_buffer,
+					(int)pages_to_order_of_mag(
+					privptr->p_buff_pages_perwrite));
+                                p_buf=p_buf->next;
+                        }
+                        return -ENOMEM;
+                   }  /* Error on get_pages   */
+                   memset(p_buff, 0x00, privptr->p_env->write_size );
+                   p_buf         = p_free_chain;
+                   p_free_chain  = p_buf->next;
+                   p_buf->next   = privptr->p_write_free_chain;
+                   privptr->p_write_free_chain = p_buf;
+                   privptr->p_buff_write = p_buf;
+                   p_buf->p_buffer=(struct clawbuf *)p_buff;
+                   p_buf-> write.cda     = (__u32)__pa(p_buff);
+                   p_buf-> write.flags   = CCW_FLAG_SLI | CCW_FLAG_CC;
+                   p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
+                   p_buf-> w_read_FF.flags    = CCW_FLAG_SLI | CCW_FLAG_CC;
+                   p_buf-> w_read_FF.count    = 1;
+                   p_buf-> w_read_FF.cda      =
+			(__u32)__pa(&p_buf-> header.flag);
+                   p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
+                   p_buf-> w_TIC_1.flags   = 0;
+                   p_buf-> w_TIC_1.count   = 0;
+               }  /* for all write_buffers   */
+
+           }    /* else buffers are PAGE_SIZE or bigger */
+
+        }
+        privptr->p_buff_write_num=claw_write_pages;
+        privptr->write_free_count=privptr->p_env->write_buffers;
+
+
+        /*
+        *               allocate read_pages_required and chain to free chain
+        */
+        if (privptr->p_buff_read==NULL) {
+            if (privptr->p_env->read_size < PAGE_SIZE)  {
+                privptr->p_buff_read=
+			(void *)__get_free_pages(__GFP_DMA,
+			(int)pages_to_order_of_mag(claw_read_pages) );
+                if (privptr->p_buff_read==NULL) {
+                        free_pages((unsigned long)privptr->p_buff_ccw,
+				(int)pages_to_order_of_mag(
+					privptr->p_buff_ccw_num));
+			/* free the write pages (size is < page size)  */
+                        free_pages((unsigned long)privptr->p_buff_write,
+				(int)pages_to_order_of_mag(
+				privptr->p_buff_write_num));
+                        privptr->p_buff_ccw=NULL;
+                        privptr->p_buff_write=NULL;
+                        return -ENOMEM;
+                }
+                memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
+                privptr->p_buff_read_num=claw_read_pages;
+                /*
+                *                               Build CLAW read free chain
+                *
+                */
+                p_buff=privptr->p_buff_read;
+                for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
+                        p_buf        = p_free_chain;
+                        p_free_chain = p_buf->next;
+
+                        if (p_last_CCWB==NULL) {
+                                p_buf->next=NULL;
+                                real_TIC_address=0;
+                                p_last_CCWB=p_buf;
+                        }
+                        else {
+                                p_buf->next=p_first_CCWB;
+                                real_TIC_address=
+				(__u32)__pa(&p_first_CCWB -> read );
+                        }
+
+                        p_first_CCWB=p_buf;
+
+                        p_buf->p_buffer=(struct clawbuf *)p_buff;
+                        /*  initialize read command */
+                        p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
+                        p_buf-> read.cda = (__u32)__pa(p_buff);
+                        p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+                        p_buf-> read.count       = privptr->p_env->read_size;
+
+                        /*  initialize read_h command */
+                        p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
+                        p_buf-> read_h.cda =
+				(__u32)__pa(&(p_buf->header));
+                        p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+                        p_buf-> read_h.count      = sizeof(struct clawh);
+
+                        /*  initialize Signal command */
+                        p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
+                        p_buf-> signal.cda =
+				(__u32)__pa(&(pClawH->flag));
+                        p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+                        p_buf-> signal.count     = 1;
+
+                        /*  initialize r_TIC_1 command */
+                        p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
+                        p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
+                        p_buf-> r_TIC_1.flags = 0;
+                        p_buf-> r_TIC_1.count      = 0;
+
+                        /*  initialize r_read_FF command */
+                        p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
+                        p_buf-> r_read_FF.cda =
+				(__u32)__pa(&(pClawH->flag));
+                        p_buf-> r_read_FF.flags =
+				CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
+                        p_buf-> r_read_FF.count    = 1;
+
+                        /*    initialize r_TIC_2          */
+                        memcpy(&p_buf->r_TIC_2,
+				&p_buf->r_TIC_1, sizeof(struct ccw1));
+
+                        /*     initialize Header     */
+                        p_buf->header.length=0xffff;
+                        p_buf->header.opcode=0xff;
+                        p_buf->header.flag=CLAW_PENDING;
+
+			if (((unsigned long)p_buff+privptr->p_env->read_size) >=
+			  ((unsigned long)(p_buff+2*(privptr->p_env->read_size)
+				 -1)
+			   & PAGE_MASK)) {
+                                p_buff= p_buff+privptr->p_env->read_size;
+                        }
+                        else {
+                                p_buff=
+				(void *)((unsigned long)
+					(p_buff+2*(privptr->p_env->read_size)-1)
+					 & PAGE_MASK) ;
+                        }
+                }   /* for read_buffers   */
+          }         /* read_size < PAGE_SIZE  */
+          else {  /* read Size >= PAGE_SIZE  */
+                for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
+                        p_buff = (void *)__get_free_pages(__GFP_DMA,
+				(int)pages_to_order_of_mag(
+					privptr->p_buff_pages_perread));
+                        if (p_buff==NULL) {
+                                free_pages((unsigned long)privptr->p_buff_ccw,
+					(int)pages_to_order_of_mag(privptr->
+					p_buff_ccw_num));
+				/* free the write pages  */
+	                        p_buf=privptr->p_buff_write;
+                                while (p_buf!=NULL) {
+					free_pages(
+					    (unsigned long)p_buf->p_buffer,
+					    (int)pages_to_order_of_mag(
+					    privptr->p_buff_pages_perwrite));
+                                        p_buf=p_buf->next;
+                                }
+				/* free any read pages already alloc  */
+	                        p_buf=privptr->p_buff_read;
+                                while (p_buf!=NULL) {
+					free_pages(
+					    (unsigned long)p_buf->p_buffer,
+					    (int)pages_to_order_of_mag(
+					     privptr->p_buff_pages_perread));
+                                        p_buf=p_buf->next;
+                                }
+                                privptr->p_buff_ccw=NULL;
+                                privptr->p_buff_write=NULL;
+                                return -ENOMEM;
+                        }
+                        memset(p_buff, 0x00, privptr->p_env->read_size);
+                        p_buf        = p_free_chain;
+                        privptr->p_buff_read = p_buf;
+                        p_free_chain = p_buf->next;
+
+                        if (p_last_CCWB==NULL) {
+                                p_buf->next=NULL;
+                                real_TIC_address=0;
+                                p_last_CCWB=p_buf;
+                        }
+                        else {
+                                p_buf->next=p_first_CCWB;
+                                real_TIC_address=
+					(addr_t)__pa(
+						&p_first_CCWB -> read );
+                        }
+
+                        p_first_CCWB=p_buf;
+				/* save buff address */
+                        p_buf->p_buffer=(struct clawbuf *)p_buff;
+                        /*  initialize read command */
+                        p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
+                        p_buf-> read.cda = (__u32)__pa(p_buff);
+                        p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+                        p_buf-> read.count       = privptr->p_env->read_size;
+
+                        /*  initialize read_h command */
+                        p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
+                        p_buf-> read_h.cda =
+				(__u32)__pa(&(p_buf->header));
+                        p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+                        p_buf-> read_h.count      = sizeof(struct clawh);
+
+                        /*  initialize Signal command */
+                        p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
+                        p_buf-> signal.cda =
+				(__u32)__pa(&(pClawH->flag));
+                        p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+                        p_buf-> signal.count     = 1;
+
+                        /*  initialize r_TIC_1 command */
+                        p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
+                        p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
+                        p_buf-> r_TIC_1.flags = 0;
+                        p_buf-> r_TIC_1.count      = 0;
+
+                        /*  initialize r_read_FF command */
+                        p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
+                        p_buf-> r_read_FF.cda =
+				(__u32)__pa(&(pClawH->flag));
+                        p_buf-> r_read_FF.flags =
+				CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
+                        p_buf-> r_read_FF.count    = 1;
+
+                        /*    initialize r_TIC_2          */
+                        memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
+				sizeof(struct ccw1));
+
+                        /*     initialize Header     */
+                        p_buf->header.length=0xffff;
+                        p_buf->header.opcode=0xff;
+                        p_buf->header.flag=CLAW_PENDING;
+
+                }    /* For read_buffers   */
+          }     /*  read_size >= PAGE_SIZE   */
+        }       /*  end of if (privptr->p_buff_read==NULL) */
+        add_claw_reads( dev  ,p_first_CCWB , p_last_CCWB);
+	privptr->buffs_alloc = 1;
+
+        return 0;
+}    /*    end of init_ccw_bk */
+
+/*-------------------------------------------------------------------*
+*                                                                    *
+*       probe_error                                                  *
+*                                                                    *
+*--------------------------------------------------------------------*/
+
+static void
+probe_error( struct ccwgroup_device *cgdev)
+{
+	struct claw_privbk *privptr;
+
+	CLAW_DBF_TEXT(4, trace, "proberr");
+	privptr = dev_get_drvdata(&cgdev->dev);
+	if (privptr != NULL) {
+		dev_set_drvdata(&cgdev->dev, NULL);
+		kfree(privptr->p_env);
+		kfree(privptr->p_mtc_envelope);
+		kfree(privptr);
+	}
+}    /*    probe_error    */
+
+/*-------------------------------------------------------------------*
+*    claw_process_control                                            *
+*                                                                    *
+*                                                                    *
+*--------------------------------------------------------------------*/
+
+static int
+claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
+{
+
+        struct clawbuf *p_buf;
+        struct clawctl  ctlbk;
+        struct clawctl *p_ctlbk;
+        char    temp_host_name[8];
+        char    temp_ws_name[8];
+        struct claw_privbk *privptr;
+        struct claw_env *p_env;
+        struct sysval *p_sysval;
+        struct conncmd *p_connect=NULL;
+        int rc;
+        struct chbk *p_ch = NULL;
+	struct device *tdev;
+	CLAW_DBF_TEXT(2, setup, "clw_cntl");
+        udelay(1000);  /* Wait a ms for the control packets to
+			* catch up to each other */
+	privptr = dev->ml_priv;
+        p_env=privptr->p_env;
+	tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
+	memcpy( &temp_host_name, p_env->host_name, 8);
+        memcpy( &temp_ws_name, p_env->adapter_name , 8);
+	dev_info(tdev, "%s: CLAW device %.8s: "
+		"Received Control Packet\n",
+		dev->name, temp_ws_name);
+        if (privptr->release_pend==1) {
+                return 0;
+        }
+        p_buf=p_ccw->p_buffer;
+        p_ctlbk=&ctlbk;
+	if (p_env->packing == DO_PACKED) { /* packing in progress?*/
+		memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
+	} else {
+		memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
+	}
+        switch (p_ctlbk->command)
+        {
+	case SYSTEM_VALIDATE_REQUEST:
+		if (p_ctlbk->version != CLAW_VERSION_ID) {
+			claw_snd_sys_validate_rsp(dev, p_ctlbk,
+				CLAW_RC_WRONG_VERSION);
+			dev_warn(tdev, "The communication peer of %s"
+				" uses an incorrect API version %d\n",
+				dev->name, p_ctlbk->version);
+		}
+		p_sysval = (struct sysval *)&(p_ctlbk->data);
+		dev_info(tdev, "%s: Recv Sys Validate Request: "
+			"Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
+			"Host name=%.8s\n",
+			dev->name, p_ctlbk->version,
+			p_ctlbk->linkid,
+			p_ctlbk->correlator,
+			p_sysval->WS_name,
+			p_sysval->host_name);
+		if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
+			claw_snd_sys_validate_rsp(dev, p_ctlbk,
+				CLAW_RC_NAME_MISMATCH);
+			CLAW_DBF_TEXT(2, setup, "HSTBAD");
+			CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
+			CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
+			dev_warn(tdev,
+				"Host name %s for %s does not match the"
+				" remote adapter name %s\n",
+				p_sysval->host_name,
+				dev->name,
+				temp_host_name);
+		}
+		if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
+			claw_snd_sys_validate_rsp(dev, p_ctlbk,
+				CLAW_RC_NAME_MISMATCH);
+			CLAW_DBF_TEXT(2, setup, "WSNBAD");
+			CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
+			CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
+			dev_warn(tdev, "Adapter name %s for %s does not match"
+				" the remote host name %s\n",
+				p_sysval->WS_name,
+				dev->name,
+				temp_ws_name);
+		}
+		if ((p_sysval->write_frame_size < p_env->write_size) &&
+		    (p_env->packing == 0)) {
+			claw_snd_sys_validate_rsp(dev, p_ctlbk,
+				CLAW_RC_HOST_RCV_TOO_SMALL);
+			dev_warn(tdev,
+				"The local write buffer is smaller than the"
+				" remote read buffer\n");
+			CLAW_DBF_TEXT(2, setup, "wrtszbad");
+		}
+		if ((p_sysval->read_frame_size < p_env->read_size) &&
+		    (p_env->packing == 0)) {
+			claw_snd_sys_validate_rsp(dev, p_ctlbk,
+				CLAW_RC_HOST_RCV_TOO_SMALL);
+			dev_warn(tdev,
+				"The local read buffer is smaller than the"
+				" remote write buffer\n");
+			CLAW_DBF_TEXT(2, setup, "rdsizbad");
+		}
+		claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
+		dev_info(tdev,
+			"CLAW device %.8s: System validate"
+			" completed.\n", temp_ws_name);
+		dev_info(tdev,
+			"%s: sys Validate Rsize:%d Wsize:%d\n",
+			dev->name, p_sysval->read_frame_size,
+			p_sysval->write_frame_size);
+		privptr->system_validate_comp = 1;
+		if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
+			p_env->packing = PACKING_ASK;
+		claw_strt_conn_req(dev);
+		break;
+	case SYSTEM_VALIDATE_RESPONSE:
+		p_sysval = (struct sysval *)&(p_ctlbk->data);
+		dev_info(tdev,
+			"Settings for %s validated (version=%d, "
+			"remote device=%d, rc=%d, adapter name=%.8s, "
+			"host name=%.8s)\n",
+			dev->name,
+			p_ctlbk->version,
+			p_ctlbk->correlator,
+			p_ctlbk->rc,
+			p_sysval->WS_name,
+			p_sysval->host_name);
+		switch (p_ctlbk->rc) {
+		case 0:
+			dev_info(tdev, "%s: CLAW device "
+				"%.8s: System validate completed.\n",
+				dev->name, temp_ws_name);
+			if (privptr->system_validate_comp == 0)
+				claw_strt_conn_req(dev);
+			privptr->system_validate_comp = 1;
+			break;
+		case CLAW_RC_NAME_MISMATCH:
+			dev_warn(tdev, "Validating %s failed because of"
+				" a host or adapter name mismatch\n",
+				dev->name);
+			break;
+		case CLAW_RC_WRONG_VERSION:
+			dev_warn(tdev, "Validating %s failed because of a"
+				" version conflict\n",
+				dev->name);
+			break;
+		case CLAW_RC_HOST_RCV_TOO_SMALL:
+			dev_warn(tdev, "Validating %s failed because of a"
+				" frame size conflict\n",
+				dev->name);
+			break;
+		default:
+			dev_warn(tdev, "The communication peer of %s rejected"
+				" the connection\n",
+				 dev->name);
+			break;
+		}
+		break;
+
+	case CONNECTION_REQUEST:
+		p_connect = (struct conncmd *)&(p_ctlbk->data);
+		dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
+			"Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
+			dev->name,
+			p_ctlbk->version,
+			p_ctlbk->linkid,
+			p_ctlbk->correlator,
+			p_connect->host_name,
+			p_connect->WS_name);
+		if (privptr->active_link_ID != 0) {
+			claw_snd_disc(dev, p_ctlbk);
+			dev_info(tdev, "%s rejected a connection request"
+				" because it is already active\n",
+				dev->name);
+		}
+		if (p_ctlbk->linkid != 1) {
+			claw_snd_disc(dev, p_ctlbk);
+			dev_info(tdev, "%s rejected a request to open multiple"
+				" connections\n",
+				dev->name);
+		}
+		rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
+		if (rc != 0) {
+			claw_snd_disc(dev, p_ctlbk);
+			dev_info(tdev, "%s rejected a connection request"
+				" because of a type mismatch\n",
+				dev->name);
+		}
+		claw_send_control(dev,
+			CONNECTION_CONFIRM, p_ctlbk->linkid,
+			p_ctlbk->correlator,
+			0, p_connect->host_name,
+			p_connect->WS_name);
+		if (p_env->packing == PACKING_ASK) {
+			p_env->packing = PACK_SEND;
+			claw_snd_conn_req(dev, 0);
+		}
+		dev_info(tdev, "%s: CLAW device %.8s: Connection "
+			"completed link_id=%d.\n",
+			dev->name, temp_ws_name,
+			p_ctlbk->linkid);
+		privptr->active_link_ID = p_ctlbk->linkid;
+		p_ch = &privptr->channel[WRITE_CHANNEL];
+		wake_up(&p_ch->wait);  /* wake up claw_open (WRITE) */
+		break;
+	case CONNECTION_RESPONSE:
+		p_connect = (struct conncmd *)&(p_ctlbk->data);
+		dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
+			"Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
+			dev->name,
+			p_ctlbk->version,
+			p_ctlbk->linkid,
+			p_ctlbk->correlator,
+			p_ctlbk->rc,
+			p_connect->host_name,
+			p_connect->WS_name);
+
+		if (p_ctlbk->rc != 0) {
+			dev_warn(tdev, "The communication peer of %s rejected"
+				" a connection request\n",
+				dev->name);
+			return 1;
+		}
+		rc = find_link(dev,
+			p_connect->host_name, p_connect->WS_name);
+		if (rc != 0) {
+			claw_snd_disc(dev, p_ctlbk);
+			dev_warn(tdev, "The communication peer of %s"
+				" rejected a connection "
+				"request because of a type mismatch\n",
+				 dev->name);
+		}
+		/* should be until CONNECTION_CONFIRM */
+		privptr->active_link_ID = -(p_ctlbk->linkid);
+		break;
+	case CONNECTION_CONFIRM:
+		p_connect = (struct conncmd *)&(p_ctlbk->data);
+		dev_info(tdev,
+			"%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
+			"Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
+			dev->name,
+			p_ctlbk->version,
+			p_ctlbk->linkid,
+			p_ctlbk->correlator,
+			p_connect->host_name,
+			p_connect->WS_name);
+		if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
+			privptr->active_link_ID = p_ctlbk->linkid;
+			if (p_env->packing > PACKING_ASK) {
+				dev_info(tdev,
+				"%s: Confirmed Now packing\n", dev->name);
+				p_env->packing = DO_PACKED;
+			}
+			p_ch = &privptr->channel[WRITE_CHANNEL];
+			wake_up(&p_ch->wait);
+		} else {
+			dev_warn(tdev, "Activating %s failed because of"
+				" an incorrect link ID=%d\n",
+				dev->name, p_ctlbk->linkid);
+			claw_snd_disc(dev, p_ctlbk);
+		}
+		break;
+	case DISCONNECT:
+		dev_info(tdev, "%s: Disconnect: "
+			"Vers=%d,link_id=%d,Corr=%d\n",
+			dev->name, p_ctlbk->version,
+			p_ctlbk->linkid, p_ctlbk->correlator);
+		if ((p_ctlbk->linkid == 2) &&
+		    (p_env->packing == PACK_SEND)) {
+			privptr->active_link_ID = 1;
+			p_env->packing = DO_PACKED;
+		} else
+			privptr->active_link_ID = 0;
+		break;
+	case CLAW_ERROR:
+		dev_warn(tdev, "The communication peer of %s failed\n",
+			dev->name);
+		break;
+	default:
+		dev_warn(tdev, "The communication peer of %s sent"
+			" an unknown command code\n",
+			dev->name);
+		break;
+        }
+
+        return 0;
+}   /*    end of claw_process_control    */
+
+
+/*-------------------------------------------------------------------*
+*               claw_send_control                                    *
+*                                                                    *
+*--------------------------------------------------------------------*/
+
+static int
+claw_send_control(struct net_device *dev, __u8 type, __u8 link,
+	 __u8 correlator, __u8 rc, char *local_name, char *remote_name)
+{
+        struct claw_privbk 		*privptr;
+        struct clawctl                  *p_ctl;
+        struct sysval                   *p_sysval;
+        struct conncmd                  *p_connect;
+        struct sk_buff 			*skb;
+
+	CLAW_DBF_TEXT(2, setup, "sndcntl");
+	privptr = dev->ml_priv;
+        p_ctl=(struct clawctl *)&privptr->ctl_bk;
+
+        p_ctl->command=type;
+        p_ctl->version=CLAW_VERSION_ID;
+        p_ctl->linkid=link;
+        p_ctl->correlator=correlator;
+        p_ctl->rc=rc;
+
+        p_sysval=(struct sysval *)&p_ctl->data;
+        p_connect=(struct conncmd *)&p_ctl->data;
+
+        switch (p_ctl->command) {
+                case SYSTEM_VALIDATE_REQUEST:
+                case SYSTEM_VALIDATE_RESPONSE:
+                        memcpy(&p_sysval->host_name, local_name, 8);
+                        memcpy(&p_sysval->WS_name, remote_name, 8);
+			if (privptr->p_env->packing > 0) {
+				p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
+				p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
+			} else {
+				/* how big is the biggest group of packets */
+			   p_sysval->read_frame_size =
+				privptr->p_env->read_size;
+			   p_sysval->write_frame_size =
+				privptr->p_env->write_size;
+			}
+                        memset(&p_sysval->reserved, 0x00, 4);
+                        break;
+                case CONNECTION_REQUEST:
+                case CONNECTION_RESPONSE:
+                case CONNECTION_CONFIRM:
+                case DISCONNECT:
+                        memcpy(&p_sysval->host_name, local_name, 8);
+                        memcpy(&p_sysval->WS_name, remote_name, 8);
+			if (privptr->p_env->packing > 0) {
+			/* How big is the biggest packet */
+				p_connect->reserved1[0]=CLAW_FRAME_SIZE;
+                        	p_connect->reserved1[1]=CLAW_FRAME_SIZE;
+			} else {
+	                        memset(&p_connect->reserved1, 0x00, 4);
+        	                memset(&p_connect->reserved2, 0x00, 4);
+			}
+                        break;
+                default:
+                        break;
+        }
+
+        /*      write Control Record to the device                   */
+
+
+        skb = dev_alloc_skb(sizeof(struct clawctl));
+        if (!skb) {
+                return -ENOMEM;
+        }
+	memcpy(skb_put(skb, sizeof(struct clawctl)),
+		p_ctl, sizeof(struct clawctl));
+	if (privptr->p_env->packing >= PACK_SEND)
+		claw_hw_tx(skb, dev, 1);
+	else
+        	claw_hw_tx(skb, dev, 0);
+        return 0;
+}  /*   end of claw_send_control  */
+
+/*-------------------------------------------------------------------*
+*               claw_snd_conn_req                                    *
+*                                                                    *
+*--------------------------------------------------------------------*/
+static int
+claw_snd_conn_req(struct net_device *dev, __u8 link)
+{
+        int                rc;
+	struct claw_privbk *privptr = dev->ml_priv;
+        struct clawctl 	   *p_ctl;
+
+	CLAW_DBF_TEXT(2, setup, "snd_conn");
+	rc = 1;
+        p_ctl=(struct clawctl *)&privptr->ctl_bk;
+	p_ctl->linkid = link;
+        if ( privptr->system_validate_comp==0x00 ) {
+                return rc;
+        }
+	if (privptr->p_env->packing == PACKING_ASK )
+		rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
+        		WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
+	if (privptr->p_env->packing == PACK_SEND)  {
+		rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
+        		WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
+	}
+	if (privptr->p_env->packing == 0)
+        	rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
+       			HOST_APPL_NAME, privptr->p_env->api_type);
+        return rc;
+
+}  /*  end of claw_snd_conn_req */
+
+
+/*-------------------------------------------------------------------*
+*               claw_snd_disc                                        *
+*                                                                    *
+*--------------------------------------------------------------------*/
+
+static int
+claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
+{
+        int rc;
+        struct conncmd *  p_connect;
+
+	CLAW_DBF_TEXT(2, setup, "snd_dsc");
+        p_connect=(struct conncmd *)&p_ctl->data;
+
+        rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
+		p_ctl->correlator, 0,
+                p_connect->host_name, p_connect->WS_name);
+        return rc;
+}     /*   end of claw_snd_disc    */
+
+
+/*-------------------------------------------------------------------*
+*               claw_snd_sys_validate_rsp                            *
+*                                                                    *
+*--------------------------------------------------------------------*/
+
+static int
+claw_snd_sys_validate_rsp(struct net_device *dev,
+	struct clawctl *p_ctl, __u32 return_code)
+{
+        struct claw_env *  p_env;
+        struct claw_privbk *privptr;
+        int    rc;
+
+	CLAW_DBF_TEXT(2, setup, "chkresp");
+	privptr = dev->ml_priv;
+        p_env=privptr->p_env;
+        rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
+		p_ctl->linkid,
+		p_ctl->correlator,
+                return_code,
+		p_env->host_name,
+		p_env->adapter_name  );
+        return rc;
+}     /*    end of claw_snd_sys_validate_rsp    */
+
+/*-------------------------------------------------------------------*
+*               claw_strt_conn_req                                   *
+*                                                                    *
+*--------------------------------------------------------------------*/
+
+static int
+claw_strt_conn_req(struct net_device *dev )
+{
+        int rc;
+
+	CLAW_DBF_TEXT(2, setup, "conn_req");
+        rc=claw_snd_conn_req(dev, 1);
+        return rc;
+}    /*   end of claw_strt_conn_req   */
+
+
+
+/*-------------------------------------------------------------------*
+ *   claw_stats                                                      *
+ *-------------------------------------------------------------------*/
+
+static struct
+net_device_stats *claw_stats(struct net_device *dev)
+{
+        struct claw_privbk *privptr;
+
+	CLAW_DBF_TEXT(4, trace, "stats");
+	privptr = dev->ml_priv;
+        return &privptr->stats;
+}     /*   end of claw_stats   */
+
+
+/*-------------------------------------------------------------------*
+*       unpack_read                                                  *
+*                                                                    *
+*--------------------------------------------------------------------*/
+static void
+unpack_read(struct net_device *dev )
+{
+        struct sk_buff *skb;
+        struct claw_privbk *privptr;
+	struct claw_env    *p_env;
+        struct ccwbk 	*p_this_ccw;
+        struct ccwbk 	*p_first_ccw;
+        struct ccwbk 	*p_last_ccw;
+	struct clawph 	*p_packh;
+	void		*p_packd;
+	struct clawctl 	*p_ctlrec=NULL;
+	struct device	*p_dev;
+
+        __u32	len_of_data;
+	__u32	pack_off;
+        __u8	link_num;
+        __u8 	mtc_this_frm=0;
+        __u32	bytes_to_mov;
+        int	i=0;
+	int     p=0;
+
+	CLAW_DBF_TEXT(4, trace, "unpkread");
+        p_first_ccw=NULL;
+        p_last_ccw=NULL;
+	p_packh=NULL;
+	p_packd=NULL;
+	privptr = dev->ml_priv;
+
+	p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
+	p_env = privptr->p_env;
+        p_this_ccw=privptr->p_read_active_first;
+	while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
+		pack_off = 0;
+		p = 0;
+		p_this_ccw->header.flag=CLAW_PENDING;
+		privptr->p_read_active_first=p_this_ccw->next;
+                p_this_ccw->next=NULL;
+		p_packh = (struct clawph *)p_this_ccw->p_buffer;
+		if ((p_env->packing == PACK_SEND) &&
+		    (p_packh->len == 32)           &&
+		    (p_packh->link_num == 0)) {   /* is it a packed ctl rec? */
+			p_packh++;  /* peek past pack header */
+			p_ctlrec = (struct clawctl *)p_packh;
+			p_packh--;  /* un peek */
+			if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
+		            (p_ctlrec->command == CONNECTION_CONFIRM))
+				p_env->packing = DO_PACKED;
+		}
+		if (p_env->packing == DO_PACKED)
+			link_num=p_packh->link_num;
+		else
+	                link_num=p_this_ccw->header.opcode / 8;
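+		/*
+		 * In non-packed mode the link number is presumably encoded
+		 * in the high-order bits of the read CCW opcode (opcode =
+		 * link * 8 + command code), hence the division by 8 above.
+		 */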
+                if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
+                        mtc_this_frm=1;
+                        if (p_this_ccw->header.length!=
+				privptr->p_env->read_size ) {
+				dev_warn(p_dev,
+					"The communication peer of %s"
+					" sent a faulty"
+					" frame of length %02x\n",
+                                        dev->name, p_this_ccw->header.length);
+                        }
+                }
+
+                if (privptr->mtc_skipping) {
+                        /*
+                         * We are skipping past a multi-frame message that
+                         * we cannot process for some reason or other.  The
+                         * first frame without the More-To-Come flag is the
+                         * last frame of the skipped message.
+                         */
+                        /*  in case of More-To-Come not set in this frame */
+                        if (mtc_this_frm==0) {
+                                privptr->mtc_skipping=0; /* Ok, the end */
+                                privptr->mtc_logical_link=-1;
+                        }
+                        goto NextFrame;
+                }
+
+                if (link_num==0) {
+                        claw_process_control(dev, p_this_ccw);
+			CLAW_DBF_TEXT(4, trace, "UnpkCntl");
+                        goto NextFrame;
+                }
+unpack_next:
+		if (p_env->packing == DO_PACKED) {
+			if (pack_off > p_env->read_size)
+				goto NextFrame;
+			p_packd = p_this_ccw->p_buffer+pack_off;
+			p_packh = (struct clawph *) p_packd;
+			if ((p_packh->len == 0) || /* done with this frame? */
+			    (p_packh->flag != 0))
+				goto NextFrame;
+			bytes_to_mov = p_packh->len;
+			pack_off += bytes_to_mov+sizeof(struct clawph);
+			p++;
+		} else {
+                	bytes_to_mov=p_this_ccw->header.length;
+		}
+                if (privptr->mtc_logical_link<0) {
+                        /*
+                         * If More-To-Come is set in this frame we do not
+                         * know the length of the entire message yet and
+                         * hence have to allocate a large buffer.
+                         * We are starting a new envelope here.
+                         */
+                        privptr->mtc_offset=0;
+                        privptr->mtc_logical_link=link_num;
+                }
+
+                if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
+                        /*      error     */
+                        privptr->stats.rx_frame_errors++;
+                        goto NextFrame;
+                }
+		if (p_env->packing == DO_PACKED) {
+			memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
+				p_packd+sizeof(struct clawph), bytes_to_mov);
+
+		} else	{
+                	memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
+                        	p_this_ccw->p_buffer, bytes_to_mov);
+		}
+                if (mtc_this_frm==0) {
+                        len_of_data=privptr->mtc_offset+bytes_to_mov;
+                        skb=dev_alloc_skb(len_of_data);
+                        if (skb) {
+                                memcpy(skb_put(skb,len_of_data),
+					privptr->p_mtc_envelope,
+					len_of_data);
+                                skb->dev=dev;
+				skb_reset_mac_header(skb);
+                                skb->protocol=htons(ETH_P_IP);
+                                skb->ip_summed=CHECKSUM_UNNECESSARY;
+                                privptr->stats.rx_packets++;
+				privptr->stats.rx_bytes+=len_of_data;
+                                netif_rx(skb);
+                        }
+                        else {
+				dev_info(p_dev, "Allocating a buffer for"
+					" incoming data failed\n");
+                                privptr->stats.rx_dropped++;
+                        }
+                        privptr->mtc_offset=0;
+                        privptr->mtc_logical_link=-1;
+                }
+                else {
+                        privptr->mtc_offset+=bytes_to_mov;
+                }
+		if (p_env->packing == DO_PACKED)
+			goto unpack_next;
+NextFrame:
+                /*
+                *   Remove ThisCCWblock from active read queue, and add it
+                *   to queue of free blocks to be reused.
+                */
+                i++;
+                p_this_ccw->header.length=0xffff;
+                p_this_ccw->header.opcode=0xff;
+                /*
+                *       add this one to the free queue for later reuse
+                */
+                if (p_first_ccw==NULL) {
+                        p_first_ccw = p_this_ccw;
+                }
+                else {
+                        p_last_ccw->next = p_this_ccw;
+                }
+                p_last_ccw = p_this_ccw;
+                /*
+                *       chain to next block on active read queue
+                */
+                p_this_ccw = privptr->p_read_active_first;
+		CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
+        } /* end of while */
+
+        /*      check validity                  */
+
+	CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
+        add_claw_reads(dev, p_first_ccw, p_last_ccw);
+        claw_strt_read(dev, LOCK_YES);
+        return;
+}     /*  end of unpack_read   */
+
+/*-------------------------------------------------------------------*
+*       claw_strt_read                                               *
+*                                                                    *
+*--------------------------------------------------------------------*/
+static void
+claw_strt_read (struct net_device *dev, int lock )
+{
+        int        rc = 0;
+        __u32      parm;
+        unsigned long  saveflags = 0;
+	struct claw_privbk *privptr = dev->ml_priv;
+        struct ccwbk*p_ccwbk;
+        struct chbk *p_ch;
+        struct clawh *p_clawh;
+	p_ch = &privptr->channel[READ_CHANNEL];
+
+	CLAW_DBF_TEXT(4, trace, "StRdNter");
+        p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
+        p_clawh->flag=CLAW_IDLE;    /* 0x00 */
+
+        if ((privptr->p_write_active_first!=NULL &&
+             privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
+            (privptr->p_read_active_first!=NULL &&
+             privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
+                p_clawh->flag=CLAW_BUSY;    /* 0xff */
+        }
+        if (lock==LOCK_YES) {
+                spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
+        }
+        if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
+		CLAW_DBF_TEXT(4, trace, "HotRead");
+                p_ccwbk=privptr->p_read_active_first;
+                parm = (unsigned long) p_ch;
+                rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
+				       0xff, 0);
+                if (rc != 0) {
+                        ccw_check_return_code(p_ch->cdev, rc);
+                }
+        }
+	else {
+		CLAW_DBF_TEXT(2, trace, "ReadAct");
+	}
+
+        if (lock==LOCK_YES) {
+                spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
+        }
+	CLAW_DBF_TEXT(4, trace, "StRdExit");
+        return;
+}       /*    end of claw_strt_read    */
+
+/*-------------------------------------------------------------------*
+*       claw_strt_out_IO                                             *
+*                                                                    *
+*--------------------------------------------------------------------*/
+
+static void
+claw_strt_out_IO( struct net_device *dev )
+{
+        int             	rc = 0;
+        unsigned long   	parm;
+        struct claw_privbk 	*privptr;
+        struct chbk     	*p_ch;
+        struct ccwbk   	*p_first_ccw;
+
+	if (!dev) {
+		return;
+	}
+	privptr = (struct claw_privbk *)dev->ml_priv;
+	p_ch = &privptr->channel[WRITE_CHANNEL];
+
+	CLAW_DBF_TEXT(4, trace, "strt_io");
+        p_first_ccw=privptr->p_write_active_first;
+
+        if (p_ch->claw_state == CLAW_STOP)
+                return;
+        if (p_first_ccw == NULL) {
+                return;
+        }
+        if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
+                parm = (unsigned long) p_ch;
+		CLAW_DBF_TEXT(2, trace, "StWrtIO");
+		rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
+				      0xff, 0);
+                if (rc != 0) {
+                        ccw_check_return_code(p_ch->cdev, rc);
+                }
+        }
+        dev->trans_start = jiffies;
+        return;
+}       /*    end of claw_strt_out_IO    */
+
+/*-------------------------------------------------------------------*
+*       Free write buffers                                           *
+*                                                                    *
+*--------------------------------------------------------------------*/
+
+static void
+claw_free_wrt_buf( struct net_device *dev )
+{
+
+	struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
+	struct ccwbk*p_this_ccw;
+	struct ccwbk*p_next_ccw;
+
+	CLAW_DBF_TEXT(4, trace, "freewrtb");
+        /*  scan the write queue to free any completed write packets   */
+        p_this_ccw=privptr->p_write_active_first;
+        while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
+        {
+                p_next_ccw = p_this_ccw->next;
+                if (((p_next_ccw!=NULL) &&
+		     (p_next_ccw->header.flag!=CLAW_PENDING)) ||
+                    ((p_this_ccw == privptr->p_write_active_last) &&
+                     (p_this_ccw->header.flag!=CLAW_PENDING))) {
+                        /* The next CCW is OK or this is  */
+			/* the last CCW...free it   @A1A  */
+                        privptr->p_write_active_first=p_this_ccw->next;
+			p_this_ccw->header.flag=CLAW_PENDING;
+                        p_this_ccw->next=privptr->p_write_free_chain;
+			privptr->p_write_free_chain=p_this_ccw;
+                        ++privptr->write_free_count;
+			privptr->stats.tx_bytes+= p_this_ccw->write.count;
+			p_this_ccw=privptr->p_write_active_first;
+                        privptr->stats.tx_packets++;
+                }
+                else {
+			break;
+                }
+        }
+        if (privptr->write_free_count!=0) {
+                claw_clearbit_busy(TB_NOBUFFER,dev);
+        }
+        /*   whole chain removed?   */
+        if (privptr->p_write_active_first==NULL) {
+                privptr->p_write_active_last=NULL;
+        }
+	CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count);
+        return;
+}
+
+/*-------------------------------------------------------------------*
+*       claw free netdevice                                          *
+*                                                                    *
+*--------------------------------------------------------------------*/
+static void
+claw_free_netdevice(struct net_device * dev, int free_dev)
+{
+	struct claw_privbk *privptr;
+
+	CLAW_DBF_TEXT(2, setup, "free_dev");
+	if (!dev)
+		return;
+	CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
+	privptr = dev->ml_priv;
+	if (dev->flags & IFF_RUNNING)
+		claw_release(dev);
+	if (privptr) {
+		privptr->channel[READ_CHANNEL].ndev = NULL;  /* say it's free */
+	}
+	dev->ml_priv = NULL;
+#ifdef MODULE
+	if (free_dev) {
+		free_netdev(dev);
+	}
+#endif
+	CLAW_DBF_TEXT(2, setup, "free_ok");
+}
+
+/**
+ * Claw init netdevice
+ * Initialize everything of the net device except the name and the
+ * channel structs.
+ */
+static const struct net_device_ops claw_netdev_ops = {
+	.ndo_open		= claw_open,
+	.ndo_stop		= claw_release,
+	.ndo_get_stats		= claw_stats,
+	.ndo_start_xmit		= claw_tx,
+	.ndo_change_mtu		= claw_change_mtu,
+};
+
+static void
+claw_init_netdevice(struct net_device * dev)
+{
+	CLAW_DBF_TEXT(2, setup, "init_dev");
+	CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
+	dev->mtu = CLAW_DEFAULT_MTU_SIZE;
+	dev->hard_header_len = 0;
+	dev->addr_len = 0;
+	dev->type = ARPHRD_SLIP;
+	dev->tx_queue_len = 1300;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	dev->netdev_ops = &claw_netdev_ops;
+	CLAW_DBF_TEXT(2, setup, "initok");
+	return;
+}
+
+/**
+ * Init a new channel in the privptr->channel[i].
+ *
+ * @param cdev  The ccw_device to be added.
+ *
+ * @return 0 on success, !0 on error.
+ */
+static int
+add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
+{
+	struct chbk *p_ch;
+	struct ccw_dev_id dev_id;
+
+	CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
+	privptr->channel[i].flag  = i+1;   /* Read is 1 Write is 2 */
+	p_ch = &privptr->channel[i];
+	p_ch->cdev = cdev;
+	snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
+	ccw_device_get_id(cdev, &dev_id);
+	p_ch->devno = dev_id.devno;
+	if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
+		return -ENOMEM;
+	}
+	return 0;
+}
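+/*
+ * Note: the irb allocated in add_channel() is released again in
+ * claw_remove_device() when the group device goes away.
+ */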
+
+
+/**
+ * Set up an interface.
+ *
+ * @param cgdev  Device to be set up.
+ *
+ * @returns 0 on success, !0 on failure.
+ */
+static int
+claw_new_device(struct ccwgroup_device *cgdev)
+{
+	struct claw_privbk *privptr;
+	struct claw_env *p_env;
+	struct net_device *dev;
+	int ret;
+	struct ccw_dev_id dev_id;
+
+	dev_info(&cgdev->dev, "add for %s\n",
+		 dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
+	CLAW_DBF_TEXT(2, setup, "new_dev");
+	privptr = dev_get_drvdata(&cgdev->dev);
+	dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+	dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
+	if (!privptr)
+		return -ENODEV;
+	p_env = privptr->p_env;
+	ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
+	p_env->devno[READ_CHANNEL] = dev_id.devno;
+	ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
+	p_env->devno[WRITE_CHANNEL] = dev_id.devno;
+	ret = add_channel(cgdev->cdev[0],0,privptr);
+	if (ret == 0)
+		ret = add_channel(cgdev->cdev[1],1,privptr);
+	if (ret != 0) {
+		dev_warn(&cgdev->dev, "Creating a CLAW group device"
+			" failed with error code %d\n", ret);
+		goto out;
+	}
+	ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
+	if (ret != 0) {
+		dev_warn(&cgdev->dev,
+			"Setting the read subchannel online"
+			" failed with error code %d\n", ret);
+		goto out;
+	}
+	ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
+	if (ret != 0) {
+		dev_warn(&cgdev->dev,
+			"Setting the write subchannel online "
+			"failed with error code %d\n", ret);
+		goto out;
+	}
+	dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
+	if (!dev) {
+		dev_warn(&cgdev->dev,
+			"Activating the CLAW device failed\n");
+		goto out;
+	}
+	dev->ml_priv = privptr;
+	dev_set_drvdata(&cgdev->dev, privptr);
+	dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+	dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
+	/* sysfs magic */
+        SET_NETDEV_DEV(dev, &cgdev->dev);
+	if (register_netdev(dev) != 0) {
+		claw_free_netdevice(dev, 1);
+		CLAW_DBF_TEXT(2, trace, "regfail");
+		goto out;
+	}
+	dev->flags &=~IFF_RUNNING;
+	if (privptr->buffs_alloc == 0) {
+	        ret=init_ccw_bk(dev);
+		if (ret !=0) {
+			unregister_netdev(dev);
+			claw_free_netdevice(dev,1);
+			CLAW_DBF_TEXT(2, trace, "ccwmem");
+			goto out;
+		}
+	}
+	privptr->channel[READ_CHANNEL].ndev = dev;
+	privptr->channel[WRITE_CHANNEL].ndev = dev;
+	privptr->p_env->ndev = dev;
+
+	dev_info(&cgdev->dev, "%s:readsize=%d  writesize=%d "
+		"readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
+                dev->name, p_env->read_size,
+		p_env->write_size, p_env->read_buffers,
+		p_env->write_buffers, p_env->devno[READ_CHANNEL],
+		p_env->devno[WRITE_CHANNEL]);
+	dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
+		":%.8s api_type: %.8s\n",
+                dev->name, p_env->host_name,
+		p_env->adapter_name , p_env->api_type);
+	return 0;
+out:
+	ccw_device_set_offline(cgdev->cdev[1]);
+	ccw_device_set_offline(cgdev->cdev[0]);
+	return -ENODEV;
+}
+
+static void
+claw_purge_skb_queue(struct sk_buff_head *q)
+{
+        struct sk_buff *skb;
+
+	CLAW_DBF_TEXT(4, trace, "purgque");
+        while ((skb = skb_dequeue(q))) {
+                atomic_dec(&skb->users);
+                dev_kfree_skb_any(skb);
+        }
+}
+
+/**
+ * Shut down an interface.
+ *
+ * @param cgdev  Device to be shut down.
+ *
+ * @returns 0 on success, !0 on failure.
+ */
+static int
+claw_shutdown_device(struct ccwgroup_device *cgdev)
+{
+	struct claw_privbk *priv;
+	struct net_device *ndev;
+	int ret = 0;
+
+	CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
+	priv = dev_get_drvdata(&cgdev->dev);
+	if (!priv)
+		return -ENODEV;
+	ndev = priv->channel[READ_CHANNEL].ndev;
+	if (ndev) {
+		/* Close the device */
+		dev_info(&cgdev->dev, "%s: shutting down\n",
+			ndev->name);
+		if (ndev->flags & IFF_RUNNING)
+			ret = claw_release(ndev);
+		ndev->flags &=~IFF_RUNNING;
+		unregister_netdev(ndev);
+		ndev->ml_priv = NULL;  /* cgdev data, not ndev's to free */
+		claw_free_netdevice(ndev, 1);
+		priv->channel[READ_CHANNEL].ndev = NULL;
+		priv->channel[WRITE_CHANNEL].ndev = NULL;
+		priv->p_env->ndev = NULL;
+	}
+	ccw_device_set_offline(cgdev->cdev[1]);
+	ccw_device_set_offline(cgdev->cdev[0]);
+	return ret;
+}
+
+static void
+claw_remove_device(struct ccwgroup_device *cgdev)
+{
+	struct claw_privbk *priv;
+
+	BUG_ON(!cgdev);
+	CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
+	priv = dev_get_drvdata(&cgdev->dev);
+	BUG_ON(!priv);
+	dev_info(&cgdev->dev, " will be removed.\n");
+	if (cgdev->state == CCWGROUP_ONLINE)
+		claw_shutdown_device(cgdev);
+	claw_remove_files(&cgdev->dev);
+	kfree(priv->p_mtc_envelope);
+	priv->p_mtc_envelope=NULL;
+	kfree(priv->p_env);
+	priv->p_env=NULL;
+	kfree(priv->channel[0].irb);
+	priv->channel[0].irb=NULL;
+	kfree(priv->channel[1].irb);
+	priv->channel[1].irb=NULL;
+	kfree(priv);
+	dev_set_drvdata(&cgdev->dev, NULL);
+	dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
+	dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
+	put_device(&cgdev->dev);
+
+	return;
+}
+
+
+/*
+ * sysfs attributes
+ */
+static ssize_t
+claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+
+	priv = dev_get_drvdata(dev);
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	return sprintf(buf, "%s\n",p_env->host_name);
+}
+
+static ssize_t
+claw_hname_write(struct device *dev, struct device_attribute *attr,
+	 const char *buf, size_t count)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+
+	priv = dev_get_drvdata(dev);
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	if (count > MAX_NAME_LEN+1)
+		return -EINVAL;
+	memset(p_env->host_name, 0x20, MAX_NAME_LEN);
+	strncpy(p_env->host_name,buf, count);
+	p_env->host_name[count-1] = 0x20;  /* clear extra 0x0a */
+	p_env->host_name[MAX_NAME_LEN] = 0x00;
+	CLAW_DBF_TEXT(2, setup, "HstnSet");
+	CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name);
+
+	return count;
+}
+
+static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
+
+static ssize_t
+claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+
+	priv = dev_get_drvdata(dev);
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	return sprintf(buf, "%s\n", p_env->adapter_name);
+}
+
+static ssize_t
+claw_adname_write(struct device *dev, struct device_attribute *attr,
+	 const char *buf, size_t count)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+
+	priv = dev_get_drvdata(dev);
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	if (count > MAX_NAME_LEN+1)
+		return -EINVAL;
+	memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
+	strncpy(p_env->adapter_name,buf, count);
+	p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
+	p_env->adapter_name[MAX_NAME_LEN] = 0x00;
+	CLAW_DBF_TEXT(2, setup, "AdnSet");
+	CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name);
+
+	return count;
+}
+
+static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
+
+static ssize_t
+claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+
+	priv = dev_get_drvdata(dev);
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	return sprintf(buf, "%s\n",
+		       p_env->api_type);
+}
+
+static ssize_t
+claw_apname_write(struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+
+	priv = dev_get_drvdata(dev);
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	if (count > MAX_NAME_LEN+1)
+		return -EINVAL;
+	memset(p_env->api_type, 0x20, MAX_NAME_LEN);
+	strncpy(p_env->api_type,buf, count);
+	p_env->api_type[count-1] = 0x20;  /* we get a loose 0x0a */
+	p_env->api_type[MAX_NAME_LEN] = 0x00;
+	if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
+		p_env->read_size=DEF_PACK_BUFSIZE;
+		p_env->write_size=DEF_PACK_BUFSIZE;
+		p_env->packing=PACKING_ASK;
+		CLAW_DBF_TEXT(2, setup, "PACKING");
+	}
+	else {
+		p_env->packing=0;
+		p_env->read_size=CLAW_FRAME_SIZE;
+		p_env->write_size=CLAW_FRAME_SIZE;
+		CLAW_DBF_TEXT(2, setup, "ApiSet");
+	}
+	CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type);
+	return count;
+}
+
+static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
+
+static ssize_t
+claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct claw_privbk *priv;
+	struct claw_env * p_env;
+
+	priv = dev_get_drvdata(dev);
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	return sprintf(buf, "%d\n", p_env->write_buffers);
+}
+
+static ssize_t
+claw_wbuff_write(struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+	int nnn,max;
+
+	priv = dev_get_drvdata(dev);
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	sscanf(buf, "%i", &nnn);
+	if (p_env->packing) {
+		max = 64;
+	}
+	else {
+		max = 512;
+	}
+	if ((nnn > max ) || (nnn < 2))
+		return -EINVAL;
+	p_env->write_buffers = nnn;
+	CLAW_DBF_TEXT(2, setup, "Wbufset");
+	CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
+	return count;
+}
+
+static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
+
+static ssize_t
+claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct claw_privbk *priv;
+	struct claw_env *  p_env;
+
+	priv = dev_get_drvdata(dev);
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	return sprintf(buf, "%d\n", p_env->read_buffers);
+}
+
+static ssize_t
+claw_rbuff_write(struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct claw_privbk *priv;
+	struct claw_env *p_env;
+	int nnn,max;
+
+	priv = dev_get_drvdata(dev);
+	if (!priv)
+		return -ENODEV;
+	p_env = priv->p_env;
+	sscanf(buf, "%i", &nnn);
+	if (p_env->packing) {
+		max = 64;
+	}
+	else {
+		max = 512;
+	}
+	if ((nnn > max ) || (nnn < 2))
+		return -EINVAL;
+	p_env->read_buffers = nnn;
+	CLAW_DBF_TEXT(2, setup, "Rbufset");
+	CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
+	return count;
+}
+
+static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
+
+static struct attribute *claw_attr[] = {
+	&dev_attr_read_buffer.attr,
+	&dev_attr_write_buffer.attr,
+	&dev_attr_adapter_name.attr,
+	&dev_attr_api_type.attr,
+	&dev_attr_host_name.attr,
+	NULL,
+};
+
+static struct attribute_group claw_attr_group = {
+	.attrs = claw_attr,
+};
+
+static int
+claw_add_files(struct device *dev)
+{
+	CLAW_DBF_TEXT(2, setup, "add_file");
+	return sysfs_create_group(&dev->kobj, &claw_attr_group);
+}
+
+static void
+claw_remove_files(struct device *dev)
+{
+	CLAW_DBF_TEXT(2, setup, "rem_file");
+	sysfs_remove_group(&dev->kobj, &claw_attr_group);
+}
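+/*
+ * claw_add_files()/claw_remove_files() attach this attribute group to the
+ * ccwgroup device, so (assuming the usual ccwgroup sysfs layout) host_name,
+ * adapter_name, api_type and the buffer counts appear in the group device's
+ * sysfs directory and are typically written before the device is set online.
+ */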
+
+/*--------------------------------------------------------------------*
+*    claw_init  and cleanup                                           *
+*---------------------------------------------------------------------*/
+
+static void __exit
+claw_cleanup(void)
+{
+	driver_remove_file(&claw_group_driver.driver,
+			   &driver_attr_group);
+	ccwgroup_driver_unregister(&claw_group_driver);
+	ccw_driver_unregister(&claw_ccw_driver);
+	root_device_unregister(claw_root_dev);
+	claw_unregister_debug_facility();
+	pr_info("Driver unloaded\n");
+
+}
+
+/**
+ * Initialize module.
+ * This is called just after the module is loaded.
+ *
+ * @return 0 on success, !0 on error.
+ */
+static int __init
+claw_init(void)
+{
+	int ret = 0;
+
+	pr_info("Loading %s\n", version);
+	ret = claw_register_debug_facility();
+	if (ret) {
+		pr_err("Registering with the S/390 debug feature"
+			" failed with error code %d\n", ret);
+		goto out_err;
+	}
+	CLAW_DBF_TEXT(2, setup, "init_mod");
+	claw_root_dev = root_device_register("claw");
+	ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
+	if (ret)
+		goto register_err;
+	ret = ccw_driver_register(&claw_ccw_driver);
+	if (ret)
+		goto ccw_err;
+	claw_group_driver.driver.groups = claw_group_attr_groups;
+	ret = ccwgroup_driver_register(&claw_group_driver);
+	if (ret)
+		goto ccwgroup_err;
+	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&claw_ccw_driver);
+ccw_err:
+	root_device_unregister(claw_root_dev);
+register_err:
+	CLAW_DBF_TEXT(2, setup, "init_bad");
+	claw_unregister_debug_facility();
+out_err:
+	pr_err("Initializing the claw device driver failed\n");
+	return ret;
+}
+
+module_init(claw_init);
+module_exit(claw_cleanup);
+
+MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
+MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
+			"Copyright 2000,2008 IBM Corporation\n");
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/claw.h b/ap/os/linux/linux-3.4.x/drivers/s390/net/claw.h
new file mode 100644
index 0000000..1bc5904
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/claw.h
@@ -0,0 +1,354 @@
+/*******************************************************
+*  Define constants                                    *
+*                                                      *
+********************************************************/
+
+/*-----------------------------------------------------*
+*     CCW command codes for CLAW protocol              *
+*------------------------------------------------------*/
+
+#define CCW_CLAW_CMD_WRITE           0x01      /* write - not including link */
+#define CCW_CLAW_CMD_READ            0x02      /* read */
+#define CCW_CLAW_CMD_NOP             0x03      /* NOP */
+#define CCW_CLAW_CMD_SENSE           0x04      /* Sense */
+#define CCW_CLAW_CMD_SIGNAL_SMOD     0x05      /* Signal Status Modifier */
+#define CCW_CLAW_CMD_TIC             0x08      /* TIC */
+#define CCW_CLAW_CMD_READHEADER      0x12      /* read header data */
+#define CCW_CLAW_CMD_READFF          0x22      /* read an FF */
+#define CCW_CLAW_CMD_SENSEID         0xe4      /* Sense ID */
+
+
+/*-----------------------------------------------------*
+*    CLAW Unique constants                             *
+*------------------------------------------------------*/
+
+#define MORE_to_COME_FLAG       0x04   /* OR with write CCW in case of m-t-c */
+#define CLAW_IDLE               0x00   /* flag to indicate CLAW is idle */
+#define CLAW_BUSY               0xff   /* flag to indicate CLAW is busy */
+#define CLAW_PENDING            0x00   /* flag to indicate i/o is pending */
+#define CLAW_COMPLETE           0xff   /* flag to indicate i/o completed */
+
+/*-----------------------------------------------------*
+*     CLAW control command code                        *
+*------------------------------------------------------*/
+
+#define SYSTEM_VALIDATE_REQUEST   0x01  /* System Validate request */
+#define SYSTEM_VALIDATE_RESPONSE  0x02  /* System Validate response */
+#define CONNECTION_REQUEST        0x21  /* Connection request */
+#define CONNECTION_RESPONSE       0x22  /* Connection response */
+#define CONNECTION_CONFIRM        0x23  /* Connection confirm */
+#define DISCONNECT                0x24  /* Disconnect */
+#define CLAW_ERROR                0x41  /* CLAW error message */
+#define CLAW_VERSION_ID           2     /* CLAW version ID */
+
+/*-----------------------------------------------------*
+*  CLAW adapter sense bytes                            *
+*------------------------------------------------------*/
+
+#define CLAW_ADAPTER_SENSE_BYTE 0x41   /* Stop command issued to adapter */
+
+/*-----------------------------------------------------*
+*      CLAW control command return codes               *
+*------------------------------------------------------*/
+
+#define CLAW_RC_NAME_MISMATCH       166  /*  names do not match */
+#define CLAW_RC_WRONG_VERSION       167  /*  wrong CLAW version number */
+#define CLAW_RC_HOST_RCV_TOO_SMALL  180  /*  Host maximum receive is   */
+					 /*  less than Linux on zSeries*/
+                                         /*  transmit size             */
+
+/*-----------------------------------------------------*
+*      CLAW Constants application name                 *
+*------------------------------------------------------*/
+
+#define HOST_APPL_NAME          "TCPIP   "
+#define WS_APPL_NAME_IP_LINK    "TCPIP   "
+#define WS_APPL_NAME_IP_NAME	"IP      "
+#define WS_APPL_NAME_API_LINK   "API     "
+#define WS_APPL_NAME_PACKED     "PACKED  "
+#define WS_NAME_NOT_DEF         "NOT_DEF "
+#define PACKING_ASK		1
+#define PACK_SEND		2
+#define DO_PACKED		3
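+/*
+ * Packing negotiation as used by this driver (sketch): writing the
+ * WS_APPL_NAME_PACKED api_type arms PACKING_ASK; once the peer's
+ * CONNECTION_RESPONSE/CONNECTION_CONFIRM is seen while in PACK_SEND
+ * (see unpack_read()), the link switches to DO_PACKED and every read
+ * buffer may carry several frames, each preceded by a struct clawph.
+ */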
+
+#define MAX_ENVELOPE_SIZE       65536
+#define CLAW_DEFAULT_MTU_SIZE   4096
+#define DEF_PACK_BUFSIZE	32768
+#define READ_CHANNEL		0
+#define WRITE_CHANNEL		1
+
+#define TB_TX                   0          /* sk buffer handling in process  */
+#define TB_STOP                 1          /* network device stop in process */
+#define TB_RETRY                2          /* retry in process               */
+#define TB_NOBUFFER             3          /* no buffer on free queue        */
+#define CLAW_MAX_LINK_ID        1
+#define CLAW_MAX_DEV            256        /*      max claw devices          */
+#define MAX_NAME_LEN            8          /* host name, adapter name length */
+#define CLAW_FRAME_SIZE         4096
+#define CLAW_ID_SIZE		20+3
+
+/* state machine codes used in claw_irq_handler */
+
+#define CLAW_STOP                0
+#define CLAW_START_HALT_IO       1
+#define CLAW_START_SENSEID       2
+#define CLAW_START_READ          3
+#define CLAW_START_WRITE         4
+
+/*-----------------------------------------------------*
+*    Lock flag                                         *
+*------------------------------------------------------*/
+#define LOCK_YES             0
+#define LOCK_NO              1
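+/*
+ * Callers of claw_strt_read() pass LOCK_YES when the routine should take
+ * the ccw device lock itself, and LOCK_NO when the lock is presumably
+ * already held in the calling context.
+ */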
+
+/*-----------------------------------------------------*
+*    DBF Debug macros                                  *
+*------------------------------------------------------*/
+#define CLAW_DBF_TEXT(level, name, text) \
+	do { \
+		debug_text_event(claw_dbf_##name, level, text); \
+	} while (0)
+
+#define CLAW_DBF_HEX(level,name,addr,len) \
+do { \
+	debug_event(claw_dbf_##name,level,(void*)(addr),len); \
+} while (0)
+
+/* Sort out low debug levels early to avoid wasted sprintf calls */
+static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
+{
+	return (level <= dbf_grp->level);
+}
+
+#define CLAW_DBF_TEXT_(level,name,text...) \
+	do { \
+		if (claw_dbf_passes(claw_dbf_##name, level)) { \
+			sprintf(debug_buffer, text); \
+			debug_text_event(claw_dbf_##name, level, \
+						debug_buffer); \
+		} \
+	} while (0)
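+/*
+ * Usage sketch: CLAW_DBF_TEXT(2, setup, "new_dev") records a fixed string
+ * in the "setup" debug area, while CLAW_DBF_TEXT_(2, setup, "WB=%d", n)
+ * formats into debug_buffer first.  debug_buffer is assumed to be a single
+ * shared buffer, so the formatting itself is not serialized by this macro.
+ */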
+
+/**
+ * Enum for classifying detected devices.
+ */
+enum claw_channel_types {
+	/* Device is not a channel  */
+	claw_channel_type_none,
+
+	/* Device is a CLAW channel device */
+	claw_channel_type_claw
+};
+
+
+/*******************************************************
+*  Define Control Blocks                               *
+*                                                      *
+********************************************************/
+
+/*------------------------------------------------------*/
+/*     CLAW header                                      */
+/*------------------------------------------------------*/
+
+struct clawh {
+        __u16  length;     /* length of data read by preceding read CCW */
+        __u8   opcode;     /* equivalent read CCW */
+        __u8   flag;       /* flag of FF to indicate read was completed */
+};
+
+/*------------------------------------------------------*/
+/*     CLAW Packing header   4 bytes                    */
+/*------------------------------------------------------*/
+struct clawph {
+       __u16 len;  	/* Length of Packed Data Area   */
+       __u8  flag;  	/* Reserved not used            */
+       __u8  link_num;	/* Link ID                      */
+};
+
+/*------------------------------------------------------*/
+/*     CLAW Ending struct ccwbk                         */
+/*------------------------------------------------------*/
+struct endccw {
+        __u32     real;            /* real address of this block */
+        __u8      write1;          /* write 1 is active */
+        __u8      read1;           /* read 1 is active  */
+        __u16     reserved;        /* reserved for future use */
+        struct ccw1    write1_nop1;
+        struct ccw1    write1_nop2;
+        struct ccw1    write2_nop1;
+        struct ccw1    write2_nop2;
+        struct ccw1    read1_nop1;
+        struct ccw1    read1_nop2;
+        struct ccw1    read2_nop1;
+        struct ccw1    read2_nop2;
+};
+
+/*------------------------------------------------------*/
+/*     CLAW struct ccwbk                                       */
+/*------------------------------------------------------*/
+struct ccwbk {
+        void   *next;        /* pointer to next ccw block */
+        __u32     real;         /* real address of this ccw */
+        void      *p_buffer;    /* virtual address of data */
+        struct clawh     header;       /* claw header */
+        struct ccw1    write;   /* write CCW    */
+        struct ccw1    w_read_FF; /* read FF */
+        struct ccw1    w_TIC_1;        /* TIC */
+        struct ccw1    read;         /* read CCW  */
+        struct ccw1    read_h;        /* read header */
+        struct ccw1    signal;       /* signal SMOD  */
+        struct ccw1    r_TIC_1;        /* TIC1 */
+        struct ccw1    r_read_FF;      /* read FF  */
+        struct ccw1    r_TIC_2;        /* TIC2 */
+};
+
+/*------------------------------------------------------*/
+/*     CLAW control block                               */
+/*------------------------------------------------------*/
+struct clawctl {
+        __u8    command;      /* control command */
+        __u8    version;      /* CLAW protocol version */
+        __u8    linkid;       /* link ID   */
+        __u8    correlator;   /* correlator */
+        __u8    rc;           /* return code */
+        __u8    reserved1;    /* reserved */
+        __u8    reserved2;    /* reserved */
+        __u8    reserved3;    /* reserved */
+        __u8    data[24];     /* command specific fields */
+};
+
+/*------------------------------------------------------*/
+/*     Data for SYSTEMVALIDATE command                  */
+/*------------------------------------------------------*/
+struct sysval  {
+        char    WS_name[8];        /* Workstation System name  */
+        char    host_name[8];      /* Host system name     */
+        __u16   read_frame_size;   /* read frame size */
+        __u16   write_frame_size;  /* write frame size */
+        __u8    reserved[4];       /* reserved */
+};
+
+/*------------------------------------------------------*/
+/*     Data for Connect command                         */
+/*------------------------------------------------------*/
+struct conncmd  {
+        char     WS_name[8];       /* Workstation application name  */
+        char     host_name[8];     /* Host application name      */
+        __u16    reserved1[2];     /* read frame size */
+        __u8     reserved2[4];     /* reserved  */
+};
+
+/*------------------------------------------------------*/
+/*     Data for CLAW error                              */
+/*------------------------------------------------------*/
+struct clawwerror  {
+        char      reserved1[8];   /* reserved */
+        char      reserved2[8];   /* reserved  */
+        char      reserved3[8];   /* reserved  */
+};
+
+/*------------------------------------------------------*/
+/*     Data buffer for CLAW                             */
+/*------------------------------------------------------*/
+struct clawbuf  {
+       char      buffer[MAX_ENVELOPE_SIZE];   /* data buffer */
+};
+
+/*------------------------------------------------------*/
+/*     Channel control block for read and write channel */
+/*------------------------------------------------------*/
+
+struct chbk {
+        unsigned int        devno;
+        int                 irq;
+	char 		    id[CLAW_ID_SIZE];
+       __u32               IO_active;
+        __u8                claw_state;
+        struct irb          *irb;
+       	struct ccw_device   *cdev;  /* pointer to the channel device */
+	struct net_device   *ndev;
+        wait_queue_head_t   wait;
+        struct tasklet_struct    tasklet;
+        struct timer_list   timer;
+        unsigned long       flag_a;    /* atomic flags */
+#define CLAW_BH_ACTIVE      0
+        unsigned long       flag_b;    /* atomic flags */
+#define CLAW_WRITE_ACTIVE   0
+        __u8                last_dstat;
+        __u8                flag;
+	struct sk_buff_head collect_queue;
+	spinlock_t collect_lock;
+#define CLAW_WRITE      0x02      /* - Set if this is a write channel */
+#define CLAW_READ	0x01      /* - Set if this is a read channel  */
+#define CLAW_TIMER      0x80      /* - Set if timer made the wake_up  */
+};
+
+/*--------------------------------------------------------------*
+*           CLAW  environment block                             *
+*---------------------------------------------------------------*/
+
+struct claw_env {
+        unsigned int            devno[2];       /* device number */
+        char                    host_name[9];   /* Host name */
+        char                    adapter_name [9]; /* adapter name */
+        char                    api_type[9];    /* TCPIP, API or PACKED */
+        void                    *p_priv;        /* privptr */
+        __u16                   read_buffers;   /* read buffer number */
+        __u16                   write_buffers;  /* write buffer number */
+        __u16                   read_size;      /* read buffer size */
+        __u16                   write_size;     /* write buffer size */
+        __u16                   dev_id;         /* device ident */
+	__u8			packing;	/* are we packing? */
+        __u8                    in_use;         /* device active flag */
+        struct net_device       *ndev;    	/* backward ptr to the net dev*/
+};
+
+/*--------------------------------------------------------------*
+*           CLAW  main control block                            *
+*---------------------------------------------------------------*/
+
+struct claw_privbk {
+        void *p_buff_ccw;
+        __u32      p_buff_ccw_num;
+        void  *p_buff_read;
+        __u32      p_buff_read_num;
+        __u32      p_buff_pages_perread;
+        void  *p_buff_write;
+        __u32      p_buff_write_num;
+        __u32      p_buff_pages_perwrite;
+        long       active_link_ID;           /* Active logical link ID */
+        struct ccwbk *p_write_free_chain;     /* pointer to free ccw chain */
+        struct ccwbk *p_write_active_first;   /* ptr to the first write ccw */
+        struct ccwbk *p_write_active_last;    /* ptr to the last write ccw */
+        struct ccwbk *p_read_active_first;    /* ptr to the first read ccw */
+        struct ccwbk *p_read_active_last;     /* ptr to the last read ccw */
+        struct endccw *p_end_ccw;              /*ptr to ending ccw */
+        struct ccwbk *p_claw_signal_blk;      /* ptr to signal block */
+        __u32      write_free_count;       /* number of free bufs for write */
+	struct     net_device_stats  stats; /* 	 device status    */
+        struct chbk channel[2];            /* Channel control blocks */
+        __u8       mtc_skipping;
+        int        mtc_offset;
+        int        mtc_logical_link;
+        void       *p_mtc_envelope;
+	struct	   sk_buff	*pk_skb;	/* packing buffer    */
+	int	   pk_cnt;
+        struct clawctl ctl_bk;
+        struct claw_env *p_env;
+        __u8       system_validate_comp;
+        __u8       release_pend;
+        __u8      checksum_received_ip_pkts;
+	__u8      buffs_alloc;
+        struct endccw  end_ccw;
+        unsigned long  tbusy;
+
+};
+
+
+/************************************************************/
+/* define global constants                                  */
+/************************************************************/
+
+#define CCWBK_SIZE sizeof(struct ccwbk)
+
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_dbug.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_dbug.c
new file mode 100644
index 0000000..d962fd7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_dbug.c
@@ -0,0 +1,79 @@
+/*
+ *	drivers/s390/net/ctcm_dbug.c
+ *
+ *	Copyright IBM Corp. 2001, 2007
+ *	Authors:	Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ */
+
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/sysctl.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include "ctcm_dbug.h"
+
+/*
+ * Debug Facility Stuff
+ */
+
+struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS] = {
+	[CTCM_DBF_SETUP]     = {"ctc_setup", 8, 1, 64, CTC_DBF_INFO, NULL},
+	[CTCM_DBF_ERROR]     = {"ctc_error", 8, 1, 64, CTC_DBF_ERROR, NULL},
+	[CTCM_DBF_TRACE]     = {"ctc_trace", 8, 1, 64, CTC_DBF_ERROR, NULL},
+	[CTCM_DBF_MPC_SETUP] = {"mpc_setup", 8, 1, 80, CTC_DBF_INFO, NULL},
+	[CTCM_DBF_MPC_ERROR] = {"mpc_error", 8, 1, 80, CTC_DBF_ERROR, NULL},
+	[CTCM_DBF_MPC_TRACE] = {"mpc_trace", 8, 1, 80, CTC_DBF_ERROR, NULL},
+};
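+/*
+ * Each entry above: debug area name, pages, areas, per-entry text length
+ * and initial level; the debug_info_t pointer is filled in later by
+ * ctcm_register_dbf_views().
+ */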
+
+void ctcm_unregister_dbf_views(void)
+{
+	int x;
+	for (x = 0; x < CTCM_DBF_INFOS; x++) {
+		debug_unregister(ctcm_dbf[x].id);
+		ctcm_dbf[x].id = NULL;
+	}
+}
+
+int ctcm_register_dbf_views(void)
+{
+	int x;
+	for (x = 0; x < CTCM_DBF_INFOS; x++) {
+		/* register the areas */
+		ctcm_dbf[x].id = debug_register(ctcm_dbf[x].name,
+						ctcm_dbf[x].pages,
+						ctcm_dbf[x].areas,
+						ctcm_dbf[x].len);
+		if (ctcm_dbf[x].id == NULL) {
+			ctcm_unregister_dbf_views();
+			return -ENOMEM;
+		}
+
+		/* register a view */
+		debug_register_view(ctcm_dbf[x].id, &debug_hex_ascii_view);
+		/* set a passing level */
+		debug_set_level(ctcm_dbf[x].id, ctcm_dbf[x].level);
+	}
+
+	return 0;
+}
+
+void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...)
+{
+	char dbf_txt_buf[64];
+	va_list args;
+
+	if (level > (ctcm_dbf[dbf_nix].id)->level)
+		return;
+	va_start(args, fmt);
+	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
+	va_end(args);
+
+	debug_text_event(ctcm_dbf[dbf_nix].id, level, dbf_txt_buf);
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_dbug.h b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_dbug.h
new file mode 100644
index 0000000..26966d0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_dbug.h
@@ -0,0 +1,143 @@
+/*
+ *	drivers/s390/net/ctcm_dbug.h
+ *
+ *	Copyright IBM Corp. 2001, 2007
+ *	Authors:	Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ */
+
+#ifndef _CTCM_DBUG_H_
+#define _CTCM_DBUG_H_
+
+/*
+ * Debug Facility stuff
+ */
+
+#include <asm/debug.h>
+
+#ifdef DEBUG
+ #define do_debug 1
+#else
+ #define do_debug 0
+#endif
+#ifdef DEBUGCCW
+ #define do_debug_ccw 1
+ #define DEBUGDATA 1
+#else
+ #define do_debug_ccw 0
+#endif
+#ifdef DEBUGDATA
+ #define do_debug_data 1
+#else
+ #define do_debug_data 0
+#endif
+
+/* define dbf debug levels similar to kernel msg levels */
+#define	CTC_DBF_ALWAYS	0	/* always print this 			*/
+#define	CTC_DBF_EMERG	0	/* system is unusable			*/
+#define	CTC_DBF_ALERT	1	/* action must be taken immediately	*/
+#define	CTC_DBF_CRIT	2	/* critical conditions			*/
+#define	CTC_DBF_ERROR	3	/* error conditions			*/
+#define	CTC_DBF_WARN	4	/* warning conditions			*/
+#define	CTC_DBF_NOTICE	5	/* normal but significant condition	*/
+#define	CTC_DBF_INFO	5	/* informational			*/
+#define	CTC_DBF_DEBUG	6	/* debug-level messages			*/
+
+enum ctcm_dbf_names {
+	CTCM_DBF_SETUP,
+	CTCM_DBF_ERROR,
+	CTCM_DBF_TRACE,
+	CTCM_DBF_MPC_SETUP,
+	CTCM_DBF_MPC_ERROR,
+	CTCM_DBF_MPC_TRACE,
+	CTCM_DBF_INFOS	/* must be last element */
+};
+
+struct ctcm_dbf_info {
+	char name[DEBUG_MAX_NAME_LEN];
+	int pages;
+	int areas;
+	int len;
+	int level;
+	debug_info_t *id;
+};
+
+extern struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS];
+
+int ctcm_register_dbf_views(void);
+void ctcm_unregister_dbf_views(void);
+void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *text, ...);
+
+static inline const char *strtail(const char *s, int n)
+{
+	int l = strlen(s);
+	return (l > n) ? s + (l - n) : s;
+}
+
+#define CTCM_FUNTAIL strtail((char *)__func__, 16)
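+/*
+ * CTCM_FUNTAIL yields the last 16 characters of the current function name,
+ * so that long function names still fit into the short text entries
+ * registered in ctcm_dbug.c.
+ */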
+
+#define CTCM_DBF_TEXT(name, level, text) \
+	do { \
+		debug_text_event(ctcm_dbf[CTCM_DBF_##name].id, level, text); \
+	} while (0)
+
+#define CTCM_DBF_HEX(name, level, addr, len) \
+	do { \
+		debug_event(ctcm_dbf[CTCM_DBF_##name].id, \
+					level, (void *)(addr), len); \
+	} while (0)
+
+#define CTCM_DBF_TEXT_(name, level, text...) \
+	ctcm_dbf_longtext(CTCM_DBF_##name, level, text)
+
+/*
+ * cat : one of {setup, mpc_setup, trace, mpc_trace, error, mpc_error}.
+ * dev : netdevice with valid name field.
+ * text: any text string.
+ */
+#define CTCM_DBF_DEV_NAME(cat, dev, text) \
+	do { \
+		CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%s) :- %s", \
+			CTCM_FUNTAIL, dev->name, text); \
+	} while (0)
+
+#define MPC_DBF_DEV_NAME(cat, dev, text) \
+	do { \
+		CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%s) := %s", \
+			CTCM_FUNTAIL, dev->name, text); \
+	} while (0)
+
+#define CTCMY_DBF_DEV_NAME(cat, dev, text) \
+	do { \
+		if (IS_MPCDEV(dev)) \
+			MPC_DBF_DEV_NAME(cat, dev, text); \
+		else \
+			CTCM_DBF_DEV_NAME(cat, dev, text); \
+	} while (0)
+
+/*
+ * cat : one of {setup, mpc_setup, trace, mpc_trace, error, mpc_error}.
+ * dev : netdevice.
+ * text: any text string.
+ */
+#define CTCM_DBF_DEV(cat, dev, text) \
+	do { \
+		CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%p) :-: %s", \
+			CTCM_FUNTAIL, dev, text); \
+	} while (0)
+
+#define MPC_DBF_DEV(cat, dev, text) \
+	do { \
+		CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%p) :=: %s", \
+			CTCM_FUNTAIL, dev, text); \
+	} while (0)
+
+#define CTCMY_DBF_DEV(cat, dev, text) \
+	do { \
+		if (IS_MPCDEV(dev)) \
+			MPC_DBF_DEV(cat, dev, text); \
+		else \
+			CTCM_DBF_DEV(cat, dev, text); \
+	} while (0)
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_fsms.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_fsms.c
new file mode 100644
index 0000000..a697669
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_fsms.c
@@ -0,0 +1,2306 @@
+/*
+ * drivers/s390/net/ctcm_fsms.c
+ *
+ * Copyright IBM Corp. 2001, 2007
+ * Authors:	Fritz Elfert (felfert@millenux.com)
+ * 		Peter Tiedemann (ptiedem@de.ibm.com)
+ *	MPC additions :
+ *		Belinda Thompson (belindat@us.ibm.com)
+ *		Andy Richter (richtera@us.ibm.com)
+ */
+
+#undef DEBUG
+#undef DEBUGDATA
+#undef DEBUGCCW
+
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/dst.h>
+
+#include <linux/io.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <linux/uaccess.h>
+
+#include <asm/idals.h>
+
+#include "fsm.h"
+
+#include "ctcm_dbug.h"
+#include "ctcm_main.h"
+#include "ctcm_fsms.h"
+
+const char *dev_state_names[] = {
+	[DEV_STATE_STOPPED]		= "Stopped",
+	[DEV_STATE_STARTWAIT_RXTX]	= "StartWait RXTX",
+	[DEV_STATE_STARTWAIT_RX]	= "StartWait RX",
+	[DEV_STATE_STARTWAIT_TX]	= "StartWait TX",
+	[DEV_STATE_STOPWAIT_RXTX]	= "StopWait RXTX",
+	[DEV_STATE_STOPWAIT_RX]		= "StopWait RX",
+	[DEV_STATE_STOPWAIT_TX]		= "StopWait TX",
+	[DEV_STATE_RUNNING]		= "Running",
+};
+
+const char *dev_event_names[] = {
+	[DEV_EVENT_START]	= "Start",
+	[DEV_EVENT_STOP]	= "Stop",
+	[DEV_EVENT_RXUP]	= "RX up",
+	[DEV_EVENT_TXUP]	= "TX up",
+	[DEV_EVENT_RXDOWN]	= "RX down",
+	[DEV_EVENT_TXDOWN]	= "TX down",
+	[DEV_EVENT_RESTART]	= "Restart",
+};
+
+const char *ctc_ch_event_names[] = {
+	[CTC_EVENT_IO_SUCCESS]	= "ccw_device success",
+	[CTC_EVENT_IO_EBUSY]	= "ccw_device busy",
+	[CTC_EVENT_IO_ENODEV]	= "ccw_device enodev",
+	[CTC_EVENT_IO_UNKNOWN]	= "ccw_device unknown",
+	[CTC_EVENT_ATTNBUSY]	= "Status ATTN & BUSY",
+	[CTC_EVENT_ATTN]	= "Status ATTN",
+	[CTC_EVENT_BUSY]	= "Status BUSY",
+	[CTC_EVENT_UC_RCRESET]	= "Unit check remote reset",
+	[CTC_EVENT_UC_RSRESET]	= "Unit check remote system reset",
+	[CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
+	[CTC_EVENT_UC_TXPARITY]	= "Unit check TX parity",
+	[CTC_EVENT_UC_HWFAIL]	= "Unit check Hardware failure",
+	[CTC_EVENT_UC_RXPARITY]	= "Unit check RX parity",
+	[CTC_EVENT_UC_ZERO]	= "Unit check ZERO",
+	[CTC_EVENT_UC_UNKNOWN]	= "Unit check Unknown",
+	[CTC_EVENT_SC_UNKNOWN]	= "SubChannel check Unknown",
+	[CTC_EVENT_MC_FAIL]	= "Machine check failure",
+	[CTC_EVENT_MC_GOOD]	= "Machine check operational",
+	[CTC_EVENT_IRQ]		= "IRQ normal",
+	[CTC_EVENT_FINSTAT]	= "IRQ final",
+	[CTC_EVENT_TIMER]	= "Timer",
+	[CTC_EVENT_START]	= "Start",
+	[CTC_EVENT_STOP]	= "Stop",
+	/*
+	* additional MPC events
+	*/
+	[CTC_EVENT_SEND_XID]	= "XID Exchange",
+	[CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
+};
+
+const char *ctc_ch_state_names[] = {
+	[CTC_STATE_IDLE]	= "Idle",
+	[CTC_STATE_STOPPED]	= "Stopped",
+	[CTC_STATE_STARTWAIT]	= "StartWait",
+	[CTC_STATE_STARTRETRY]	= "StartRetry",
+	[CTC_STATE_SETUPWAIT]	= "SetupWait",
+	[CTC_STATE_RXINIT]	= "RX init",
+	[CTC_STATE_TXINIT]	= "TX init",
+	[CTC_STATE_RX]		= "RX",
+	[CTC_STATE_TX]		= "TX",
+	[CTC_STATE_RXIDLE]	= "RX idle",
+	[CTC_STATE_TXIDLE]	= "TX idle",
+	[CTC_STATE_RXERR]	= "RX error",
+	[CTC_STATE_TXERR]	= "TX error",
+	[CTC_STATE_TERM]	= "Terminating",
+	[CTC_STATE_DTERM]	= "Restarting",
+	[CTC_STATE_NOTOP]	= "Not operational",
+	/*
+	* additional MPC states
+	*/
+	[CH_XID0_PENDING]	= "Pending XID0 Start",
+	[CH_XID0_INPROGRESS]	= "In XID0 Negotiations ",
+	[CH_XID7_PENDING]	= "Pending XID7 P1 Start",
+	[CH_XID7_PENDING1]	= "Active XID7 P1 Exchange ",
+	[CH_XID7_PENDING2]	= "Pending XID7 P2 Start ",
+	[CH_XID7_PENDING3]	= "Active XID7 P2 Exchange ",
+	[CH_XID7_PENDING4]	= "XID7 Complete - Pending READY ",
+};
+
+static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);
+
+/*
+ * ----- static ctcm actions for channel statemachine -----
+ *
+ */
+static void chx_txdone(fsm_instance *fi, int event, void *arg);
+static void chx_rx(fsm_instance *fi, int event, void *arg);
+static void chx_rxidle(fsm_instance *fi, int event, void *arg);
+static void chx_firstio(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
+
+/*
+ * ----- static ctcmpc actions for ctcmpc channel statemachine -----
+ *
+ */
+static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
+static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
+static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
+/* shared :
+static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
+*/
+static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
+static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
+static void ctcmpc_chx_resend(fsm_instance *, int, void *);
+static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
+
+/**
+ * Check return code of a preceding ccw_device call, halt_IO etc...
+ *
+ * ch	:	The channel the error belongs to.
+ * Returns the error code (!= 0) to inspect.
+ */
+void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
+{
+	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+		"%s(%s): %s: %04x\n",
+		CTCM_FUNTAIL, ch->id, msg, rc);
+	switch (rc) {
+	case -EBUSY:
+		pr_info("%s: The communication peer is busy\n",
+			ch->id);
+		fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
+		break;
+	case -ENODEV:
+		pr_err("%s: The specified target device is not valid\n",
+		       ch->id);
+		fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
+		break;
+	default:
+		pr_err("An I/O operation resulted in error %04x\n",
+		       rc);
+		fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
+	}
+}
+
+void ctcm_purge_skb_queue(struct sk_buff_head *q)
+{
+	struct sk_buff *skb;
+
+	CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);
+
+	while ((skb = skb_dequeue(q))) {
+		atomic_dec(&skb->users);
+		dev_kfree_skb_any(skb);
+	}
+}
+
+/**
+ * NOP action for statemachines
+ */
+static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
+{
+}
+
+/*
+ * Actions for channel - statemachines.
+ */
+
+/**
+ * Normal data has been sent. Free the corresponding
+ * skb (it's in io_queue), reset dev->tbusy and
+ * revert to idle state.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void chx_txdone(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct sk_buff *skb;
+	int first = 1;
+	int i;
+	unsigned long duration;
+	struct timespec done_stamp = current_kernel_time(); /* xtime */
+
+	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
+
+	duration =
+	    (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
+	    (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
+	if (duration > ch->prof.tx_time)
+		ch->prof.tx_time = duration;
+
+	if (ch->irb->scsw.cmd.count != 0)
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+			"%s(%s): TX not complete, remaining %d bytes",
+			     CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
+	fsm_deltimer(&ch->timer);
+	while ((skb = skb_dequeue(&ch->io_queue))) {
+		priv->stats.tx_packets++;
+		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
+		if (first) {
+			priv->stats.tx_bytes += 2;
+			first = 0;
+		}
+		atomic_dec(&skb->users);
+		dev_kfree_skb_irq(skb);
+	}
+	spin_lock(&ch->collect_lock);
+	clear_normalized_cda(&ch->ccw[4]);
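+	/* If more packets were queued on the collect queue while the last
+	 * write was in flight, coalesce them into trans_skb (preceded by a
+	 * fresh 2-byte block length) and start the next chained write. */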
+	if (ch->collect_len > 0) {
+		int rc;
+
+		if (ctcm_checkalloc_buffer(ch)) {
+			spin_unlock(&ch->collect_lock);
+			return;
+		}
+		ch->trans_skb->data = ch->trans_skb_data;
+		skb_reset_tail_pointer(ch->trans_skb);
+		ch->trans_skb->len = 0;
+		if (ch->prof.maxmulti < (ch->collect_len + 2))
+			ch->prof.maxmulti = ch->collect_len + 2;
+		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
+			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
+		*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
+		i = 0;
+		while ((skb = skb_dequeue(&ch->collect_queue))) {
+			skb_copy_from_linear_data(skb,
+				skb_put(ch->trans_skb, skb->len), skb->len);
+			priv->stats.tx_packets++;
+			priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
+			atomic_dec(&skb->users);
+			dev_kfree_skb_irq(skb);
+			i++;
+		}
+		ch->collect_len = 0;
+		spin_unlock(&ch->collect_lock);
+		ch->ccw[1].count = ch->trans_skb->len;
+		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+		ch->prof.send_stamp = current_kernel_time(); /* xtime */
+		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+						(unsigned long)ch, 0xff, 0);
+		ch->prof.doios_multi++;
+		if (rc != 0) {
+			priv->stats.tx_dropped += i;
+			priv->stats.tx_errors += i;
+			fsm_deltimer(&ch->timer);
+			ctcm_ccw_check_rc(ch, rc, "chained TX");
+		}
+	} else {
+		spin_unlock(&ch->collect_lock);
+		fsm_newstate(fi, CTC_STATE_TXIDLE);
+	}
+	ctcm_clear_busy_do(dev);
+}
+
+/**
+ * Initial data is sent.
+ * Notify device statemachine that we are up and
+ * running.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
+
+	fsm_deltimer(&ch->timer);
+	fsm_newstate(fi, CTC_STATE_TXIDLE);
+	fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
+}
+
+/**
+ * Got normal data, check for sanity, queue it up, allocate a new buffer,
+ * trigger bottom half, and initiate next read.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void chx_rx(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
+	struct sk_buff *skb = ch->trans_skb;
+	__u16 block_len = *((__u16 *)skb->data);
+	int check_len;
+	int rc;
+
+	fsm_deltimer(&ch->timer);
+	if (len < 8) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+			"%s(%s): got packet with length %d < 8\n",
+					CTCM_FUNTAIL, dev->name, len);
+		priv->stats.rx_dropped++;
+		priv->stats.rx_length_errors++;
+						goto again;
+	}
+	if (len > ch->max_bufsize) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+			"%s(%s): got packet with length %d > %d\n",
+				CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
+		priv->stats.rx_dropped++;
+		priv->stats.rx_length_errors++;
+						goto again;
+	}
+
+	/*
+	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
+	 */
+	switch (ch->protocol) {
+	case CTCM_PROTO_S390:
+	case CTCM_PROTO_OS390:
+		check_len = block_len + 2;
+		break;
+	default:
+		check_len = block_len;
+		break;
+	}
+	if ((len < block_len) || (len > check_len)) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+			"%s(%s): got block length %d != rx length %d\n",
+				CTCM_FUNTAIL, dev->name, block_len, len);
+		if (do_debug)
+			ctcmpc_dump_skb(skb, 0);
+
+		*((__u16 *)skb->data) = len;
+		priv->stats.rx_dropped++;
+		priv->stats.rx_length_errors++;
+						goto again;
+	}
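+	/* rewrite the length field to the payload length (without the
+	 * 2-byte block header) and hand the block to the unpack routine */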
+	if (block_len > 2) {
+		*((__u16 *)skb->data) = block_len - 2;
+		ctcm_unpack_skb(ch, skb);
+	}
+ again:
+	skb->data = ch->trans_skb_data;
+	skb_reset_tail_pointer(skb);
+	skb->len = 0;
+	if (ctcm_checkalloc_buffer(ch))
+		return;
+	ch->ccw[1].count = ch->max_bufsize;
+	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+					(unsigned long)ch, 0xff, 0);
+	if (rc != 0)
+		ctcm_ccw_check_rc(ch, rc, "normal RX");
+}
+
+/**
+ * Initialize connection by sending a __u16 of value 0.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void chx_firstio(fsm_instance *fi, int event, void *arg)
+{
+	int rc;
+	struct channel *ch = arg;
+	int fsmstate = fsm_getstate(fi);
+
+	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+		"%s(%s) : %02x",
+		CTCM_FUNTAIL, ch->id, fsmstate);
+
+	ch->sense_rc = 0;	/* reset unit check report control */
+	if (fsmstate == CTC_STATE_TXIDLE)
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+			"%s(%s): remote side issued READ?, init.\n",
+				CTCM_FUNTAIL, ch->id);
+	fsm_deltimer(&ch->timer);
+	if (ctcm_checkalloc_buffer(ch))
+		return;
+	if ((fsmstate == CTC_STATE_SETUPWAIT) &&
+	    (ch->protocol == CTCM_PROTO_OS390)) {
+		/* OS/390 or z/OS */
+		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
+			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
+			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
+				     CTC_EVENT_TIMER, ch);
+			chx_rxidle(fi, event, arg);
+		} else {
+			struct net_device *dev = ch->netdev;
+			struct ctcm_priv *priv = dev->ml_priv;
+			fsm_newstate(fi, CTC_STATE_TXIDLE);
+			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
+		}
+		return;
+	}
+	/*
+	 * Don't set up a timer for receiving the initial RX frame
+	 * if in compatibility mode, since VM TCP delays the initial
+	 * frame until it has some data to send.
+	 */
+	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
+	    (ch->protocol != CTCM_PROTO_S390))
+		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+
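+	/* the handshake frame consists of nothing but the 2-byte block
+	 * length field itself */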
+	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
+	ch->ccw[1].count = 2;	/* Transfer only length */
+
+	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
+		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
+	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+					(unsigned long)ch, 0xff, 0);
+	if (rc != 0) {
+		fsm_deltimer(&ch->timer);
+		fsm_newstate(fi, CTC_STATE_SETUPWAIT);
+		ctcm_ccw_check_rc(ch, rc, "init IO");
+	}
+	/*
+	 * Since we don't set up a timer in compatibility mode, we
+	 * also signal the RX channel up immediately. This enables us
+	 * to send packets early, which in turn usually triggers a
+	 * reply from VM TCP that brings the RX channel into its
+	 * final state.
+	 */
+	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
+	    (ch->protocol == CTCM_PROTO_S390)) {
+		struct net_device *dev = ch->netdev;
+		struct ctcm_priv *priv = dev->ml_priv;
+		fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
+	}
+}
+
+/**
+ * Got initial data, check it. If OK,
+ * notify device statemachine that we are up and
+ * running.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void chx_rxidle(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	__u16 buflen;
+	int rc;
+
+	fsm_deltimer(&ch->timer);
+	buflen = *((__u16 *)ch->trans_skb->data);
+	CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
+			__func__, dev->name, buflen);
+
+	if (buflen >= CTCM_INITIAL_BLOCKLEN) {
+		if (ctcm_checkalloc_buffer(ch))
+			return;
+		ch->ccw[1].count = ch->max_bufsize;
+		fsm_newstate(fi, CTC_STATE_RXIDLE);
+		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+						(unsigned long)ch, 0xff, 0);
+		if (rc != 0) {
+			fsm_newstate(fi, CTC_STATE_RXINIT);
+			ctcm_ccw_check_rc(ch, rc, "initial RX");
+		} else
+			fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
+	} else {
+		CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
+				__func__, dev->name,
+					buflen, CTCM_INITIAL_BLOCKLEN);
+		chx_firstio(fi, event, arg);
+	}
+}
+
+/**
+ * Set channel into extended mode.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	int rc;
+	unsigned long saveflags = 0;
+	int timeout = CTCM_TIME_5_SEC;
+
+	fsm_deltimer(&ch->timer);
+	if (IS_MPC(ch)) {
+		timeout = 1500;
+		CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
+				__func__, smp_processor_id(), ch, ch->id);
+	}
+	fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
+	fsm_newstate(fi, CTC_STATE_SETUPWAIT);
+	CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
+
+	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
+		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+			/* Conditional locking like this looks nondeterministic
+			 * to static analysis; ignore sparse warnings here. */
+
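+	/* ch->ccw[6..7] hold the two-CCW program (presumably prepared at
+	 * channel setup) that switches the device into extended mode */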
+	rc = ccw_device_start(ch->cdev, &ch->ccw[6],
+					(unsigned long)ch, 0xff, 0);
+	if (event == CTC_EVENT_TIMER)	/* see above comments */
+		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+	if (rc != 0) {
+		fsm_deltimer(&ch->timer);
+		fsm_newstate(fi, CTC_STATE_STARTWAIT);
+		ctcm_ccw_check_rc(ch, rc, "set Mode");
+	} else
+		ch->retry = 0;
+}
+
+/**
+ * Setup channel.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch	= arg;
+	unsigned long saveflags;
+	int rc;
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
+		CTCM_FUNTAIL, ch->id,
+		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
+
+	if (ch->trans_skb != NULL) {
+		clear_normalized_cda(&ch->ccw[1]);
+		dev_kfree_skb(ch->trans_skb);
+		ch->trans_skb = NULL;
+	}
+	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
+		ch->ccw[1].cmd_code = CCW_CMD_READ;
+		ch->ccw[1].flags = CCW_FLAG_SLI;
+		ch->ccw[1].count = 0;
+	} else {
+		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
+		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[1].count = 0;
+	}
+	if (ctcm_checkalloc_buffer(ch)) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+			"%s(%s): %s trans_skb alloc delayed "
+			"until first transfer",
+			CTCM_FUNTAIL, ch->id,
+			(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+				"RX" : "TX");
+	}
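+	/* Basic channel program: PREPARE, READ or WRITE, NOOP (presenting
+	 * CE + DE together).  An identical copy is kept in ccw[3..5]; its
+	 * data CCW ccw[4] gets its own data address per transmission, see
+	 * e.g. ctcm_chx_txretry(). */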
+	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
+	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+	ch->ccw[0].count = 0;
+	ch->ccw[0].cda = 0;
+	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* joint CE + DE */
+	ch->ccw[2].flags = CCW_FLAG_SLI;
+	ch->ccw[2].count = 0;
+	ch->ccw[2].cda = 0;
+	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
+	ch->ccw[4].cda = 0;
+	ch->ccw[4].flags &= ~CCW_FLAG_IDA;
+
+	fsm_newstate(fi, CTC_STATE_STARTWAIT);
+	fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
+	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+	if (rc != 0) {
+		if (rc != -EBUSY)
+			fsm_deltimer(&ch->timer);
+		ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
+	}
+}
+
+/**
+ * Shutdown a channel.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	unsigned long saveflags = 0;
+	int rc;
+	int oldstate;
+
+	fsm_deltimer(&ch->timer);
+	if (IS_MPC(ch))
+		fsm_deltimer(&ch->sweep_timer);
+
+	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+
+	if (event == CTC_EVENT_STOP)	/* only for STOP not yet locked */
+		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+			/* Conditional locking like this looks nondeterministic
+			 * to static analysis; ignore sparse warnings here. */
+	oldstate = fsm_getstate(fi);
+	fsm_newstate(fi, CTC_STATE_TERM);
+	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+
+	if (event == CTC_EVENT_STOP)
+		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+			/* see remark above about conditional locking */
+
+	if (rc != 0 && rc != -EBUSY) {
+		fsm_deltimer(&ch->timer);
+		if (event != CTC_EVENT_STOP) {
+			fsm_newstate(fi, oldstate);
+			ctcm_ccw_check_rc(ch, rc, (char *)__func__);
+		}
+	}
+}
+
+/**
+ * Cleanup helper for ctcm_chx_fail and ctcm_chx_stopped:
+ * clean up the channel's queues and notify the interface statemachine.
+ *
+ * fi		An instance of a channel statemachine.
+ * state	The next state (depending on caller).
+ * ch		The channel to operate on.
+ */
+static void ctcm_chx_cleanup(fsm_instance *fi, int state,
+		struct channel *ch)
+{
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
+			"%s(%s): %s[%d]\n",
+			CTCM_FUNTAIL, dev->name, ch->id, state);
+
+	fsm_deltimer(&ch->timer);
+	if (IS_MPC(ch))
+		fsm_deltimer(&ch->sweep_timer);
+
+	fsm_newstate(fi, state);
+	if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
+		clear_normalized_cda(&ch->ccw[1]);
+		dev_kfree_skb_any(ch->trans_skb);
+		ch->trans_skb = NULL;
+	}
+
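+	/* reset the MPC transport header sequence state for the next start */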
+	ch->th_seg = 0x00;
+	ch->th_seq_num = 0x00;
+	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
+		skb_queue_purge(&ch->io_queue);
+		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+	} else {
+		ctcm_purge_skb_queue(&ch->io_queue);
+		if (IS_MPC(ch))
+			ctcm_purge_skb_queue(&ch->sweep_queue);
+		spin_lock(&ch->collect_lock);
+		ctcm_purge_skb_queue(&ch->collect_queue);
+		ch->collect_len = 0;
+		spin_unlock(&ch->collect_lock);
+		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+	}
+}
+
+/**
+ * A channel has successfully been halted.
+ * Clean up its queue and notify the interface statemachine.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
+{
+	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
+}
+
+/**
+ * A stop command from the device statemachine arrived while the channel
+ * is not operational. Set the state to stopped.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
+{
+	fsm_newstate(fi, CTC_STATE_STOPPED);
+}
+
+/**
+ * A machine check for no path, a not-operational status or a gone device
+ * has occurred.
+ * Cleanup queue and notify interface statemachine.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
+{
+	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
+}
+
+/**
+ * Handle error during setup of channel.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	/*
+	 * Special case: Got UC_RCRESET on setmode.
+	 * This means that the remote side isn't set up yet. In this case,
+	 * simply retry after a few seconds...
+	 */
+	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
+	    ((event == CTC_EVENT_UC_RCRESET) ||
+	     (event == CTC_EVENT_UC_RSRESET))) {
+		fsm_newstate(fi, CTC_STATE_STARTRETRY);
+		fsm_deltimer(&ch->timer);
+		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+		if (!IS_MPC(ch) &&
+		    (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
+			int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+			if (rc != 0)
+				ctcm_ccw_check_rc(ch, rc,
+					"HaltIO in chx_setuperr");
+		}
+		return;
+	}
+
+	CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
+		"%s(%s) : %s error during %s channel setup state=%s\n",
+		CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
+		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
+		fsm_getstate_str(fi));
+
+	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
+		fsm_newstate(fi, CTC_STATE_RXERR);
+		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+	} else {
+		fsm_newstate(fi, CTC_STATE_TXERR);
+		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+	}
+}
+
+/**
+ * Restart a channel after an error.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	unsigned long saveflags = 0;
+	int oldstate;
+	int rc;
+
+	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+		"%s: %s[%d] of %s\n",
+			CTCM_FUNTAIL, ch->id, event, dev->name);
+
+	fsm_deltimer(&ch->timer);
+
+	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+	oldstate = fsm_getstate(fi);
+	fsm_newstate(fi, CTC_STATE_STARTWAIT);
+	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
+		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+			/* Such conditional locking is a known problem for
+			 * sparse because it's nondeterministic in static analysis.
+			 * Warnings should be ignored here. */
+	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+	if (event == CTC_EVENT_TIMER)
+		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+	if (rc != 0) {
+		if (rc != -EBUSY) {
+		    fsm_deltimer(&ch->timer);
+		    fsm_newstate(fi, oldstate);
+		}
+		ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
+	}
+}
+
+/**
+ * Handle error during RX initial handshake (exchange of
+ * 0-length block header)
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	if (event == CTC_EVENT_TIMER) {
+		if (!IS_MPCDEV(dev))
+			/* TODO : check if MPC deletes timer somewhere */
+			fsm_deltimer(&ch->timer);
+		if (ch->retry++ < 3)
+			ctcm_chx_restart(fi, event, arg);
+		else {
+			fsm_newstate(fi, CTC_STATE_RXERR);
+			fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+		}
+	} else {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
+			ctc_ch_event_names[event], fsm_getstate_str(fi));
+
+		dev_warn(&dev->dev,
+			"Initialization failed with RX/TX init handshake "
+			"error %s\n", ctc_ch_event_names[event]);
+	}
+}
+
+/**
+ * Notify device statemachine if we gave up initialization
+ * of RX channel.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+			"%s(%s): RX %s busy, init. fail",
+				CTCM_FUNTAIL, dev->name, ch->id);
+	fsm_newstate(fi, CTC_STATE_RXERR);
+	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+}
+
+/**
+ * Handle RX Unit check remote reset (remote disconnected)
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct channel *ch2;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+			"%s: %s: remote disconnect - re-init ...",
+				CTCM_FUNTAIL, dev->name);
+	fsm_deltimer(&ch->timer);
+	/*
+	 * Notify device statemachine
+	 */
+	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+	fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+
+	fsm_newstate(fi, CTC_STATE_DTERM);
+	ch2 = priv->channel[CTCM_WRITE];
+	fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
+
+	ccw_device_halt(ch->cdev, (unsigned long)ch);
+	ccw_device_halt(ch2->cdev, (unsigned long)ch2);
+}
+
+/**
+ * Handle error during TX channel initialization.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	if (event == CTC_EVENT_TIMER) {
+		fsm_deltimer(&ch->timer);
+		if (ch->retry++ < 3)
+			ctcm_chx_restart(fi, event, arg);
+		else {
+			fsm_newstate(fi, CTC_STATE_TXERR);
+			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+		}
+	} else {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
+			ctc_ch_event_names[event], fsm_getstate_str(fi));
+
+		dev_warn(&dev->dev,
+			"Initialization failed with RX/TX init handshake "
+			"error %s\n", ctc_ch_event_names[event]);
+	}
+}
+
+/**
+ * Handle TX timeout by retrying operation.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct sk_buff *skb;
+
+	CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
+			__func__, smp_processor_id(), ch, ch->id);
+
+	fsm_deltimer(&ch->timer);
+	if (ch->retry++ > 3) {
+		struct mpc_group *gptr = priv->mpcg;
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
+				"%s: %s: retries exceeded",
+					CTCM_FUNTAIL, ch->id);
+		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+		/* call restart if not MPC, or if MPC and the mpcg fsm is ready;
+		 * gptr serves as the MPC indicator */
+		if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
+			ctcm_chx_restart(fi, event, arg);
+				goto done;
+	}
+
+	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+			"%s : %s: retry %d",
+				CTCM_FUNTAIL, ch->id, ch->retry);
+	skb = skb_peek(&ch->io_queue);
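+	/* re-drive the oldest queued skb: point the data CCW ccw[4] at it
+	 * and restart the copied channel program at ccw[3] */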
+	if (skb) {
+		int rc = 0;
+		unsigned long saveflags = 0;
+		clear_normalized_cda(&ch->ccw[4]);
+		ch->ccw[4].count = skb->len;
+		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
+			CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
+				"%s: %s: IDAL alloc failed",
+						CTCM_FUNTAIL, ch->id);
+			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+			ctcm_chx_restart(fi, event, arg);
+				goto done;
+		}
+		fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
+		if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
+			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+			/* Such conditional locking is a known problem for
+			 * sparse because it's nondeterministic in static analysis.
+			 * Warnings should be ignored here. */
+		if (do_debug_ccw)
+			ctcmpc_dumpit((char *)&ch->ccw[3],
+					sizeof(struct ccw1) * 3);
+
+		rc = ccw_device_start(ch->cdev, &ch->ccw[3],
+						(unsigned long)ch, 0xff, 0);
+		if (event == CTC_EVENT_TIMER)
+			spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
+					saveflags);
+		if (rc != 0) {
+			fsm_deltimer(&ch->timer);
+			ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
+			ctcm_purge_skb_queue(&ch->io_queue);
+		}
+	}
+done:
+	return;
+}
+
+/**
+ * Handle fatal errors during an I/O command.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	int rd = CHANNEL_DIRECTION(ch->flags);
+
+	fsm_deltimer(&ch->timer);
+	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+		"%s: %s: %s unrecoverable channel error",
+			CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
+
+	if (IS_MPC(ch)) {
+		priv->stats.tx_dropped++;
+		priv->stats.tx_errors++;
+	}
+	if (rd == CTCM_READ) {
+		fsm_newstate(fi, CTC_STATE_RXERR);
+		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+	} else {
+		fsm_newstate(fi, CTC_STATE_TXERR);
+		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+	}
+}
+
+/*
+ * The ctcm statemachine for a channel.
+ */
+const fsm_node ch_fsm[] = {
+	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop  },
+	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start  },
+	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
+	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
+
+	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start  },
+
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	chx_firstio  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	chx_rxidle  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	chx_firstio  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	chx_rx  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	chx_rx  },
+
+	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	chx_firstio  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop  },
+	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart  },
+	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped  },
+	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
+	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
+	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	chx_txdone  },
+	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_txretry  },
+	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_txretry  },
+	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry  },
+	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+};
+
+int ch_fsm_len = ARRAY_SIZE(ch_fsm);
+
+/*
+ * MPC actions for the MPC channel statemachine.
+ * Handling of the MPC protocol requires an extra statemachine and
+ * actions, which are prefixed ctcmpc_.
+ * ctc_ch_states and ctc_ch_state_names as well as
+ * ctc_ch_events and ctc_ch_event_names share the ctcm definitions,
+ * which are extended by some elements.
+ */
+
+/*
+ * Actions for mpc channel statemachine.
+ */
+
+/**
+ * Normal data has been sent. Free the corresponding
+ * skb (it's in io_queue), reset dev->tbusy and
+ * revert to idle state.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
+{
+	struct channel		*ch = arg;
+	struct net_device	*dev = ch->netdev;
+	struct ctcm_priv	*priv = dev->ml_priv;
+	struct mpc_group	*grp = priv->mpcg;
+	struct sk_buff		*skb;
+	int		first = 1;
+	int		i;
+	__u32		data_space;
+	unsigned long	duration;
+	struct sk_buff	*peekskb;
+	int		rc;
+	struct th_header *header;
+	struct pdu	*p_header;
+	struct timespec done_stamp = current_kernel_time(); /* xtime */
+
+	CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
+			__func__, dev->name, smp_processor_id());
+
+	duration =
+		(done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
+		(done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
+	if (duration > ch->prof.tx_time)
+		ch->prof.tx_time = duration;
+
+	if (ch->irb->scsw.cmd.count != 0)
+		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
+			"%s(%s): TX not complete, remaining %d bytes",
+			     CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
+	fsm_deltimer(&ch->timer);
+	while ((skb = skb_dequeue(&ch->io_queue))) {
+		priv->stats.tx_packets++;
+		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
+		if (first) {
+			priv->stats.tx_bytes += 2;
+			first = 0;
+		}
+		atomic_dec(&skb->users);
+		dev_kfree_skb_irq(skb);
+	}
+	spin_lock(&ch->collect_lock);
+	clear_normalized_cda(&ch->ccw[4]);
+	if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
+		spin_unlock(&ch->collect_lock);
+		fsm_newstate(fi, CTC_STATE_TXIDLE);
+				goto done;
+	}
+
+	if (ctcm_checkalloc_buffer(ch)) {
+		spin_unlock(&ch->collect_lock);
+				goto done;
+	}
+	ch->trans_skb->data = ch->trans_skb_data;
+	skb_reset_tail_pointer(ch->trans_skb);
+	ch->trans_skb->len = 0;
+	if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
+		ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
+	if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
+		ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
+	i = 0;
+	p_header = NULL;
+	data_space = grp->group_max_buflen - TH_HEADER_LENGTH;
+
+	CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
+		       " data_space:%04x\n",
+		       __func__, data_space);
+
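+	/* Each collected skb is expected to start with its PDU header (the
+	 * copied data is addressed as struct pdu below): set pdu_flag to
+	 * 0x60 for ETH_P_SNAP frames, 0x20 otherwise; the final PDU is
+	 * marked PDU_LAST after the loop. */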
+	while ((skb = skb_dequeue(&ch->collect_queue))) {
+		memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
+		p_header = (struct pdu *)
+			(skb_tail_pointer(ch->trans_skb) - skb->len);
+		p_header->pdu_flag = 0x00;
+		if (skb->protocol == ntohs(ETH_P_SNAP))
+			p_header->pdu_flag |= 0x60;
+		else
+			p_header->pdu_flag |= 0x20;
+
+		CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
+				__func__, ch->trans_skb->len);
+		CTCM_PR_DBGDATA("%s: pdu header and data for up"
+				" to 32 bytes sent to vtam\n", __func__);
+		CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));
+
+		ch->collect_len -= skb->len;
+		data_space -= skb->len;
+		priv->stats.tx_packets++;
+		priv->stats.tx_bytes += skb->len;
+		atomic_dec(&skb->users);
+		dev_kfree_skb_any(skb);
+		peekskb = skb_peek(&ch->collect_queue);
+		/* stop when the queue is drained or the next skb won't fit */
+		if (!peekskb || peekskb->len > data_space)
+			break;
+		i++;
+	}
+	/* p_header points to the last one we handled */
+	if (p_header)
+		p_header->pdu_flag |= PDU_LAST;	/*Say it's the last one*/
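+	/* prepend the MPC transport header (TH) carrying the next send
+	 * sequence number in front of the assembled PDUs */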
+	header = kzalloc(TH_HEADER_LENGTH, gfp_type());
+	if (!header) {
+		spin_unlock(&ch->collect_lock);
+		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
+				goto done;
+	}
+	header->th_ch_flag = TH_HAS_PDU;  /* Normal data */
+	ch->th_seq_num++;
+	header->th_seq_num = ch->th_seq_num;
+
+	CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
+					__func__, ch->th_seq_num);
+
+	memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
+		TH_HEADER_LENGTH);	/* put the TH on the packet */
+
+	kfree(header);
+
+	CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
+		       __func__, ch->trans_skb->len);
+	CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
+			"data to vtam from collect_q\n", __func__);
+	CTCM_D3_DUMP((char *)ch->trans_skb->data,
+				min_t(int, ch->trans_skb->len, 50));
+
+	spin_unlock(&ch->collect_lock);
+	clear_normalized_cda(&ch->ccw[1]);
+
+	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
+			(void *)(unsigned long)ch->ccw[1].cda,
+			ch->trans_skb->data);
+	ch->ccw[1].count = ch->max_bufsize;
+
+	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
+		dev_kfree_skb_any(ch->trans_skb);
+		ch->trans_skb = NULL;
+		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
+			"%s: %s: IDAL alloc failed",
+				CTCM_FUNTAIL, ch->id);
+		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
+		return;
+	}
+
+	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
+			(void *)(unsigned long)ch->ccw[1].cda,
+			ch->trans_skb->data);
+
+	ch->ccw[1].count = ch->trans_skb->len;
+	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+	ch->prof.send_stamp = current_kernel_time(); /* xtime */
+	if (do_debug_ccw)
+		ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
+	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+					(unsigned long)ch, 0xff, 0);
+	ch->prof.doios_multi++;
+	if (rc != 0) {
+		priv->stats.tx_dropped += i;
+		priv->stats.tx_errors += i;
+		fsm_deltimer(&ch->timer);
+		ctcm_ccw_check_rc(ch, rc, "chained TX");
+	}
+done:
+	ctcm_clear_busy(dev);
+	return;
+}
+
+/**
+ * Got normal data, check for sanity, queue it up, allocate a new buffer,
+ * trigger bottom half, and initiate next read.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
+{
+	struct channel		*ch = arg;
+	struct net_device	*dev = ch->netdev;
+	struct ctcm_priv	*priv = dev->ml_priv;
+	struct mpc_group	*grp = priv->mpcg;
+	struct sk_buff		*skb = ch->trans_skb;
+	struct sk_buff		*new_skb;
+	unsigned long		saveflags = 0;	/* avoids compiler warning */
+	int len	= ch->max_bufsize - ch->irb->scsw.cmd.count;
+
+	CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
+			CTCM_FUNTAIL, dev->name, smp_processor_id(),
+				ch->id, ch->max_bufsize, len);
+	fsm_deltimer(&ch->timer);
+
+	if (skb == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): TRANS_SKB = NULL",
+				CTCM_FUNTAIL, dev->name);
+			goto again;
+	}
+
+	if (len < TH_HEADER_LENGTH) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): packet length %d too short",
+					CTCM_FUNTAIL, dev->name, len);
+		priv->stats.rx_dropped++;
+		priv->stats.rx_length_errors++;
+	} else {
+		/* must have valid th header or game over */
+		__u32	block_len = len;
+		len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
+		new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);
+
+		if (new_skb == NULL) {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): skb allocation failed",
+						CTCM_FUNTAIL, dev->name);
+			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
+					goto again;
+		}
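+		/* hand the received block to the bottom half: copy it out of
+		 * trans_skb and let the channel tasklet unpack it */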
+		switch (fsm_getstate(grp->fsm)) {
+		case MPCG_STATE_RESET:
+		case MPCG_STATE_INOP:
+			dev_kfree_skb_any(new_skb);
+			break;
+		case MPCG_STATE_FLOWC:
+		case MPCG_STATE_READY:
+			memcpy(skb_put(new_skb, block_len),
+					       skb->data, block_len);
+			skb_queue_tail(&ch->io_queue, new_skb);
+			tasklet_schedule(&ch->ch_tasklet);
+			break;
+		default:
+			memcpy(skb_put(new_skb, len), skb->data, len);
+			skb_queue_tail(&ch->io_queue, new_skb);
+			tasklet_hi_schedule(&ch->ch_tasklet);
+			break;
+		}
+	}
+
+again:
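+	/* restart the next read only while the group is in FLOWC or READY */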
+	switch (fsm_getstate(grp->fsm)) {
+	int rc, dolock;
+	case MPCG_STATE_FLOWC:
+	case MPCG_STATE_READY:
+		if (ctcm_checkalloc_buffer(ch))
+			break;
+		ch->trans_skb->data = ch->trans_skb_data;
+		skb_reset_tail_pointer(ch->trans_skb);
+		ch->trans_skb->len = 0;
+		ch->ccw[1].count = ch->max_bufsize;
+		if (do_debug_ccw)
+			ctcmpc_dumpit((char *)&ch->ccw[0],
+					sizeof(struct ccw1) * 3);
+		dolock = !in_irq();
+		if (dolock)
+			spin_lock_irqsave(
+				get_ccwdev_lock(ch->cdev), saveflags);
+		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+						(unsigned long)ch, 0xff, 0);
+		if (dolock) /* see remark about conditional locking */
+			spin_unlock_irqrestore(
+				get_ccwdev_lock(ch->cdev), saveflags);
+		if (rc != 0)
+			ctcm_ccw_check_rc(ch, rc, "normal RX");
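+		/* fall through */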
+	default:
+		break;
+	}
+
+	CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
+			__func__, dev->name, ch, ch->id);
+
+}
+
+/**
+ * Initialize connection by sending a __u16 of value 0.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
+{
+	struct channel		*ch = arg;
+	struct net_device	*dev = ch->netdev;
+	struct ctcm_priv	*priv = dev->ml_priv;
+	struct mpc_group	*gptr = priv->mpcg;
+
+	CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
+				__func__, ch->id, ch);
+
+	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
+			"%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
+			CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
+			fsm_getstate(gptr->fsm), ch->protocol);
+
+	if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
+		MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");
+
+	fsm_deltimer(&ch->timer);
+	if (ctcm_checkalloc_buffer(ch))
+				goto done;
+
+	switch (fsm_getstate(fi)) {
+	case CTC_STATE_STARTRETRY:
+	case CTC_STATE_SETUPWAIT:
+		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
+			ctcmpc_chx_rxidle(fi, event, arg);
+		} else {
+			fsm_newstate(fi, CTC_STATE_TXIDLE);
+			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
+		}
+				goto done;
+	default:
+		break;
+	}
+
+	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
+		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
+
+done:
+	CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
+				__func__, ch->id, ch);
+	return;
+}
+
+/**
+ * Got initial data, check it. If OK,
+ * notify device statemachine that we are up and
+ * running.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event, just happened.
+ * arg		Generic pointer, casted from channel * upon call.
+ */
+void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv  *priv = dev->ml_priv;
+	struct mpc_group  *grp = priv->mpcg;
+	int rc;
+	unsigned long saveflags = 0;	/* avoids compiler warning */
+
+	fsm_deltimer(&ch->timer);
+	CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
+			__func__, ch->id, dev->name, smp_processor_id(),
+				fsm_getstate(fi), fsm_getstate(grp->fsm));
+
+	fsm_newstate(fi, CTC_STATE_RXIDLE);
+	/* XID processing complete */
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_FLOWC:
+	case MPCG_STATE_READY:
+		if (ctcm_checkalloc_buffer(ch))
+				goto done;
+		ch->trans_skb->data = ch->trans_skb_data;
+		skb_reset_tail_pointer(ch->trans_skb);
+		ch->trans_skb->len = 0;
+		ch->ccw[1].count = ch->max_bufsize;
+		CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
+		if (event == CTC_EVENT_START)
+			/* see remark about conditional locking */
+			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+						(unsigned long)ch, 0xff, 0);
+		if (event == CTC_EVENT_START)
+			spin_unlock_irqrestore(
+					get_ccwdev_lock(ch->cdev), saveflags);
+		if (rc != 0) {
+			fsm_newstate(fi, CTC_STATE_RXINIT);
+			ctcm_ccw_check_rc(ch, rc, "initial RX");
+				goto done;
+		}
+		break;
+	default:
+		break;
+	}
+
+	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
+done:
+	return;
+}
+
+/*
+ * ctcmpc channel FSM action
+ * called from several points in ctcmpc_ch_fsm
+ * ctcmpc only
+ */
+static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
+{
+	struct channel	  *ch     = arg;
+	struct net_device *dev    = ch->netdev;
+	struct ctcm_priv  *priv   = dev->ml_priv;
+	struct mpc_group  *grp = priv->mpcg;
+
+	CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
+		__func__, dev->name, ch->id, ch, smp_processor_id(),
+			fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_XID2INITW:
+		/* OK, start y-side XID exchanges */
+		if (!ch->in_mpcgroup)
+			break;
+		if (fsm_getstate(ch->fsm) ==  CH_XID0_PENDING) {
+			fsm_deltimer(&grp->timer);
+			fsm_addtimer(&grp->timer,
+				MPC_XID_TIMEOUT_VALUE,
+				MPCG_EVENT_TIMER, dev);
+			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
+
+		} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
+			/* attn rcvd before xid0 processed via bh */
+			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
+		break;
+	case MPCG_STATE_XID2INITX:
+	case MPCG_STATE_XID0IOWAIT:
+	case MPCG_STATE_XID0IOWAIX:
+		/* ATTN received before XID0 was processed on this channel,
+		 * but XID0 processing for the group is already underway */
+		if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
+			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
+		break;
+	case MPCG_STATE_XID7INITW:
+	case MPCG_STATE_XID7INITX:
+	case MPCG_STATE_XID7INITI:
+	case MPCG_STATE_XID7INITZ:
+		switch (fsm_getstate(ch->fsm)) {
+		case CH_XID7_PENDING:
+			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
+			break;
+		case CH_XID7_PENDING2:
+			fsm_newstate(ch->fsm, CH_XID7_PENDING3);
+			break;
+		}
+		fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
+		break;
+	}
+
+	return;
+}
+
+/*
+ * ctcmpc channel FSM action
+ * called from one point in ctcmpc_ch_fsm
+ * ctcmpc only
+ */
+static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
+{
+	struct channel	  *ch     = arg;
+	struct net_device *dev    = ch->netdev;
+	struct ctcm_priv  *priv   = dev->ml_priv;
+	struct mpc_group  *grp    = priv->mpcg;
+
+	CTCM_PR_DEBUG("%s(%s): %s\n  ChState:%s GrpState:%s\n",
+			__func__, dev->name, ch->id,
+			fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
+
+	fsm_deltimer(&ch->timer);
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_XID0IOWAIT:
+		/* VTAM wants to be primary: start y-side XID exchanges.
+		 * Only one attn-busy is received at a time, so do not
+		 * change state on every occurrence. */
+		grp->changed_side = 1;
+		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
+		break;
+	case MPCG_STATE_XID2INITW:
+		if (grp->changed_side == 1) {
+			grp->changed_side = 2;
+			break;
+		}
+		/* process began via call to establish_conn	 */
+		/* so must report failure instead of reverting	 */
+		/* back to ready-for-xid passive state		 */
+		if (grp->estconnfunc)
+				goto done;
+		/* this attnbusy is NOT the result of xside xid  */
+		/* collisions so yside must have been triggered  */
+		/* by an ATTN that was not intended to start XID */
+		/* processing. Revert back to ready-for-xid and  */
+		/* wait for ATTN interrupt to signal xid start	 */
+		if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
+			fsm_newstate(ch->fsm, CH_XID0_PENDING) ;
+			fsm_deltimer(&grp->timer);
+				goto done;
+		}
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+				goto done;
+	case MPCG_STATE_XID2INITX:
+		/* XID2 was received before ATTN busy for the second
+		 * channel. Send the y-side XID for the second channel. */
+		if (grp->changed_side == 1) {
+			grp->changed_side = 2;
+			break;
+		}
+	case MPCG_STATE_XID0IOWAIX:
+	case MPCG_STATE_XID7INITW:
+	case MPCG_STATE_XID7INITX:
+	case MPCG_STATE_XID7INITI:
+	case MPCG_STATE_XID7INITZ:
+	default:
+		/* multiple attn-busy interrupts indicate that we are too far
+		 * out of sync and are certainly not part of valid MPC group
+		 * negotiations */
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+				goto done;
+	}
+
+	if (grp->changed_side == 1) {
+		fsm_deltimer(&grp->timer);
+		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
+			     MPCG_EVENT_TIMER, dev);
+	}
+	if (ch->in_mpcgroup)
+		fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
+	else
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): channel %s not added to group",
+				CTCM_FUNTAIL, dev->name, ch->id);
+
+done:
+	return;
+}
+
+/*
+ * ctcmpc channel FSM action
+ * called from several points in ctcmpc_ch_fsm
+ * ctcmpc only
+ */
+static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
+{
+	struct channel	   *ch	   = arg;
+	struct net_device  *dev    = ch->netdev;
+	struct ctcm_priv   *priv   = dev->ml_priv;
+	struct mpc_group   *grp    = priv->mpcg;
+
+	fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
+	return;
+}
+
+/*
+ * ctcmpc channel FSM action
+ * called from several points in ctcmpc_ch_fsm
+ * ctcmpc only
+ */
+static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
+{
+	struct channel *ach = arg;
+	struct net_device *dev = ach->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp = priv->mpcg;
+	struct channel *wch = priv->channel[CTCM_WRITE];
+	struct channel *rch = priv->channel[CTCM_READ];
+	struct sk_buff *skb;
+	struct th_sweep *header;
+	int rc = 0;
+	unsigned long saveflags = 0;
+
+	CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
+			__func__, smp_processor_id(), ach, ach->id);
+
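+	/* Sweep frames are sent from the write channel only once it is idle;
+	 * when all pending sweep requests/responses are answered, both TH
+	 * sequence counters are reset, resynchronizing with the peer. */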
+	if (grp->in_sweep == 0)
+				goto done;
+
+	CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n" ,
+				__func__, wch->th_seq_num);
+	CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n" ,
+				__func__, rch->th_seq_num);
+
+	if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
+		/* give the previous IO time to complete */
+		fsm_addtimer(&wch->sweep_timer,
+			200, CTC_EVENT_RSWEEP_TIMER, wch);
+				goto done;
+	}
+
+	skb = skb_dequeue(&wch->sweep_queue);
+	if (!skb)
+				goto done;
+
+	if (set_normalized_cda(&wch->ccw[4], skb->data)) {
+		grp->in_sweep = 0;
+		ctcm_clear_busy_do(dev);
+		dev_kfree_skb_any(skb);
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+				goto done;
+	} else {
+		atomic_inc(&skb->users);
+		skb_queue_tail(&wch->io_queue, skb);
+	}
+
+	/* send out the sweep */
+	wch->ccw[4].count = skb->len;
+
+	header = (struct th_sweep *)skb->data;
+	switch (header->th.th_ch_flag) {
+	case TH_SWEEP_REQ:
+		grp->sweep_req_pend_num--;
+		break;
+	case TH_SWEEP_RESP:
+		grp->sweep_rsp_pend_num--;
+		break;
+	}
+
+	header->sw.th_last_seq = wch->th_seq_num;
+
+	CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
+	CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
+	CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);
+
+	fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
+	fsm_newstate(wch->fsm, CTC_STATE_TX);
+
+	spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
+	wch->prof.send_stamp = current_kernel_time(); /* xtime */
+	rc = ccw_device_start(wch->cdev, &wch->ccw[3],
+					(unsigned long) wch, 0xff, 0);
+	spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
+
+	if ((grp->sweep_req_pend_num == 0) &&
+	   (grp->sweep_rsp_pend_num == 0)) {
+		grp->in_sweep = 0;
+		rch->th_seq_num = 0x00;
+		wch->th_seq_num = 0x00;
+		ctcm_clear_busy_do(dev);
+	}
+
+	CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n" ,
+			__func__, wch->th_seq_num, rch->th_seq_num);
+
+	if (rc != 0)
+		ctcm_ccw_check_rc(wch, rc, "send sweep");
+
+done:
+	return;
+}
+
+
+/*
+ * The ctcmpc statemachine for a channel.
+ */
+
+const fsm_node ctcmpc_ch_fsm[] = {
+	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop  },
+	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start  },
+	{ CTC_STATE_STOPPED,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
+	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
+
+	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RCRESET,	ctcm_chx_stop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RSRESET,	ctcm_chx_stop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rxidle  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_firstio  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CH_XID0_PENDING,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
+	{ CH_XID0_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
+	{ CH_XID0_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CH_XID0_PENDING,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CH_XID0_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CH_XID0_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CH_XID0_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CH_XID0_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CH_XID0_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CH_XID0_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
+
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTNBUSY,	ctcmpc_chx_attnbusy  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+
+	{ CH_XID7_PENDING,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
+	{ CH_XID7_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CH_XID7_PENDING,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CH_XID7_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CH_XID7_PENDING,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
+	{ CH_XID7_PENDING,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+
+	{ CH_XID7_PENDING1,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+
+	{ CH_XID7_PENDING2,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+
+	{ CH_XID7_PENDING3,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+
+	{ CH_XID7_PENDING4,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
+
+	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
+
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_fail  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
+
+	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop  },
+	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart  },
+	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped  },
+	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
+	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
+	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_TERM,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+	{ CTC_STATE_TERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+
+	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+
+	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	ctcmpc_chx_txdone  },
+	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_fail  },
+	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_fail  },
+	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry  },
+	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_TX,		CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
+	{ CTC_STATE_TX,		CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+
+	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXERR,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+};
+
+int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
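+
+/*
+ * The (state, event, action) triples above are only templates: they are
+ * handed to the generic fsm code, which invokes the listed action
+ * whenever fsm_event() is called for the matching state/event pair.
+ * A rough sketch of the registration, modelled on the dev_fsm
+ * registration in ctcm_init_netdevice() further below (the name string
+ * is illustrative only, and the actual call site for the channel FSMs
+ * is not part of this file section):
+ *
+ *	fsm = init_fsm("mpc_ch", ctc_ch_state_names, ctc_ch_event_names,
+ *		       CTC_MPC_NR_STATES, CTC_MPC_NR_EVENTS,
+ *		       ctcmpc_ch_fsm, mpc_ch_fsm_len, GFP_KERNEL);
+ */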
+
+/*
+ * Actions for interface - statemachine.
+ */
+
+/**
+ * Start up the channels by sending CTC_EVENT_START to each of them.
+ *
+ * fi		An instance of an interface statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from struct net_device * upon call.
+ */
+static void dev_action_start(fsm_instance *fi, int event, void *arg)
+{
+	struct net_device *dev = arg;
+	struct ctcm_priv *priv = dev->ml_priv;
+	int direction;
+
+	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
+
+	fsm_deltimer(&priv->restart_timer);
+	fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
+	if (IS_MPC(priv))
+		priv->mpcg->channels_terminating = 0;
+	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
+		struct channel *ch = priv->channel[direction];
+		fsm_event(ch->fsm, CTC_EVENT_START, ch);
+	}
+}
+
+/**
+ * Shut down the channels by sending CTC_EVENT_STOP to each of them.
+ *
+ * fi		An instance of an interface statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from struct net_device * upon call.
+ */
+static void dev_action_stop(fsm_instance *fi, int event, void *arg)
+{
+	int direction;
+	struct net_device *dev = arg;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
+
+	fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
+	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
+		struct channel *ch = priv->channel[direction];
+		fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
+		ch->th_seq_num = 0x00;
+		CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
+				__func__, ch->th_seq_num);
+	}
+	if (IS_MPC(priv))
+		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
+}
+
+static void dev_action_restart(fsm_instance *fi, int event, void *arg)
+{
+	int restart_timer;
+	struct net_device *dev = arg;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCMY_DBF_DEV_NAME(TRACE, dev, "");
+
+	if (IS_MPC(priv)) {
+		restart_timer = CTCM_TIME_1_SEC;
+	} else {
+		restart_timer = CTCM_TIME_5_SEC;
+	}
+	dev_info(&dev->dev, "Restarting device\n");
+
+	dev_action_stop(fi, event, arg);
+	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
+	if (IS_MPC(priv))
+		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
+
+	/*
+	 * Going back into the start sequence too quickly can result in
+	 * the other side becoming unreachable, due to the sense data
+	 * reported when the I/O is aborted.
+	 */
+	fsm_addtimer(&priv->restart_timer, restart_timer,
+			DEV_EVENT_START, dev);
+}
+
+/**
+ * Called from channel statemachine
+ * when a channel is up and running.
+ *
+ * fi		An instance of an interface statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from struct net_device * upon call.
+ */
+static void dev_action_chup(fsm_instance *fi, int event, void *arg)
+{
+	struct net_device *dev = arg;
+	struct ctcm_priv *priv = dev->ml_priv;
+	int dev_stat = fsm_getstate(fi);
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
+			"%s(%s): priv = %p [%d,%d]\n ",	CTCM_FUNTAIL,
+				dev->name, dev->ml_priv, dev_stat, event);
+
+	switch (fsm_getstate(fi)) {
+	case DEV_STATE_STARTWAIT_RXTX:
+		if (event == DEV_EVENT_RXUP)
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
+		else
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
+		break;
+	case DEV_STATE_STARTWAIT_RX:
+		if (event == DEV_EVENT_RXUP) {
+			fsm_newstate(fi, DEV_STATE_RUNNING);
+			dev_info(&dev->dev,
+				"Connected with remote side\n");
+			ctcm_clear_busy(dev);
+		}
+		break;
+	case DEV_STATE_STARTWAIT_TX:
+		if (event == DEV_EVENT_TXUP) {
+			fsm_newstate(fi, DEV_STATE_RUNNING);
+			dev_info(&dev->dev,
+				"Connected with remote side\n");
+			ctcm_clear_busy(dev);
+		}
+		break;
+	case DEV_STATE_STOPWAIT_TX:
+		if (event == DEV_EVENT_RXUP)
+			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
+		break;
+	case DEV_STATE_STOPWAIT_RX:
+		if (event == DEV_EVENT_TXUP)
+			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
+		break;
+	}
+
+	if (IS_MPC(priv)) {
+		if (event == DEV_EVENT_RXUP)
+			mpc_channel_action(priv->channel[CTCM_READ],
+				CTCM_READ, MPC_CHANNEL_ADD);
+		else
+			mpc_channel_action(priv->channel[CTCM_WRITE],
+				CTCM_WRITE, MPC_CHANNEL_ADD);
+	}
+}
+
+/**
+ * Called from device statemachine
+ * when a channel has been shut down.
+ *
+ * fi		An instance of an interface statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from struct net_device * upon call.
+ */
+static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
+{
+
+	struct net_device *dev = arg;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
+
+	switch (fsm_getstate(fi)) {
+	case DEV_STATE_RUNNING:
+		if (event == DEV_EVENT_TXDOWN)
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
+		else
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
+		break;
+	case DEV_STATE_STARTWAIT_RX:
+		if (event == DEV_EVENT_TXDOWN)
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
+		break;
+	case DEV_STATE_STARTWAIT_TX:
+		if (event == DEV_EVENT_RXDOWN)
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
+		break;
+	case DEV_STATE_STOPWAIT_RXTX:
+		if (event == DEV_EVENT_TXDOWN)
+			fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
+		else
+			fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
+		break;
+	case DEV_STATE_STOPWAIT_RX:
+		if (event == DEV_EVENT_RXDOWN)
+			fsm_newstate(fi, DEV_STATE_STOPPED);
+		break;
+	case DEV_STATE_STOPWAIT_TX:
+		if (event == DEV_EVENT_TXDOWN)
+			fsm_newstate(fi, DEV_STATE_STOPPED);
+		break;
+	}
+	if (IS_MPC(priv)) {
+		if (event == DEV_EVENT_RXDOWN)
+			mpc_channel_action(priv->channel[CTCM_READ],
+				CTCM_READ, MPC_CHANNEL_REMOVE);
+		else
+			mpc_channel_action(priv->channel[CTCM_WRITE],
+				CTCM_WRITE, MPC_CHANNEL_REMOVE);
+	}
+}
+
+const fsm_node dev_fsm[] = {
+	{ DEV_STATE_STOPPED,        DEV_EVENT_START,   dev_action_start   },
+	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_START,   dev_action_start   },
+	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_TXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RESTART, dev_action_restart },
+	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_START,   dev_action_start   },
+	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXUP,    dev_action_chup    },
+	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_TXUP,    dev_action_chup    },
+	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RESTART, dev_action_restart },
+	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_START,   dev_action_start   },
+	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RXUP,    dev_action_chup    },
+	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXUP,    dev_action_chup    },
+	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RESTART, dev_action_restart },
+	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP,    dev_action_stop    },
+	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP,    dev_action_chup    },
+	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP,    dev_action_chup    },
+	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
+	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_STOP,    dev_action_stop    },
+	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXUP,    dev_action_chup    },
+	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_TXUP,    dev_action_chup    },
+	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RESTART, dev_action_restart },
+	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_STOP,    dev_action_stop    },
+	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RXUP,    dev_action_chup    },
+	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXUP,    dev_action_chup    },
+	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RESTART, dev_action_restart },
+	{ DEV_STATE_RUNNING,        DEV_EVENT_STOP,    dev_action_stop    },
+	{ DEV_STATE_RUNNING,        DEV_EVENT_RXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_RUNNING,        DEV_EVENT_TXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_RUNNING,        DEV_EVENT_TXUP,    ctcm_action_nop    },
+	{ DEV_STATE_RUNNING,        DEV_EVENT_RXUP,    ctcm_action_nop    },
+	{ DEV_STATE_RUNNING,        DEV_EVENT_RESTART, dev_action_restart },
+};
+
+int dev_fsm_len = ARRAY_SIZE(dev_fsm);
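+
+/*
+ * For orientation, a successful startup walks the table above roughly as
+ *
+ *	STOPPED        --START--> STARTWAIT_RXTX   (dev_action_start)
+ *	STARTWAIT_RXTX --RXUP-->  STARTWAIT_TX     (dev_action_chup)
+ *	STARTWAIT_TX   --TXUP-->  RUNNING          (dev_action_chup)
+ *
+ * while channel failures and interface shutdown travel the corresponding
+ * CHDOWN/STOP rows back towards STOPPED.
+ */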
+
+/* --- This is the END my friend --- */
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_fsms.h b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_fsms.h
new file mode 100644
index 0000000..046d077
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_fsms.h
@@ -0,0 +1,358 @@
+/*
+ * drivers/s390/net/ctcm_fsms.h
+ *
+ * Copyright IBM Corp. 2001, 2007
+ * Authors: 	Fritz Elfert (felfert@millenux.com)
+ * 		Peter Tiedemann (ptiedem@de.ibm.com)
+ * 	MPC additions :
+ *		Belinda Thompson (belindat@us.ibm.com)
+ *		Andy Richter (richtera@us.ibm.com)
+ */
+#ifndef _CTCM_FSMS_H_
+#define _CTCM_FSMS_H_
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/dst.h>
+
+#include <linux/io.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <linux/uaccess.h>
+
+#include <asm/idals.h>
+
+#include "fsm.h"
+#include "ctcm_main.h"
+
+/*
+ * Definitions for the channel statemachine(s) for ctc and ctcmpc
+ *
+ * To allow better kerntyping, prefix-less definitions for channel states
+ * and channel events have been replaced:
+ * ch_event... -> ctc_ch_event...
+ * CH_EVENT... -> CTC_EVENT...
+ * ch_state... -> ctc_ch_state...
+ * CH_STATE... -> CTC_STATE...
+ */
+/*
+ * Events of the channel statemachine(s) for ctc and ctcmpc
+ */
+enum ctc_ch_events {
+	/*
+	 * Events, representing return code of
+	 * I/O operations (ccw_device_start, ccw_device_halt et al.)
+	 */
+	CTC_EVENT_IO_SUCCESS,
+	CTC_EVENT_IO_EBUSY,
+	CTC_EVENT_IO_ENODEV,
+	CTC_EVENT_IO_UNKNOWN,
+
+	CTC_EVENT_ATTNBUSY,
+	CTC_EVENT_ATTN,
+	CTC_EVENT_BUSY,
+	/*
+	 * Events, representing unit-check
+	 */
+	CTC_EVENT_UC_RCRESET,
+	CTC_EVENT_UC_RSRESET,
+	CTC_EVENT_UC_TXTIMEOUT,
+	CTC_EVENT_UC_TXPARITY,
+	CTC_EVENT_UC_HWFAIL,
+	CTC_EVENT_UC_RXPARITY,
+	CTC_EVENT_UC_ZERO,
+	CTC_EVENT_UC_UNKNOWN,
+	/*
+	 * Events, representing subchannel-check
+	 */
+	CTC_EVENT_SC_UNKNOWN,
+	/*
+	 * Events, representing machine checks
+	 */
+	CTC_EVENT_MC_FAIL,
+	CTC_EVENT_MC_GOOD,
+	/*
+	 * Event, representing normal IRQ
+	 */
+	CTC_EVENT_IRQ,
+	CTC_EVENT_FINSTAT,
+	/*
+	 * Event, representing timer expiry.
+	 */
+	CTC_EVENT_TIMER,
+	/*
+	 * Events, representing commands from upper levels.
+	 */
+	CTC_EVENT_START,
+	CTC_EVENT_STOP,
+	CTC_NR_EVENTS,
+	/*
+	 * additional MPC events
+	 */
+	CTC_EVENT_SEND_XID = CTC_NR_EVENTS,
+	CTC_EVENT_RSWEEP_TIMER,
+	/*
+	 * MUST always be the last element!
+	 */
+	CTC_MPC_NR_EVENTS,
+};
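+
+/*
+ * Note on the numbering above: CTC_NR_EVENTS terminates the set used by
+ * the plain CTC channel FSM, while the MPC-only events are appended on
+ * top of it (CTC_EVENT_SEND_XID = CTC_NR_EVENTS) and CTC_MPC_NR_EVENTS
+ * is the value the MPC FSM is meant to be dimensioned with.  The state
+ * enum below uses the same pattern (CTC_NR_STATES / CTC_MPC_NR_STATES).
+ */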
+
+/*
+ * States of the channel statemachine(s) for ctc and ctcmpc.
+ */
+enum ctc_ch_states {
+	/*
+	 * Channel not assigned to any device,
+	 * initial state, direction invalid
+	 */
+	CTC_STATE_IDLE,
+	/*
+	 * Channel assigned but not operating
+	 */
+	CTC_STATE_STOPPED,
+	CTC_STATE_STARTWAIT,
+	CTC_STATE_STARTRETRY,
+	CTC_STATE_SETUPWAIT,
+	CTC_STATE_RXINIT,
+	CTC_STATE_TXINIT,
+	CTC_STATE_RX,
+	CTC_STATE_TX,
+	CTC_STATE_RXIDLE,
+	CTC_STATE_TXIDLE,
+	CTC_STATE_RXERR,
+	CTC_STATE_TXERR,
+	CTC_STATE_TERM,
+	CTC_STATE_DTERM,
+	CTC_STATE_NOTOP,
+	CTC_NR_STATES,     /* MUST be the last element of non-expanded states */
+	/*
+	 * additional MPC states
+	 */
+	CH_XID0_PENDING = CTC_NR_STATES,
+	CH_XID0_INPROGRESS,
+	CH_XID7_PENDING,
+	CH_XID7_PENDING1,
+	CH_XID7_PENDING2,
+	CH_XID7_PENDING3,
+	CH_XID7_PENDING4,
+	CTC_MPC_NR_STATES, /* MUST be the last element of expanded mpc states */
+};
+
+extern const char *ctc_ch_event_names[];
+
+extern const char *ctc_ch_state_names[];
+
+void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg);
+void ctcm_purge_skb_queue(struct sk_buff_head *q);
+void fsm_action_nop(fsm_instance *fi, int event, void *arg);
+
+/*
+ * ----- non-static actions for ctcm channel statemachine -----
+ *
+ */
+void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg);
+
+/*
+ * ----- FSM (state/event/action) of the ctcm channel statemachine -----
+ */
+extern const fsm_node ch_fsm[];
+extern int ch_fsm_len;
+
+
+/*
+ * ----- non-static actions for ctcmpc channel statemachine ----
+ *
+ */
+/* shared :
+void ctcm_chx_txidle(fsm_instance * fi, int event, void *arg);
+ */
+void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg);
+
+/*
+ * ----- FSM (state/event/action) of the ctcmpc channel statemachine -----
+ */
+extern const fsm_node ctcmpc_ch_fsm[];
+extern int mpc_ch_fsm_len;
+
+/*
+ * Definitions for the device interface statemachine for ctc and mpc
+ */
+
+/*
+ * States of the device interface statemachine.
+ */
+enum dev_states {
+	DEV_STATE_STOPPED,
+	DEV_STATE_STARTWAIT_RXTX,
+	DEV_STATE_STARTWAIT_RX,
+	DEV_STATE_STARTWAIT_TX,
+	DEV_STATE_STOPWAIT_RXTX,
+	DEV_STATE_STOPWAIT_RX,
+	DEV_STATE_STOPWAIT_TX,
+	DEV_STATE_RUNNING,
+	/*
+	 * MUST always be the last element!
+	 */
+	CTCM_NR_DEV_STATES
+};
+
+extern const char *dev_state_names[];
+
+/*
+ * Events of the device interface statemachine.
+ * ctcm and ctcmpc
+ */
+enum dev_events {
+	DEV_EVENT_START,
+	DEV_EVENT_STOP,
+	DEV_EVENT_RXUP,
+	DEV_EVENT_TXUP,
+	DEV_EVENT_RXDOWN,
+	DEV_EVENT_TXDOWN,
+	DEV_EVENT_RESTART,
+	/*
+	 * MUST always be the last element!
+	 */
+	CTCM_NR_DEV_EVENTS
+};
+
+extern const char *dev_event_names[];
+
+/*
+ * Actions for the device interface statemachine.
+ * ctc and ctcmpc
+ */
+/*
+static void dev_action_start(fsm_instance * fi, int event, void *arg);
+static void dev_action_stop(fsm_instance * fi, int event, void *arg);
+static void dev_action_restart(fsm_instance *fi, int event, void *arg);
+static void dev_action_chup(fsm_instance * fi, int event, void *arg);
+static void dev_action_chdown(fsm_instance * fi, int event, void *arg);
+*/
+
+/*
+ * The (state/event/action) fsm table of the device interface statemachine.
+ * ctcm and ctcmpc
+ */
+extern const fsm_node dev_fsm[];
+extern int dev_fsm_len;
+
+
+/*
+ * Definitions for the MPC Group statemachine
+ */
+
+/*
+ * MPC Group Station FSM States
+
+State Name		When In This State
+======================	=======================================
+MPCG_STATE_RESET	Initial State When Driver Loaded
+			We receive and send NOTHING
+
+MPCG_STATE_INOP         INOP Received.
+			Group level non-recoverable error
+
+MPCG_STATE_READY	XID exchanges for at least 1 write and
+			1 read channel have completed.
+			Group is ready for data transfer.
+
+States from ctc_mpc_alloc_channel
+==============================================================
+MPCG_STATE_XID2INITW	Awaiting XID2(0) Initiation
+			      ATTN from other side will start
+			      XID negotiations.
+			      Y-side protocol only.
+
+MPCG_STATE_XID2INITX	XID2(0) negotiations are in progress.
+			      At least 1, but not all, XID2(0)'s
+			      have been received from partner.
+
+MPCG_STATE_XID7INITW	XID2(0) complete
+			      No XID2(7)'s have yet been received.
+			      XID2(7) negotiations pending.
+
+MPCG_STATE_XID7INITX	XID2(7) negotiations in progress.
+			      At least 1, but not all, XID2(7)'s
+			      have been received from partner.
+
+MPCG_STATE_XID7INITF	XID2(7) negotiations complete.
+			      Transitioning to READY.
+
+MPCG_STATE_READY	      Ready for Data Transfer.
+
+
+States from ctc_mpc_establish_connectivity call
+==============================================================
+MPCG_STATE_XID0IOWAIT	Initiating XID2(0) negotiations.
+			      X-side protocol only.
+			      ATTN-BUSY from other side will convert
+			      this to Y-side protocol and the
+			      ctc_mpc_alloc_channel flow will begin.
+
+MPCG_STATE_XID0IOWAIX	XID2(0) negotiations are in progress.
+			      At least 1, but not all, XID2(0)'s
+			      have been received from partner.
+
+MPCG_STATE_XID7INITI	XID2(0) complete
+			      No XID2(7)'s have yet been received.
+			      XID2(7) negotiations pending.
+
+MPCG_STATE_XID7INITZ	XID2(7) negotiations in progress.
+			      At least 1, but not all, XID2(7)'s
+			      have been received from partner.
+
+MPCG_STATE_XID7INITF	XID2(7) negotiations complete.
+			      Transitioning to READY.
+
+MPCG_STATE_READY	      Ready for Data Transfer.
+
+*/
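+
+/*
+ * Condensed from the description above, the Y-side flow (entered via
+ * ctc_mpc_alloc_channel) is expected to progress
+ *
+ *	RESET -> XID2INITW -> XID2INITX -> XID7INITW -> XID7INITX
+ *	      -> XID7INITF -> READY
+ *
+ * and the X-side flow (entered via ctc_mpc_establish_connectivity)
+ *
+ *	RESET -> XID0IOWAIT -> XID0IOWAIX -> XID7INITI -> XID7INITZ
+ *	      -> XID7INITF -> READY
+ *
+ * with MPCG_STATE_INOP reachable from anywhere on a fatal group error.
+ */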
+
+enum mpcg_events {
+	MPCG_EVENT_INOP,
+	MPCG_EVENT_DISCONC,
+	MPCG_EVENT_XID0DO,
+	MPCG_EVENT_XID2,
+	MPCG_EVENT_XID2DONE,
+	MPCG_EVENT_XID7DONE,
+	MPCG_EVENT_TIMER,
+	MPCG_EVENT_DOIO,
+	MPCG_NR_EVENTS,
+};
+
+enum mpcg_states {
+	MPCG_STATE_RESET,
+	MPCG_STATE_INOP,
+	MPCG_STATE_XID2INITW,
+	MPCG_STATE_XID2INITX,
+	MPCG_STATE_XID7INITW,
+	MPCG_STATE_XID7INITX,
+	MPCG_STATE_XID0IOWAIT,
+	MPCG_STATE_XID0IOWAIX,
+	MPCG_STATE_XID7INITI,
+	MPCG_STATE_XID7INITZ,
+	MPCG_STATE_XID7INITF,
+	MPCG_STATE_FLOWC,
+	MPCG_STATE_READY,
+	MPCG_NR_STATES,
+};
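+
+/*
+ * The relative order of these states matters: ctcmpc_tx() below, for
+ * example, refuses to transmit while fsm_getstate(grp->fsm) is still
+ * below MPCG_STATE_XID2INITW, i.e. while the group sits in RESET or
+ * INOP, so new states should not be inserted in front of XID2INITW
+ * without revisiting such comparisons.
+ */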
+
+#endif
+/* --- This is the END my friend --- */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_main.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_main.c
new file mode 100644
index 0000000..11f3b07
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_main.c
@@ -0,0 +1,1895 @@
+/*
+ * drivers/s390/net/ctcm_main.c
+ *
+ * Copyright IBM Corp. 2001, 2009
+ * Author(s):
+ *	Original CTC driver(s):
+ *		Fritz Elfert (felfert@millenux.com)
+ *		Dieter Wellerdiek (wel@de.ibm.com)
+ *		Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *		Denis Joseph Barrow (barrow_dj@yahoo.com)
+ *		Jochen Roehrig (roehrig@de.ibm.com)
+ *		Cornelia Huck <cornelia.huck@de.ibm.com>
+ *	MPC additions:
+ *		Belinda Thompson (belindat@us.ibm.com)
+ *		Andy Richter (richtera@us.ibm.com)
+ *	Revived by:
+ *		Peter Tiedemann (ptiedem@de.ibm.com)
+ */
+
+#undef DEBUG
+#undef DEBUGDATA
+#undef DEBUGCCW
+
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/dst.h>
+
+#include <linux/io.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <linux/uaccess.h>
+
+#include <asm/idals.h>
+
+#include "ctcm_fsms.h"
+#include "ctcm_main.h"
+
+/* Some common global variables */
+
+/**
+ * The root device for ctcm group devices
+ */
+static struct device *ctcm_root_dev;
+
+/*
+ * Linked list of all detected channels.
+ */
+struct channel *channels;
+
+/**
+ * Unpack a just received skb and hand it over to
+ * upper layers.
+ *
+ *  ch		The channel where this skb has been received.
+ *  pskb	The received skb.
+ */
+void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
+{
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	__u16 len = *((__u16 *) pskb->data);
+
+	skb_put(pskb, 2 + LL_HEADER_LENGTH);
+	skb_pull(pskb, 2);
+	pskb->dev = dev;
+	pskb->ip_summed = CHECKSUM_UNNECESSARY;
+	while (len > 0) {
+		struct sk_buff *skb;
+		int skblen;
+		struct ll_header *header = (struct ll_header *)pskb->data;
+
+		skb_pull(pskb, LL_HEADER_LENGTH);
+		if ((ch->protocol == CTCM_PROTO_S390) &&
+		    (header->type != ETH_P_IP)) {
+			if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
+				ch->logflags |= LOG_FLAG_ILLEGALPKT;
+				/*
+				 * Check packet type only if we stick strictly
+				 * to S/390's protocol of OS390. This only
+				 * supports IP. Otherwise allow any packet
+				 * type.
+				 */
+				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+					"%s(%s): Illegal packet type 0x%04x"
+					" - dropping",
+					CTCM_FUNTAIL, dev->name, header->type);
+			}
+			priv->stats.rx_dropped++;
+			priv->stats.rx_frame_errors++;
+			return;
+		}
+		pskb->protocol = ntohs(header->type);
+		if ((header->length <= LL_HEADER_LENGTH) ||
+		    (len <= LL_HEADER_LENGTH)) {
+			if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
+				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+					"%s(%s): Illegal packet size %d(%d,%d)"
+					"- dropping",
+					CTCM_FUNTAIL, dev->name,
+					header->length, dev->mtu, len);
+				ch->logflags |= LOG_FLAG_ILLEGALSIZE;
+			}
+
+			priv->stats.rx_dropped++;
+			priv->stats.rx_length_errors++;
+			return;
+		}
+		header->length -= LL_HEADER_LENGTH;
+		len -= LL_HEADER_LENGTH;
+		if ((header->length > skb_tailroom(pskb)) ||
+			(header->length > len)) {
+			if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
+				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+					"%s(%s): Packet size %d (overrun)"
+					" - dropping", CTCM_FUNTAIL,
+						dev->name, header->length);
+				ch->logflags |= LOG_FLAG_OVERRUN;
+			}
+
+			priv->stats.rx_dropped++;
+			priv->stats.rx_length_errors++;
+			return;
+		}
+		skb_put(pskb, header->length);
+		skb_reset_mac_header(pskb);
+		len -= header->length;
+		skb = dev_alloc_skb(pskb->len);
+		if (!skb) {
+			if (!(ch->logflags & LOG_FLAG_NOMEM)) {
+				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+					"%s(%s): MEMORY allocation error",
+						CTCM_FUNTAIL, dev->name);
+				ch->logflags |= LOG_FLAG_NOMEM;
+			}
+			priv->stats.rx_dropped++;
+			return;
+		}
+		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
+					  pskb->len);
+		skb_reset_mac_header(skb);
+		skb->dev = pskb->dev;
+		skb->protocol = pskb->protocol;
+		pskb->ip_summed = CHECKSUM_UNNECESSARY;
+		skblen = skb->len;
+		/*
+		 * reset logflags
+		 */
+		ch->logflags = 0;
+		priv->stats.rx_packets++;
+		priv->stats.rx_bytes += skblen;
+		netif_rx_ni(skb);
+		if (len > 0) {
+			skb_pull(pskb, header->length);
+			if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
+				CTCM_DBF_DEV_NAME(TRACE, dev,
+					"Overrun in ctcm_unpack_skb");
+				ch->logflags |= LOG_FLAG_OVERRUN;
+				return;
+			}
+			skb_put(pskb, LL_HEADER_LENGTH);
+		}
+	}
+}
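+
+/*
+ * Framing assumed by the unpack loop above: one received block may carry
+ * several LL frames back to back, each one preceded by a struct
+ * ll_header giving its protocol type and length.  The loop strips one
+ * header at a time, copies the payload into a fresh skb for
+ * netif_rx_ni(), and keeps going until the 2-byte block length read at
+ * entry has been consumed.
+ */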
+
+/**
+ * Release a specific channel in the channel list.
+ *
+ *  ch		Pointer to channel struct to be released.
+ */
+static void channel_free(struct channel *ch)
+{
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s)", CTCM_FUNTAIL, ch->id);
+	ch->flags &= ~CHANNEL_FLAGS_INUSE;
+	fsm_newstate(ch->fsm, CTC_STATE_IDLE);
+}
+
+/**
+ * Remove a specific channel in the channel list.
+ *
+ *  ch		Pointer to channel struct to be released.
+ */
+static void channel_remove(struct channel *ch)
+{
+	struct channel **c = &channels;
+	char chid[CTCM_ID_SIZE+1];
+	int ok = 0;
+
+	if (ch == NULL)
+		return;
+	else
+		strncpy(chid, ch->id, CTCM_ID_SIZE);
+
+	channel_free(ch);
+	while (*c) {
+		if (*c == ch) {
+			*c = ch->next;
+			fsm_deltimer(&ch->timer);
+			if (IS_MPC(ch))
+				fsm_deltimer(&ch->sweep_timer);
+
+			kfree_fsm(ch->fsm);
+			clear_normalized_cda(&ch->ccw[4]);
+			if (ch->trans_skb != NULL) {
+				clear_normalized_cda(&ch->ccw[1]);
+				dev_kfree_skb_any(ch->trans_skb);
+			}
+			if (IS_MPC(ch)) {
+				tasklet_kill(&ch->ch_tasklet);
+				tasklet_kill(&ch->ch_disc_tasklet);
+				kfree(ch->discontact_th);
+			}
+			kfree(ch->ccw);
+			kfree(ch->irb);
+			kfree(ch);
+			ok = 1;
+			break;
+		}
+		c = &((*c)->next);
+	}
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s) %s", CTCM_FUNTAIL,
+			chid, ok ? "OK" : "failed");
+}
+
+/**
+ * Get a specific channel from the channel list.
+ *
+ *  type	Type of channel we are interested in.
+ *  id		Id of channel we are interested in.
+ *  direction	Direction we want to use this channel for.
+ *
+ * returns Pointer to a channel or NULL if no matching channel available.
+ */
+static struct channel *channel_get(enum ctcm_channel_types type,
+					char *id, int direction)
+{
+	struct channel *ch = channels;
+
+	while (ch && (strncmp(ch->id, id, CTCM_ID_SIZE) || (ch->type != type)))
+		ch = ch->next;
+	if (!ch) {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+				"%s(%d, %s, %d) not found in channel list\n",
+				CTCM_FUNTAIL, type, id, direction);
+	} else {
+		if (ch->flags & CHANNEL_FLAGS_INUSE)
+			ch = NULL;
+		else {
+			ch->flags |= CHANNEL_FLAGS_INUSE;
+			ch->flags &= ~CHANNEL_FLAGS_RWMASK;
+			ch->flags |= (direction == CTCM_WRITE)
+			    ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
+			fsm_newstate(ch->fsm, CTC_STATE_STOPPED);
+		}
+	}
+	return ch;
+}
+
+static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
+{
+	if (!IS_ERR(irb))
+		return 0;
+
+	CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN,
+			"irb error %ld on device %s\n",
+				PTR_ERR(irb), dev_name(&cdev->dev));
+
+	switch (PTR_ERR(irb)) {
+	case -EIO:
+		dev_err(&cdev->dev,
+			"An I/O-error occurred on the CTCM device\n");
+		break;
+	case -ETIMEDOUT:
+		dev_err(&cdev->dev,
+			"An adapter hardware operation timed out\n");
+		break;
+	default:
+		dev_err(&cdev->dev,
+			"An error occurred on the adapter hardware\n");
+	}
+	return PTR_ERR(irb);
+}
+
+
+/**
+ * Check sense of a unit check.
+ *
+ *  ch		The channel, the sense code belongs to.
+ *  sense	The sense code to inspect.
+ */
+static inline void ccw_unit_check(struct channel *ch, __u8 sense)
+{
+	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+			"%s(%s): %02x",
+				CTCM_FUNTAIL, ch->id, sense);
+
+	if (sense & SNS0_INTERVENTION_REQ) {
+		if (sense & 0x01) {
+			if (ch->sense_rc != 0x01) {
+				pr_notice(
+					"%s: The communication peer has "
+					"disconnected\n", ch->id);
+				ch->sense_rc = 0x01;
+			}
+			fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch);
+		} else {
+			if (ch->sense_rc != SNS0_INTERVENTION_REQ) {
+				pr_notice(
+					"%s: The remote operating system is "
+					"not available\n", ch->id);
+				ch->sense_rc = SNS0_INTERVENTION_REQ;
+			}
+			fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch);
+		}
+	} else if (sense & SNS0_EQUIPMENT_CHECK) {
+		if (sense & SNS0_BUS_OUT_CHECK) {
+			if (ch->sense_rc != SNS0_BUS_OUT_CHECK) {
+				CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+					"%s(%s): remote HW error %02x",
+						CTCM_FUNTAIL, ch->id, sense);
+				ch->sense_rc = SNS0_BUS_OUT_CHECK;
+			}
+			fsm_event(ch->fsm, CTC_EVENT_UC_HWFAIL, ch);
+		} else {
+			if (ch->sense_rc != SNS0_EQUIPMENT_CHECK) {
+				CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+					"%s(%s): remote read parity error %02x",
+						CTCM_FUNTAIL, ch->id, sense);
+				ch->sense_rc = SNS0_EQUIPMENT_CHECK;
+			}
+			fsm_event(ch->fsm, CTC_EVENT_UC_RXPARITY, ch);
+		}
+	} else if (sense & SNS0_BUS_OUT_CHECK) {
+		if (ch->sense_rc != SNS0_BUS_OUT_CHECK) {
+			CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+				"%s(%s): BUS OUT error %02x",
+					CTCM_FUNTAIL, ch->id, sense);
+			ch->sense_rc = SNS0_BUS_OUT_CHECK;
+		}
+		if (sense & 0x04)	/* data-streaming timeout */
+			fsm_event(ch->fsm, CTC_EVENT_UC_TXTIMEOUT, ch);
+		else			/* Data-transfer parity error */
+			fsm_event(ch->fsm, CTC_EVENT_UC_TXPARITY, ch);
+	} else if (sense & SNS0_CMD_REJECT) {
+		if (ch->sense_rc != SNS0_CMD_REJECT) {
+			CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+				"%s(%s): Command rejected",
+						CTCM_FUNTAIL, ch->id);
+			ch->sense_rc = SNS0_CMD_REJECT;
+		}
+	} else if (sense == 0) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+			"%s(%s): Unit check ZERO",
+					CTCM_FUNTAIL, ch->id);
+		fsm_event(ch->fsm, CTC_EVENT_UC_ZERO, ch);
+	} else {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+			"%s(%s): Unit check code %02x unknown",
+					CTCM_FUNTAIL, ch->id, sense);
+		fsm_event(ch->fsm, CTC_EVENT_UC_UNKNOWN, ch);
+	}
+}
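+
+/*
+ * Summary of the mapping above (sense byte of a unit check -> FSM event):
+ *
+ *	intervention required + 0x01	-> CTC_EVENT_UC_RCRESET
+ *	intervention required		-> CTC_EVENT_UC_RSRESET
+ *	equipment check + bus out check	-> CTC_EVENT_UC_HWFAIL
+ *	equipment check			-> CTC_EVENT_UC_RXPARITY
+ *	bus out check + 0x04		-> CTC_EVENT_UC_TXTIMEOUT
+ *	bus out check			-> CTC_EVENT_UC_TXPARITY
+ *	command reject			-> (no event, only logged)
+ *	sense == 0			-> CTC_EVENT_UC_ZERO
+ *	anything else			-> CTC_EVENT_UC_UNKNOWN
+ */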
+
+int ctcm_ch_alloc_buffer(struct channel *ch)
+{
+	clear_normalized_cda(&ch->ccw[1]);
+	ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA);
+	if (ch->trans_skb == NULL) {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+			"%s(%s): %s trans_skb allocation error",
+			CTCM_FUNTAIL, ch->id,
+			(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+				"RX" : "TX");
+		return -ENOMEM;
+	}
+
+	ch->ccw[1].count = ch->max_bufsize;
+	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
+		dev_kfree_skb(ch->trans_skb);
+		ch->trans_skb = NULL;
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+			"%s(%s): %s set norm_cda failed",
+			CTCM_FUNTAIL, ch->id,
+			(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+				"RX" : "TX");
+		return -ENOMEM;
+	}
+
+	ch->ccw[1].count = 0;
+	ch->trans_skb_data = ch->trans_skb->data;
+	ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
+	return 0;
+}
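+
+/*
+ * ch->ccw[1] is the fixed read/write CCW of the generic channel program
+ * (see the "static ccws" layout documented in add_channel() below); the
+ * helper above re-points its data address at the newly allocated
+ * trans_skb, so the IDAL (if one is needed) is set up once per buffer
+ * rather than per I/O request.
+ */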
+
+/*
+ * Interface API for upper network layers
+ */
+
+/**
+ * Open an interface.
+ * Called from generic network layer when ifconfig up is run.
+ *
+ *  dev		Pointer to interface struct.
+ *
+ * returns 0 on success, -ERRNO on failure. (Never fails.)
+ */
+int ctcm_open(struct net_device *dev)
+{
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
+	if (!IS_MPC(priv))
+		fsm_event(priv->fsm,	DEV_EVENT_START, dev);
+	return 0;
+}
+
+/**
+ * Close an interface.
+ * Called from generic network layer when ifconfig down is run.
+ *
+ *  dev		Pointer to interface struct.
+ *
+ * returns 0 on success, -ERRNO on failure. (Never fails.)
+ */
+int ctcm_close(struct net_device *dev)
+{
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
+	if (!IS_MPC(priv))
+		fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
+	return 0;
+}
+
+
+/**
+ * Transmit a packet.
+ * This is a helper function for ctcm_tx().
+ *
+ *  ch		Channel to be used for sending.
+ *  skb		Pointer to struct sk_buff of packet to send.
+ *            The linklevel header has already been set up
+ *            by ctcm_tx().
+ *
+ * returns 0 on success, -ERRNO on failure. (Never fails.)
+ */
+static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
+{
+	unsigned long saveflags;
+	struct ll_header header;
+	int rc = 0;
+	__u16 block_len;
+	int ccw_idx;
+	struct sk_buff *nskb;
+	unsigned long hi;
+
+	/* we need to acquire the lock for testing the state
+	 * otherwise we can have an IRQ changing the state to
+	 * TXIDLE after the test but before acquiring the lock.
+	 */
+	spin_lock_irqsave(&ch->collect_lock, saveflags);
+	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
+		int l = skb->len + LL_HEADER_LENGTH;
+
+		if (ch->collect_len + l > ch->max_bufsize - 2) {
+			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+			return -EBUSY;
+		} else {
+			atomic_inc(&skb->users);
+			header.length = l;
+			header.type = skb->protocol;
+			header.unused = 0;
+			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
+			       LL_HEADER_LENGTH);
+			skb_queue_tail(&ch->collect_queue, skb);
+			ch->collect_len += l;
+		}
+		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+				goto done;
+	}
+	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+	/*
+	 * Protect the skb against being freed by upper
+	 * layers.
+	 */
+	atomic_inc(&skb->users);
+	ch->prof.txlen += skb->len;
+	header.length = skb->len + LL_HEADER_LENGTH;
+	header.type = skb->protocol;
+	header.unused = 0;
+	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
+	block_len = skb->len + 2;
+	*((__u16 *)skb_push(skb, 2)) = block_len;
+
+	/*
+	 * IDAL support in CTCM is broken, so we have to
+	 * take care of skbs above 2 GB ourselves.
+	 */
+	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
+	if (hi) {
+		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+		if (!nskb) {
+			atomic_dec(&skb->users);
+			skb_pull(skb, LL_HEADER_LENGTH + 2);
+			ctcm_clear_busy(ch->netdev);
+			return -ENOMEM;
+		} else {
+			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
+			atomic_inc(&nskb->users);
+			atomic_dec(&skb->users);
+			dev_kfree_skb_irq(skb);
+			skb = nskb;
+		}
+	}
+
+	ch->ccw[4].count = block_len;
+	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
+		/*
+		 * idal allocation failed, try via copying to
+		 * trans_skb. trans_skb usually has a pre-allocated
+		 * idal.
+		 */
+		if (ctcm_checkalloc_buffer(ch)) {
+			/*
+			 * Remove our header. It gets added
+			 * again on retransmit.
+			 */
+			atomic_dec(&skb->users);
+			skb_pull(skb, LL_HEADER_LENGTH + 2);
+			ctcm_clear_busy(ch->netdev);
+			return -ENOMEM;
+		}
+
+		skb_reset_tail_pointer(ch->trans_skb);
+		ch->trans_skb->len = 0;
+		ch->ccw[1].count = skb->len;
+		skb_copy_from_linear_data(skb,
+				skb_put(ch->trans_skb, skb->len), skb->len);
+		atomic_dec(&skb->users);
+		dev_kfree_skb_irq(skb);
+		ccw_idx = 0;
+	} else {
+		skb_queue_tail(&ch->io_queue, skb);
+		ccw_idx = 3;
+	}
+	if (do_debug_ccw)
+		ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
+					sizeof(struct ccw1) * 3);
+	ch->retry = 0;
+	fsm_newstate(ch->fsm, CTC_STATE_TX);
+	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+	ch->prof.send_stamp = current_kernel_time(); /* xtime */
+	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
+					(unsigned long)ch, 0xff, 0);
+	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+	if (ccw_idx == 3)
+		ch->prof.doios_single++;
+	if (rc != 0) {
+		fsm_deltimer(&ch->timer);
+		ctcm_ccw_check_rc(ch, rc, "single skb TX");
+		if (ccw_idx == 3)
+			skb_dequeue_tail(&ch->io_queue);
+		/*
+		 * Remove our header. It gets added
+		 * again on retransmit.
+		 */
+		skb_pull(skb, LL_HEADER_LENGTH + 2);
+	} else if (ccw_idx == 0) {
+		struct net_device *dev = ch->netdev;
+		struct ctcm_priv *priv = dev->ml_priv;
+		priv->stats.tx_packets++;
+		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
+	}
+done:
+	ctcm_clear_busy(ch->netdev);
+	return rc;
+}
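+
+/*
+ * Resulting on-wire layout for the non-MPC path above (and what
+ * ctcm_unpack_skb() walks on the receive side):
+ *
+ *	| 2-byte block length | struct ll_header | payload |
+ *
+ * header.length covers the LL header plus payload, while the leading
+ * block length additionally includes its own two bytes.
+ */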
+
+static void ctcmpc_send_sweep_req(struct channel *rch)
+{
+	struct net_device *dev = rch->netdev;
+	struct ctcm_priv *priv;
+	struct mpc_group *grp;
+	struct th_sweep *header;
+	struct sk_buff *sweep_skb;
+	struct channel *ch;
+	/* int rc = 0; */
+
+	priv = dev->ml_priv;
+	grp = priv->mpcg;
+	ch = priv->channel[CTCM_WRITE];
+
+	/*
+	 * Sweep processing is not complete until both request and
+	 * response have completed for all read channels in the group.
+	 */
+	if (grp->in_sweep == 0) {
+		grp->in_sweep = 1;
+		grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
+		grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
+	}
+
+	sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
+
+	if (sweep_skb == NULL)	{
+		/* rc = -ENOMEM; */
+				goto nomem;
+	}
+
+	header = kmalloc(TH_SWEEP_LENGTH, gfp_type());
+
+	if (!header) {
+		dev_kfree_skb_any(sweep_skb);
+		/* rc = -ENOMEM; */
+				goto nomem;
+	}
+
+	header->th.th_seg	= 0x00;
+	header->th.th_ch_flag	= TH_SWEEP_REQ;  /* 0x0f */
+	header->th.th_blk_flag	= 0x00;
+	header->th.th_is_xid	= 0x00;
+	header->th.th_seq_num	= 0x00;
+	header->sw.th_last_seq	= ch->th_seq_num;
+
+	memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);
+
+	kfree(header);
+
+	dev->trans_start = jiffies;
+	skb_queue_tail(&ch->sweep_queue, sweep_skb);
+
+	fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
+
+	return;
+
+nomem:
+	grp->in_sweep = 0;
+	ctcm_clear_busy(dev);
+	fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+
+	return;
+}
+
+/*
+ * MPC mode version of transmit_skb
+ */
+static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
+{
+	struct pdu *p_header;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp = priv->mpcg;
+	struct th_header *header;
+	struct sk_buff *nskb;
+	int rc = 0;
+	int ccw_idx;
+	unsigned long hi;
+	unsigned long saveflags = 0;	/* avoids compiler warning */
+
+	CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n",
+			__func__, dev->name, smp_processor_id(), ch,
+					ch->id, fsm_getstate_str(ch->fsm));
+
+	if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
+		spin_lock_irqsave(&ch->collect_lock, saveflags);
+		atomic_inc(&skb->users);
+		p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
+
+		if (!p_header) {
+			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+				goto nomem_exit;
+		}
+
+		p_header->pdu_offset = skb->len;
+		p_header->pdu_proto = 0x01;
+		p_header->pdu_flag = 0x00;
+		if (skb->protocol == ntohs(ETH_P_SNAP)) {
+			p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
+		} else {
+			p_header->pdu_flag |= PDU_FIRST;
+		}
+		p_header->pdu_seq = 0;
+		memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header,
+		       PDU_HEADER_LENGTH);
+
+		CTCM_PR_DEBUG("%s(%s): Put on collect_q - skb len: %04x \n"
+				"pdu header and data for up to 32 bytes:\n",
+				__func__, dev->name, skb->len);
+		CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
+
+		skb_queue_tail(&ch->collect_queue, skb);
+		ch->collect_len += skb->len;
+		kfree(p_header);
+
+		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+			goto done;
+	}
+
+	/*
+	 * Protect the skb against being freed by upper
+	 * layers.
+	 */
+	atomic_inc(&skb->users);
+
+	/*
+	 * IDAL support in CTCM is broken, so we have to
+	 * take care of skbs above 2 GB ourselves.
+	 */
+	hi = ((unsigned long)skb->tail + TH_HEADER_LENGTH) >> 31;
+	if (hi) {
+		nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+		if (!nskb) {
+			goto nomem_exit;
+		} else {
+			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
+			atomic_inc(&nskb->users);
+			atomic_dec(&skb->users);
+			dev_kfree_skb_irq(skb);
+			skb = nskb;
+		}
+	}
+
+	p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
+
+	if (!p_header)
+		goto nomem_exit;
+
+	p_header->pdu_offset = skb->len;
+	p_header->pdu_proto = 0x01;
+	p_header->pdu_flag = 0x00;
+	p_header->pdu_seq = 0;
+	if (skb->protocol == ntohs(ETH_P_SNAP)) {
+		p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
+	} else {
+		p_header->pdu_flag |= PDU_FIRST;
+	}
+	memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header, PDU_HEADER_LENGTH);
+
+	kfree(p_header);
+
+	if (ch->collect_len > 0) {
+		spin_lock_irqsave(&ch->collect_lock, saveflags);
+		skb_queue_tail(&ch->collect_queue, skb);
+		ch->collect_len += skb->len;
+		skb = skb_dequeue(&ch->collect_queue);
+		ch->collect_len -= skb->len;
+		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+	}
+
+	p_header = (struct pdu *)skb->data;
+	p_header->pdu_flag |= PDU_LAST;
+
+	ch->prof.txlen += skb->len - PDU_HEADER_LENGTH;
+
+	header = kmalloc(TH_HEADER_LENGTH, gfp_type());
+	if (!header)
+		goto nomem_exit;
+
+	header->th_seg = 0x00;
+	header->th_ch_flag = TH_HAS_PDU;  /* Normal data */
+	header->th_blk_flag = 0x00;
+	header->th_is_xid = 0x00;          /* Just data here */
+	ch->th_seq_num++;
+	header->th_seq_num = ch->th_seq_num;
+
+	CTCM_PR_DBGDATA("%s(%s) ToVTAM_th_seq= %08x\n" ,
+		       __func__, dev->name, ch->th_seq_num);
+
+	/* put the TH on the packet */
+	memcpy(skb_push(skb, TH_HEADER_LENGTH), header, TH_HEADER_LENGTH);
+
+	kfree(header);
+
+	CTCM_PR_DBGDATA("%s(%s): skb len: %04x\n - pdu header and data for "
+			"up to 32 bytes sent to vtam:\n",
+				__func__, dev->name, skb->len);
+	CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
+
+	ch->ccw[4].count = skb->len;
+	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
+		/*
+		 * idal allocation failed, try via copying to trans_skb.
+		 * trans_skb usually has a pre-allocated idal.
+		 */
+		if (ctcm_checkalloc_buffer(ch)) {
+			/*
+			 * Remove our header.
+			 * It gets added again on retransmit.
+			 */
+				goto nomem_exit;
+		}
+
+		skb_reset_tail_pointer(ch->trans_skb);
+		ch->trans_skb->len = 0;
+		ch->ccw[1].count = skb->len;
+		memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
+		atomic_dec(&skb->users);
+		dev_kfree_skb_irq(skb);
+		ccw_idx = 0;
+		CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n"
+				"up to 32 bytes sent to vtam:\n",
+				__func__, dev->name, ch->trans_skb->len);
+		CTCM_D3_DUMP((char *)ch->trans_skb->data,
+				min_t(int, 32, ch->trans_skb->len));
+	} else {
+		skb_queue_tail(&ch->io_queue, skb);
+		ccw_idx = 3;
+	}
+	ch->retry = 0;
+	fsm_newstate(ch->fsm, CTC_STATE_TX);
+	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+
+	if (do_debug_ccw)
+		ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
+					sizeof(struct ccw1) * 3);
+
+	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+	ch->prof.send_stamp = current_kernel_time(); /* xtime */
+	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
+					(unsigned long)ch, 0xff, 0);
+	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+	if (ccw_idx == 3)
+		ch->prof.doios_single++;
+	if (rc != 0) {
+		fsm_deltimer(&ch->timer);
+		ctcm_ccw_check_rc(ch, rc, "single skb TX");
+		if (ccw_idx == 3)
+			skb_dequeue_tail(&ch->io_queue);
+	} else if (ccw_idx == 0) {
+		priv->stats.tx_packets++;
+		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
+	}
+	if (ch->th_seq_num > 0xf0000000)	/* Chosen at random. */
+		ctcmpc_send_sweep_req(ch);
+
+	goto done;
+nomem_exit:
+	CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_CRIT,
+			"%s(%s): MEMORY allocation ERROR\n",
+			CTCM_FUNTAIL, ch->id);
+	rc = -ENOMEM;
+	atomic_dec(&skb->users);
+	dev_kfree_skb_any(skb);
+	fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
+done:
+	CTCM_PR_DEBUG("Exit %s(%s)\n", __func__, dev->name);
+	return rc;
+}
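+
+/*
+ * Resulting on-wire layout for the MPC path above: a TH (transport
+ * header) carrying th_seq_num is pushed in front of the PDU header and
+ * data:
+ *
+ *	| struct th_header | struct pdu | payload |
+ *
+ * th_seq_num increases with every block sent; once it exceeds the
+ * (arbitrarily chosen) 0xf0000000 threshold a sweep request is queued
+ * via ctcmpc_send_sweep_req(), presumably so both sides can settle
+ * outstanding sequence numbers before the counter wraps.
+ */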
+
+/**
+ * Start transmission of a packet.
+ * Called from generic network device layer.
+ *
+ *  skb		Pointer to buffer containing the packet.
+ *  dev		Pointer to interface struct.
+ *
+ * returns 0 if packet consumed, !0 if packet rejected.
+ *         Note: If we return !0, then the packet is freed by
+ *               the generic network layer.
+ */
+/* first merge version - leaving both functions separated */
+static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	if (skb == NULL) {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+				"%s(%s): NULL sk_buff passed",
+					CTCM_FUNTAIL, dev->name);
+		priv->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+	if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+			"%s(%s): Got sk_buff with head room < %ld bytes",
+			CTCM_FUNTAIL, dev->name, LL_HEADER_LENGTH + 2);
+		dev_kfree_skb(skb);
+		priv->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	/*
+	 * If channels are not running, try to restart them
+	 * and throw away packet.
+	 */
+	if (fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) {
+		fsm_event(priv->fsm, DEV_EVENT_START, dev);
+		dev_kfree_skb(skb);
+		priv->stats.tx_dropped++;
+		priv->stats.tx_errors++;
+		priv->stats.tx_carrier_errors++;
+		return NETDEV_TX_OK;
+	}
+
+	if (ctcm_test_and_set_busy(dev))
+		return NETDEV_TX_BUSY;
+
+	dev->trans_start = jiffies;
+	if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
+		return NETDEV_TX_BUSY;
+	return NETDEV_TX_OK;
+}
+
+/* unmerged MPC variant of ctcm_tx */
+static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	int len = 0;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp  = priv->mpcg;
+	struct sk_buff *newskb = NULL;
+
+	/*
+	 * Some sanity checks ...
+	 */
+	if (skb == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): NULL sk_buff passed",
+					CTCM_FUNTAIL, dev->name);
+		priv->stats.tx_dropped++;
+					goto done;
+	}
+	if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) {
+		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
+			"%s(%s): Got sk_buff with head room < %ld bytes",
+			CTCM_FUNTAIL, dev->name,
+				TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
+
+		CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
+
+		len =  skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
+		newskb = __dev_alloc_skb(len, gfp_type() | GFP_DMA);
+
+		if (!newskb) {
+			CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
+				"%s: %s: __dev_alloc_skb failed",
+						__func__, dev->name);
+
+			dev_kfree_skb_any(skb);
+			priv->stats.tx_dropped++;
+			priv->stats.tx_errors++;
+			priv->stats.tx_carrier_errors++;
+			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+					goto done;
+		}
+		newskb->protocol = skb->protocol;
+		skb_reserve(newskb, TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
+		memcpy(skb_put(newskb, skb->len), skb->data, skb->len);
+		dev_kfree_skb_any(skb);
+		skb = newskb;
+	}
+
+	/*
+	 * If channels are not running,
+	 * notify anybody about a link failure and throw
+	 * away packet.
+	 */
+	if ((fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) ||
+	   (fsm_getstate(grp->fsm) <  MPCG_STATE_XID2INITW)) {
+		dev_kfree_skb_any(skb);
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): inactive MPCGROUP - dropped",
+					CTCM_FUNTAIL, dev->name);
+		priv->stats.tx_dropped++;
+		priv->stats.tx_errors++;
+		priv->stats.tx_carrier_errors++;
+					goto done;
+	}
+
+	if (ctcm_test_and_set_busy(dev)) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): device busy - dropped",
+					CTCM_FUNTAIL, dev->name);
+		dev_kfree_skb_any(skb);
+		priv->stats.tx_dropped++;
+		priv->stats.tx_errors++;
+		priv->stats.tx_carrier_errors++;
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+					goto done;
+	}
+
+	dev->trans_start = jiffies;
+	if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): device error - dropped",
+					CTCM_FUNTAIL, dev->name);
+		dev_kfree_skb_any(skb);
+		priv->stats.tx_dropped++;
+		priv->stats.tx_errors++;
+		priv->stats.tx_carrier_errors++;
+		ctcm_clear_busy(dev);
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+					goto done;
+	}
+	ctcm_clear_busy(dev);
+done:
+	if (do_debug)
+		MPC_DBF_DEV_NAME(TRACE, dev, "exit");
+
+	return NETDEV_TX_OK;	/* handle freeing of skb here */
+}
+
+
+/**
+ * Sets MTU of an interface.
+ *
+ *  dev		Pointer to interface struct.
+ *  new_mtu	The new MTU to use for this interface.
+ *
+ * returns 0 on success, -EINVAL if MTU is out of valid range.
+ *         (valid range is 576 .. 65527). If VM is on the
+ *         remote side, maximum MTU is 32760, however this is
+ *         not checked here.
+ */
+static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct ctcm_priv *priv;
+	int max_bufsize;
+
+	if (new_mtu < 576 || new_mtu > 65527)
+		return -EINVAL;
+
+	priv = dev->ml_priv;
+	max_bufsize = priv->channel[CTCM_READ]->max_bufsize;
+
+	if (IS_MPC(priv)) {
+		if (new_mtu > max_bufsize - TH_HEADER_LENGTH)
+			return -EINVAL;
+		dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
+	} else {
+		if (new_mtu > max_bufsize - LL_HEADER_LENGTH - 2)
+			return -EINVAL;
+		dev->hard_header_len = LL_HEADER_LENGTH + 2;
+	}
+	dev->mtu = new_mtu;
+	return 0;
+}
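+
+/*
+ * Put differently, the admissible MTU is bounded by the read channel's
+ * buffer size minus the per-packet framing overhead:
+ *
+ *	non-MPC:  new_mtu <= max_bufsize - LL_HEADER_LENGTH - 2
+ *	MPC:      new_mtu <= max_bufsize - TH_HEADER_LENGTH
+ */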
+
+/**
+ * Returns interface statistics of a device.
+ *
+ *  dev		Pointer to interface struct.
+ *
+ * returns Pointer to stats struct of this interface.
+ */
+static struct net_device_stats *ctcm_stats(struct net_device *dev)
+{
+	return &((struct ctcm_priv *)dev->ml_priv)->stats;
+}
+
+static void ctcm_free_netdevice(struct net_device *dev)
+{
+	struct ctcm_priv *priv;
+	struct mpc_group *grp;
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+			"%s(%s)", CTCM_FUNTAIL, dev->name);
+	priv = dev->ml_priv;
+	if (priv) {
+		grp = priv->mpcg;
+		if (grp) {
+			if (grp->fsm)
+				kfree_fsm(grp->fsm);
+			if (grp->xid_skb)
+				dev_kfree_skb(grp->xid_skb);
+			if (grp->rcvd_xid_skb)
+				dev_kfree_skb(grp->rcvd_xid_skb);
+			tasklet_kill(&grp->mpc_tasklet2);
+			kfree(grp);
+			priv->mpcg = NULL;
+		}
+		if (priv->fsm) {
+			kfree_fsm(priv->fsm);
+			priv->fsm = NULL;
+		}
+		kfree(priv->xid);
+		priv->xid = NULL;
+	/*
+	 * Note: kfree(priv) is done in remove_device, the counterpart
+	 * of the allocating function probe_device.
+	 */
+	}
+#ifdef MODULE
+	free_netdev(dev);
+#endif
+}
+
+struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
+
+static const struct net_device_ops ctcm_netdev_ops = {
+	.ndo_open		= ctcm_open,
+	.ndo_stop		= ctcm_close,
+	.ndo_get_stats		= ctcm_stats,
+	.ndo_change_mtu	   	= ctcm_change_mtu,
+	.ndo_start_xmit		= ctcm_tx,
+};
+
+static const struct net_device_ops ctcm_mpc_netdev_ops = {
+	.ndo_open		= ctcm_open,
+	.ndo_stop		= ctcm_close,
+	.ndo_get_stats		= ctcm_stats,
+	.ndo_change_mtu	   	= ctcm_change_mtu,
+	.ndo_start_xmit		= ctcmpc_tx,
+};
+
+static void ctcm_dev_setup(struct net_device *dev)
+{
+	dev->type = ARPHRD_SLIP;
+	dev->tx_queue_len = 100;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+}
+
+/*
+ * Initialize everything of the net device except the name and the
+ * channel structs.
+ */
+static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
+{
+	struct net_device *dev;
+	struct mpc_group *grp;
+	if (!priv)
+		return NULL;
+
+	if (IS_MPC(priv))
+		dev = alloc_netdev(0, MPC_DEVICE_GENE, ctcm_dev_setup);
+	else
+		dev = alloc_netdev(0, CTC_DEVICE_GENE, ctcm_dev_setup);
+
+	if (!dev) {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
+			"%s: MEMORY allocation ERROR",
+			CTCM_FUNTAIL);
+		return NULL;
+	}
+	dev->ml_priv = priv;
+	priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names,
+				CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS,
+				dev_fsm, dev_fsm_len, GFP_KERNEL);
+	if (priv->fsm == NULL) {
+		CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
+		free_netdev(dev);
+		return NULL;
+	}
+	fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
+	fsm_settimer(priv->fsm, &priv->restart_timer);
+
+	if (IS_MPC(priv)) {
+		/*  MPC Group Initializations  */
+		grp = ctcmpc_init_mpc_group(priv);
+		if (grp == NULL) {
+			MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
+			free_netdev(dev);
+			return NULL;
+		}
+		tasklet_init(&grp->mpc_tasklet2,
+				mpc_group_ready, (unsigned long)dev);
+		dev->mtu = MPC_BUFSIZE_DEFAULT -
+				TH_HEADER_LENGTH - PDU_HEADER_LENGTH;
+
+		dev->netdev_ops = &ctcm_mpc_netdev_ops;
+		dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
+		priv->buffer_size = MPC_BUFSIZE_DEFAULT;
+	} else {
+		dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
+		dev->netdev_ops = &ctcm_netdev_ops;
+		dev->hard_header_len = LL_HEADER_LENGTH + 2;
+	}
+
+	CTCMY_DBF_DEV(SETUP, dev, "finished");
+
+	return dev;
+}
+
+/**
+ * Main IRQ handler.
+ *
+ *  cdev	The ccw_device the interrupt is for.
+ *  intparm	interruption parameter.
+ *  irb		interruption response block.
+ */
+static void ctcm_irq_handler(struct ccw_device *cdev,
+				unsigned long intparm, struct irb *irb)
+{
+	struct channel		*ch;
+	struct net_device	*dev;
+	struct ctcm_priv	*priv;
+	struct ccwgroup_device	*cgdev;
+	int cstat;
+	int dstat;
+
+	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+		"Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));
+
+	if (ctcm_check_irb_error(cdev, irb))
+		return;
+
+	cgdev = dev_get_drvdata(&cdev->dev);
+
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+
+	/* Check for unsolicited interrupts. */
+	if (cgdev == NULL) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_ERROR,
+			"%s(%s) unsolicited irq: c-%02x d-%02x\n",
+			CTCM_FUNTAIL, dev_name(&cdev->dev), cstat, dstat);
+		dev_warn(&cdev->dev,
+			"The adapter received a non-specific IRQ\n");
+		return;
+	}
+
+	priv = dev_get_drvdata(&cgdev->dev);
+
+	/* Try to extract channel from driver data. */
+	if (priv->channel[CTCM_READ]->cdev == cdev)
+		ch = priv->channel[CTCM_READ];
+	else if (priv->channel[CTCM_WRITE]->cdev == cdev)
+		ch = priv->channel[CTCM_WRITE];
+	else {
+		dev_err(&cdev->dev,
+			"%s: Internal error: Can't determine channel for "
+			"interrupt device %s\n",
+			__func__, dev_name(&cdev->dev));
+			/* Explain: inconsistent internal structures */
+		return;
+	}
+
+	dev = ch->netdev;
+	if (dev == NULL) {
+		dev_err(&cdev->dev,
+			"%s Internal error: net_device is NULL, ch = 0x%p\n",
+			__func__, ch);
+			/* Explain: inconsistent internal structures */
+		return;
+	}
+
+	/* Copy interruption response block. */
+	memcpy(ch->irb, irb, sizeof(struct irb));
+
+	/* Issue error message and return on subchannel error code */
+	if (irb->scsw.cmd.cstat) {
+		fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch);
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+			"%s(%s): sub-ch check %s: cs=%02x ds=%02x",
+				CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat);
+		dev_warn(&cdev->dev,
+				"A check occurred on the subchannel\n");
+		return;
+	}
+
+	/* Check the reason-code of a unit check */
+	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+		if ((irb->ecw[0] & ch->sense_rc) == 0)
+			/* print it only once */
+			CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+				"%s(%s): sense=%02x, ds=%02x",
+				CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat);
+		ccw_unit_check(ch, irb->ecw[0]);
+		return;
+	}
+	if (irb->scsw.cmd.dstat & DEV_STAT_BUSY) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
+			fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch);
+		else
+			fsm_event(ch->fsm, CTC_EVENT_BUSY, ch);
+		return;
+	}
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		fsm_event(ch->fsm, CTC_EVENT_ATTN, ch);
+		return;
+	}
+	if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
+	    (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
+	    (irb->scsw.cmd.stctl ==
+	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
+		fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch);
+	else
+		fsm_event(ch->fsm, CTC_EVENT_IRQ, ch);
+
+}
+
+/**
+ * Add ctcm specific attributes.
+ * Add ctcm private data.
+ *
+ *  cgdev	pointer to ccwgroup_device just added
+ *
+ * returns 0 on success, !0 on failure.
+ */
+static int ctcm_probe_device(struct ccwgroup_device *cgdev)
+{
+	struct ctcm_priv *priv;
+	int rc;
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+			"%s %p",
+			__func__, cgdev);
+
+	if (!get_device(&cgdev->dev))
+		return -ENODEV;
+
+	priv = kzalloc(sizeof(struct ctcm_priv), GFP_KERNEL);
+	if (!priv) {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+			"%s: memory allocation failure",
+			CTCM_FUNTAIL);
+		put_device(&cgdev->dev);
+		return -ENOMEM;
+	}
+
+	rc = ctcm_add_files(&cgdev->dev);
+	if (rc) {
+		kfree(priv);
+		put_device(&cgdev->dev);
+		return rc;
+	}
+	priv->buffer_size = CTCM_BUFSIZE_DEFAULT;
+	cgdev->cdev[0]->handler = ctcm_irq_handler;
+	cgdev->cdev[1]->handler = ctcm_irq_handler;
+	dev_set_drvdata(&cgdev->dev, priv);
+
+	return 0;
+}
+
+/**
+ * Add a new channel to the list of channels.
+ * Keeps the channel list sorted.
+ *
+ *  cdev	The ccw_device to be added.
+ *  type	The type class of the new channel.
+ *  priv	Points to the private data of the ccwgroup_device.
+ *
+ * returns 0 on success, !0 on error.
+ */
+static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
+				struct ctcm_priv *priv)
+{
+	struct channel **c = &channels;
+	struct channel *ch;
+	int ccw_num;
+	int rc = 0;
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+		"%s(%s), type %d, proto %d",
+			__func__, dev_name(&cdev->dev),	type, priv->protocol);
+
+	ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
+	if (ch == NULL)
+		return -ENOMEM;
+
+	ch->protocol = priv->protocol;
+	if (IS_MPC(priv)) {
+		ch->discontact_th = kzalloc(TH_HEADER_LENGTH, gfp_type());
+		if (ch->discontact_th == NULL)
+					goto nomem_return;
+
+		ch->discontact_th->th_blk_flag = TH_DISCONTACT;
+		tasklet_init(&ch->ch_disc_tasklet,
+			mpc_action_send_discontact, (unsigned long)ch);
+
+		tasklet_init(&ch->ch_tasklet, ctcmpc_bh, (unsigned long)ch);
+		ch->max_bufsize = (MPC_BUFSIZE_DEFAULT - 35);
+		ccw_num = 17;
+	} else
+		ccw_num = 8;
+
+	ch->ccw = kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+	if (ch->ccw == NULL)
+					goto nomem_return;
+
+	ch->cdev = cdev;
+	snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev->dev));
+	ch->type = type;
+
+	/**
+	 * "static" ccws are used in the following way:
+	 *
+	 * ccw[0..2] (Channel program for generic I/O):
+	 *           0: prepare
+	 *           1: read or write (depending on direction) with fixed
+	 *              buffer (idal allocated once when buffer is allocated)
+	 *           2: nop
+	 * ccw[3..5] (Channel program for direct write of packets)
+	 *           3: prepare
+	 *           4: write (idal allocated on every write).
+	 *           5: nop
+	 * ccw[6..7] (Channel program for initial channel setup):
+	 *           6: set extended mode
+	 *           7: nop
+	 *
+	 * ch->ccw[0..5] are initialized in ch_action_start because
+	 * the channel's direction is yet unknown here.
+	 *
+	 * ccws used for xid2 negotiations
+	 *  ch->ccw[8..14] are used for the XID exchange, either
+	 *    X side XID2 Processing
+	 *       8:  write control
+	 *       9:  write th
+	 *       10: write XID
+	 *       11: read th from secondary
+	 *       12: read XID from secondary
+	 *       13: read 4 byte ID
+	 *       14: nop
+	 *    Y side XID Processing
+	 *       8:  sense
+	 *       9:  read th
+	 *       10: read XID
+	 *       11: write th
+	 *       12: write XID
+	 *       13: write 4 byte ID
+	 *       14: nop
+	 *
+	 *  ccws used for double noop due to VM timing issues
+	 *  which result in unrecoverable Busy on channel
+	 *       15: nop
+	 *       16: nop
+	 */
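+	/*
+	 * Minimal sketch of the generic I/O program described above
+	 * (values assumed here; the real initialization is done by the
+	 * channel fsm start action once the direction is known):
+	 *
+	 *	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
+	 *	ch->ccw[1].cmd_code = CCW_CMD_READ;  (or CCW_CMD_WRITE)
+	 *	ch->ccw[1].count    = ch->max_bufsize;
+	 *	ch->ccw[2].cmd_code = CCW_CMD_NOOP;
+	 */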
+	ch->ccw[6].cmd_code	= CCW_CMD_SET_EXTENDED;
+	ch->ccw[6].flags	= CCW_FLAG_SLI;
+
+	ch->ccw[7].cmd_code	= CCW_CMD_NOOP;
+	ch->ccw[7].flags	= CCW_FLAG_SLI;
+
+	if (IS_MPC(priv)) {
+		ch->ccw[15].cmd_code = CCW_CMD_WRITE;
+		ch->ccw[15].flags    = CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[15].count    = TH_HEADER_LENGTH;
+		ch->ccw[15].cda      = virt_to_phys(ch->discontact_th);
+
+		ch->ccw[16].cmd_code = CCW_CMD_NOOP;
+		ch->ccw[16].flags    = CCW_FLAG_SLI;
+
+		ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
+				ctc_ch_event_names, CTC_MPC_NR_STATES,
+				CTC_MPC_NR_EVENTS, ctcmpc_ch_fsm,
+				mpc_ch_fsm_len, GFP_KERNEL);
+	} else {
+		ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
+				ctc_ch_event_names, CTC_NR_STATES,
+				CTC_NR_EVENTS, ch_fsm,
+				ch_fsm_len, GFP_KERNEL);
+	}
+	if (ch->fsm == NULL)
+				goto free_return;
+
+	fsm_newstate(ch->fsm, CTC_STATE_IDLE);
+
+	ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
+	if (ch->irb == NULL)
+				goto nomem_return;
+
+	while (*c && ctcm_less_than((*c)->id, ch->id))
+		c = &(*c)->next;
+
+	if (*c && (!strncmp((*c)->id, ch->id, CTCM_ID_SIZE))) {
+		CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+				"%s (%s) already in list, using old entry",
+				__func__, (*c)->id);
+
+				goto free_return;
+	}
+
+	spin_lock_init(&ch->collect_lock);
+
+	fsm_settimer(ch->fsm, &ch->timer);
+	skb_queue_head_init(&ch->io_queue);
+	skb_queue_head_init(&ch->collect_queue);
+
+	if (IS_MPC(priv)) {
+		fsm_settimer(ch->fsm, &ch->sweep_timer);
+		skb_queue_head_init(&ch->sweep_queue);
+	}
+	ch->next = *c;
+	*c = ch;
+	return 0;
+
+nomem_return:
+	rc = -ENOMEM;
+
+free_return:	/* note that all channel pointers are 0 or valid */
+	kfree(ch->ccw);
+	kfree(ch->discontact_th);
+	kfree_fsm(ch->fsm);
+	kfree(ch->irb);
+	kfree(ch);
+	return rc;
+}
+
+/*
+ * Return type of a detected device.
+ */
+static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id)
+{
+	enum ctcm_channel_types type;
+	type = (enum ctcm_channel_types)id->driver_info;
+
+	if (type == ctcm_channel_type_ficon)
+		type = ctcm_channel_type_escon;
+
+	return type;
+}
+
+/**
+ * Setup an interface.
+ *
+ *  cgdev	Device to be setup.
+ *
+ * returns 0 on success, !0 on failure.
+ */
+static int ctcm_new_device(struct ccwgroup_device *cgdev)
+{
+	char read_id[CTCM_ID_SIZE];
+	char write_id[CTCM_ID_SIZE];
+	int direction;
+	enum ctcm_channel_types type;
+	struct ctcm_priv *priv;
+	struct net_device *dev;
+	struct ccw_device *cdev0;
+	struct ccw_device *cdev1;
+	struct channel *readc;
+	struct channel *writec;
+	int ret;
+	int result;
+
+	priv = dev_get_drvdata(&cgdev->dev);
+	if (!priv) {
+		result = -ENODEV;
+		goto out_err_result;
+	}
+
+	cdev0 = cgdev->cdev[0];
+	cdev1 = cgdev->cdev[1];
+
+	type = get_channel_type(&cdev0->id);
+
+	snprintf(read_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev0->dev));
+	snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev));
+
+	ret = add_channel(cdev0, type, priv);
+	if (ret) {
+		result = ret;
+		goto out_err_result;
+	}
+	ret = add_channel(cdev1, type, priv);
+	if (ret) {
+		result = ret;
+		goto out_remove_channel1;
+	}
+
+	ret = ccw_device_set_online(cdev0);
+	if (ret != 0) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+			"%s(%s) set_online rc=%d",
+				CTCM_FUNTAIL, read_id, ret);
+		result = -EIO;
+		goto out_remove_channel2;
+	}
+
+	ret = ccw_device_set_online(cdev1);
+	if (ret != 0) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+			"%s(%s) set_online rc=%d",
+				CTCM_FUNTAIL, write_id, ret);
+
+		result = -EIO;
+		goto out_ccw1;
+	}
+
+	dev = ctcm_init_netdevice(priv);
+	if (dev == NULL) {
+		result = -ENODEV;
+		goto out_ccw2;
+	}
+
+	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
+		priv->channel[direction] =
+			channel_get(type, direction == CTCM_READ ?
+				read_id : write_id, direction);
+		if (priv->channel[direction] == NULL) {
+			if (direction == CTCM_WRITE)
+				channel_free(priv->channel[CTCM_READ]);
+			goto out_dev;
+		}
+		priv->channel[direction]->netdev = dev;
+		priv->channel[direction]->protocol = priv->protocol;
+		priv->channel[direction]->max_bufsize = priv->buffer_size;
+	}
+	/* sysfs magic */
+	SET_NETDEV_DEV(dev, &cgdev->dev);
+
+	if (register_netdev(dev)) {
+		result = -ENODEV;
+		goto out_dev;
+	}
+
+	if (ctcm_add_attributes(&cgdev->dev)) {
+		result = -ENODEV;
+		goto out_unregister;
+	}
+
+	strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
+
+	dev_info(&dev->dev,
+		"setup OK : r/w = %s/%s, protocol : %d\n",
+			priv->channel[CTCM_READ]->id,
+			priv->channel[CTCM_WRITE]->id, priv->protocol);
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+		"setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name,
+			priv->channel[CTCM_READ]->id,
+			priv->channel[CTCM_WRITE]->id, priv->protocol);
+
+	return 0;
+out_unregister:
+	unregister_netdev(dev);
+out_dev:
+	ctcm_free_netdevice(dev);
+out_ccw2:
+	ccw_device_set_offline(cgdev->cdev[1]);
+out_ccw1:
+	ccw_device_set_offline(cgdev->cdev[0]);
+out_remove_channel2:
+	readc = channel_get(type, read_id, CTCM_READ);
+	channel_remove(readc);
+out_remove_channel1:
+	writec = channel_get(type, write_id, CTCM_WRITE);
+	channel_remove(writec);
+out_err_result:
+	return result;
+}
+
+/**
+ * Shutdown an interface.
+ *
+ *  cgdev	Device to be shut down.
+ *
+ * returns 0 on success, !0 on failure.
+ */
+static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
+{
+	struct ctcm_priv *priv;
+	struct net_device *dev;
+
+	priv = dev_get_drvdata(&cgdev->dev);
+	if (!priv)
+		return -ENODEV;
+
+	if (priv->channel[CTCM_READ]) {
+		dev = priv->channel[CTCM_READ]->netdev;
+		CTCM_DBF_DEV(SETUP, dev, "");
+		/* Close the device */
+		ctcm_close(dev);
+		dev->flags &= ~IFF_RUNNING;
+		ctcm_remove_attributes(&cgdev->dev);
+		channel_free(priv->channel[CTCM_READ]);
+	} else
+		dev = NULL;
+
+	if (priv->channel[CTCM_WRITE])
+		channel_free(priv->channel[CTCM_WRITE]);
+
+	if (dev) {
+		unregister_netdev(dev);
+		ctcm_free_netdevice(dev);
+	}
+
+	if (priv->fsm)
+		kfree_fsm(priv->fsm);
+
+	ccw_device_set_offline(cgdev->cdev[1]);
+	ccw_device_set_offline(cgdev->cdev[0]);
+
+	if (priv->channel[CTCM_READ])
+		channel_remove(priv->channel[CTCM_READ]);
+	if (priv->channel[CTCM_WRITE])
+		channel_remove(priv->channel[CTCM_WRITE]);
+	priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL;
+
+	return 0;
+
+}
+
+
+static void ctcm_remove_device(struct ccwgroup_device *cgdev)
+{
+	struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev);
+
+	BUG_ON(priv == NULL);
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+			"removing device %p, proto : %d",
+			cgdev, priv->protocol);
+
+	if (cgdev->state == CCWGROUP_ONLINE)
+		ctcm_shutdown_device(cgdev);
+	ctcm_remove_files(&cgdev->dev);
+	dev_set_drvdata(&cgdev->dev, NULL);
+	kfree(priv);
+	put_device(&cgdev->dev);
+}
+
+static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
+{
+	struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
+
+	if (gdev->state == CCWGROUP_OFFLINE)
+		return 0;
+	netif_device_detach(priv->channel[CTCM_READ]->netdev);
+	ctcm_close(priv->channel[CTCM_READ]->netdev);
+	if (!wait_event_timeout(priv->fsm->wait_q,
+	    fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
+		netif_device_attach(priv->channel[CTCM_READ]->netdev);
+		return -EBUSY;
+	}
+	ccw_device_set_offline(gdev->cdev[1]);
+	ccw_device_set_offline(gdev->cdev[0]);
+	return 0;
+}
+
+static int ctcm_pm_resume(struct ccwgroup_device *gdev)
+{
+	struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
+	int rc;
+
+	if (gdev->state == CCWGROUP_OFFLINE)
+		return 0;
+	rc = ccw_device_set_online(gdev->cdev[1]);
+	if (rc)
+		goto err_out;
+	rc = ccw_device_set_online(gdev->cdev[0]);
+	if (rc)
+		goto err_out;
+	ctcm_open(priv->channel[CTCM_READ]->netdev);
+err_out:
+	netif_device_attach(priv->channel[CTCM_READ]->netdev);
+	return rc;
+}
+
+static struct ccw_device_id ctcm_ids[] = {
+	{CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
+	{CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
+	{CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, ctcm_ids);
+
+static struct ccw_driver ctcm_ccw_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "ctcm",
+	},
+	.ids	= ctcm_ids,
+	.probe	= ccwgroup_probe_ccwdev,
+	.remove	= ccwgroup_remove_ccwdev,
+	.int_class = IOINT_CTC,
+};
+
+static struct ccwgroup_driver ctcm_group_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= CTC_DRIVER_NAME,
+	},
+	.max_slaves  = 2,
+	.driver_id   = 0xC3E3C3D4,	/* CTCM */
+	.probe       = ctcm_probe_device,
+	.remove      = ctcm_remove_device,
+	.set_online  = ctcm_new_device,
+	.set_offline = ctcm_shutdown_device,
+	.freeze	     = ctcm_pm_suspend,
+	.thaw	     = ctcm_pm_resume,
+	.restore     = ctcm_pm_resume,
+};
+
+static ssize_t
+ctcm_driver_group_store(struct device_driver *ddrv, const char *buf,
+			size_t count)
+{
+	int err;
+
+	err = ccwgroup_create_from_string(ctcm_root_dev,
+					  ctcm_group_driver.driver_id,
+					  &ctcm_ccw_driver, 2, buf);
+	return err ? err : count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store);
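+/*
+ * Writing a pair of ccw bus ids to this driver attribute groups the
+ * two devices into one ctcm interface, e.g. (device numbers are only
+ * illustrative):
+ *
+ *	echo 0.0.f000,0.0.f001 > /sys/bus/ccwgroup/drivers/ctcm/group
+ */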
+
+static struct attribute *ctcm_group_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+
+static struct attribute_group ctcm_group_attr_group = {
+	.attrs = ctcm_group_attrs,
+};
+
+static const struct attribute_group *ctcm_group_attr_groups[] = {
+	&ctcm_group_attr_group,
+	NULL,
+};
+
+/*
+ * Module related routines
+ */
+
+/*
+ * Prepare to be unloaded. Free IRQ's and release all resources.
+ * This is called just before this module is unloaded. It is
+ * not called, if the usage count is !0, so we don't need to check
+ * for that.
+ */
+static void __exit ctcm_exit(void)
+{
+	driver_remove_file(&ctcm_group_driver.driver, &driver_attr_group);
+	ccwgroup_driver_unregister(&ctcm_group_driver);
+	ccw_driver_unregister(&ctcm_ccw_driver);
+	root_device_unregister(ctcm_root_dev);
+	ctcm_unregister_dbf_views();
+	pr_info("CTCM driver unloaded\n");
+}
+
+/*
+ * Print Banner.
+ */
+static void print_banner(void)
+{
+	pr_info("CTCM driver initialized\n");
+}
+
+/**
+ * Initialize module.
+ * This is called just after the module is loaded.
+ *
+ * returns 0 on success, !0 on error.
+ */
+static int __init ctcm_init(void)
+{
+	int ret;
+
+	channels = NULL;
+
+	ret = ctcm_register_dbf_views();
+	if (ret)
+		goto out_err;
+	ctcm_root_dev = root_device_register("ctcm");
+	ret = IS_ERR(ctcm_root_dev) ? PTR_ERR(ctcm_root_dev) : 0;
+	if (ret)
+		goto register_err;
+	ret = ccw_driver_register(&ctcm_ccw_driver);
+	if (ret)
+		goto ccw_err;
+	ctcm_group_driver.driver.groups = ctcm_group_attr_groups;
+	ret = ccwgroup_driver_register(&ctcm_group_driver);
+	if (ret)
+		goto ccwgroup_err;
+	print_banner();
+	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&ctcm_ccw_driver);
+ccw_err:
+	root_device_unregister(ctcm_root_dev);
+register_err:
+	ctcm_unregister_dbf_views();
+out_err:
+	pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n",
+		__func__, ret);
+	return ret;
+}
+
+module_init(ctcm_init);
+module_exit(ctcm_exit);
+
+MODULE_AUTHOR("Peter Tiedemann <ptiedem@de.ibm.com>");
+MODULE_DESCRIPTION("Network driver for S/390 CTC + CTCMPC (SNA)");
+MODULE_LICENSE("GPL");
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_main.h b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_main.h
new file mode 100644
index 0000000..24d5215
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_main.h
@@ -0,0 +1,323 @@
+/*
+ *	drivers/s390/net/ctcm_main.h
+ *
+ *	Copyright IBM Corp. 2001, 2007
+ *	Authors:	Fritz Elfert (felfert@millenux.com)
+ *			Peter Tiedemann (ptiedem@de.ibm.com)
+ */
+
+#ifndef _CTCM_MAIN_H_
+#define _CTCM_MAIN_H_
+
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include "fsm.h"
+#include "ctcm_dbug.h"
+#include "ctcm_mpc.h"
+
+#define CTC_DRIVER_NAME	"ctcm"
+#define CTC_DEVICE_NAME	"ctc"
+#define MPC_DEVICE_NAME	"mpc"
+#define CTC_DEVICE_GENE CTC_DEVICE_NAME "%d"
+#define MPC_DEVICE_GENE	MPC_DEVICE_NAME "%d"
+
+#define CHANNEL_FLAGS_READ	0
+#define CHANNEL_FLAGS_WRITE	1
+#define CHANNEL_FLAGS_INUSE	2
+#define CHANNEL_FLAGS_BUFSIZE_CHANGED	4
+#define CHANNEL_FLAGS_FAILED	8
+#define CHANNEL_FLAGS_WAITIRQ	16
+#define CHANNEL_FLAGS_RWMASK	1
+#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
+
+#define LOG_FLAG_ILLEGALPKT	1
+#define LOG_FLAG_ILLEGALSIZE	2
+#define LOG_FLAG_OVERRUN	4
+#define LOG_FLAG_NOMEM		8
+
+#define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg)
+
+#define CTCM_PR_DEBUG(fmt, arg...) \
+	do { \
+		if (do_debug) \
+			printk(KERN_DEBUG fmt, ##arg); \
+	} while (0)
+
+#define	CTCM_PR_DBGDATA(fmt, arg...) \
+	do { \
+		if (do_debug_data) \
+			printk(KERN_DEBUG fmt, ##arg); \
+	} while (0)
+
+#define	CTCM_D3_DUMP(buf, len) \
+	do { \
+		if (do_debug_data) \
+			ctcmpc_dumpit(buf, len); \
+	} while (0)
+
+#define	CTCM_CCW_DUMP(buf, len) \
+	do { \
+		if (do_debug_ccw) \
+			ctcmpc_dumpit(buf, len); \
+	} while (0)
+
+/**
+ * Enum for classifying detected devices
+ */
+enum ctcm_channel_types {
+	/* Device is not a channel  */
+	ctcm_channel_type_none,
+
+	/* Device is a CTC/A */
+	ctcm_channel_type_parallel,
+
+	/* Device is a FICON channel */
+	ctcm_channel_type_ficon,
+
+	/* Device is a ESCON channel */
+	ctcm_channel_type_escon
+};
+
+/*
+ * CCW commands, used in this driver.
+ */
+#define CCW_CMD_WRITE		0x01
+#define CCW_CMD_READ		0x02
+#define CCW_CMD_NOOP		0x03
+#define CCW_CMD_TIC             0x08
+#define CCW_CMD_SENSE_CMD	0x14
+#define CCW_CMD_WRITE_CTL	0x17
+#define CCW_CMD_SET_EXTENDED	0xc3
+#define CCW_CMD_PREPARE		0xe3
+
+#define CTCM_PROTO_S390		0
+#define CTCM_PROTO_LINUX	1
+#define CTCM_PROTO_LINUX_TTY	2
+#define CTCM_PROTO_OS390	3
+#define CTCM_PROTO_MPC		4
+#define CTCM_PROTO_MAX		4
+
+#define CTCM_BUFSIZE_LIMIT	65535
+#define CTCM_BUFSIZE_DEFAULT	32768
+#define MPC_BUFSIZE_DEFAULT	CTCM_BUFSIZE_LIMIT
+
+#define CTCM_TIME_1_SEC		1000
+#define CTCM_TIME_5_SEC		5000
+#define CTCM_TIME_10_SEC	10000
+
+#define CTCM_INITIAL_BLOCKLEN	2
+
+#define CTCM_READ		0
+#define CTCM_WRITE		1
+
+#define CTCM_ID_SIZE		(20 + 3)
+
+struct ctcm_profile {
+	unsigned long maxmulti;
+	unsigned long maxcqueue;
+	unsigned long doios_single;
+	unsigned long doios_multi;
+	unsigned long txlen;
+	unsigned long tx_time;
+	struct timespec send_stamp;
+};
+
+/*
+ * Definition of one channel
+ */
+struct channel {
+	struct channel *next;
+	char id[CTCM_ID_SIZE];
+	struct ccw_device *cdev;
+	/*
+	 * Type of this channel.
+	 * CTC/A or Escon for valid channels.
+	 */
+	enum ctcm_channel_types type;
+	/*
+	 * Misc. flags. See CHANNEL_FLAGS_... below
+	 */
+	__u32 flags;
+	__u16 protocol;		/* protocol of this channel (4 = MPC) */
+	/*
+	 * I/O and irq related stuff
+	 */
+	struct ccw1 *ccw;
+	struct irb *irb;
+	/*
+	 * RX/TX buffer size
+	 */
+	int max_bufsize;
+	struct sk_buff *trans_skb;	/* transmit/receive buffer */
+	struct sk_buff_head io_queue;	/* universal I/O queue */
+	struct tasklet_struct ch_tasklet;	/* MPC ONLY */
+	/*
+	 * TX queue for collecting skb's during busy.
+	 */
+	struct sk_buff_head collect_queue;
+	/*
+	 * Amount of data in collect_queue.
+	 */
+	int collect_len;
+	/*
+	 * spinlock for collect_queue and collect_len
+	 */
+	spinlock_t collect_lock;
+	/*
+	 * Timer for detecting unresponsive
+	 * I/O operations.
+	 */
+	fsm_timer timer;
+	/* MPC ONLY section begin */
+	__u32	th_seq_num;	/* SNA TH seq number */
+	__u8	th_seg;
+	__u32	pdu_seq;
+	struct sk_buff		*xid_skb;
+	char			*xid_skb_data;
+	struct th_header	*xid_th;
+	struct xid2		*xid;
+	char			*xid_id;
+	struct th_header	*rcvd_xid_th;
+	struct xid2		*rcvd_xid;
+	char			*rcvd_xid_id;
+	__u8			in_mpcgroup;
+	fsm_timer		sweep_timer;
+	struct sk_buff_head	sweep_queue;
+	struct th_header	*discontact_th;
+	struct tasklet_struct	ch_disc_tasklet;
+	/* MPC ONLY section end */
+
+	int retry;		/* retry counter for misc. operations */
+	fsm_instance *fsm;	/* finite state machine of this channel */
+	struct net_device *netdev;	/* corresponding net_device */
+	struct ctcm_profile prof;
+	__u8 *trans_skb_data;
+	__u16 logflags;
+	__u8  sense_rc; /* last unit check sense code report control */
+};
+
+struct ctcm_priv {
+	struct net_device_stats	stats;
+	unsigned long	tbusy;
+
+	/* The MPC group struct of this interface */
+	struct	mpc_group	*mpcg;	/* MPC only */
+	struct	xid2		*xid;	/* MPC only */
+
+	/* The finite state machine of this interface */
+	fsm_instance *fsm;
+
+	/* The protocol of this device */
+	__u16 protocol;
+
+	/* Timer for restarting after I/O Errors */
+	fsm_timer	restart_timer;
+
+	int buffer_size;	/* ctc only */
+
+	struct channel *channel[2];
+};
+
+int ctcm_open(struct net_device *dev);
+int ctcm_close(struct net_device *dev);
+
+/*
+ * prototypes for non-static sysfs functions
+ */
+int ctcm_add_attributes(struct device *dev);
+void ctcm_remove_attributes(struct device *dev);
+int ctcm_add_files(struct device *dev);
+void ctcm_remove_files(struct device *dev);
+
+/*
+ * Compatibility macros for busy handling
+ * of network devices.
+ */
+static inline void ctcm_clear_busy_do(struct net_device *dev)
+{
+	clear_bit(0, &(((struct ctcm_priv *)dev->ml_priv)->tbusy));
+	netif_wake_queue(dev);
+}
+
+static inline void ctcm_clear_busy(struct net_device *dev)
+{
+	struct mpc_group *grp;
+	grp = ((struct ctcm_priv *)dev->ml_priv)->mpcg;
+
+	if (!(grp && grp->in_sweep))
+		ctcm_clear_busy_do(dev);
+}
+
+
+static inline int ctcm_test_and_set_busy(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return test_and_set_bit(0,
+			&(((struct ctcm_priv *)dev->ml_priv)->tbusy));
+}
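+/*
+ * Typical usage in the transmit path (sketch, see ctcm_main.c): mark
+ * the device busy before starting channel I/O and clear it again from
+ * the completion / fsm path:
+ *
+ *	if (ctcm_test_and_set_busy(dev))
+ *		return NETDEV_TX_BUSY;
+ *	... start channel program, ctcm_clear_busy() on completion ...
+ */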
+
+extern int loglevel;
+extern struct channel *channels;
+
+void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb);
+
+/*
+ * Functions related to setup and device detection.
+ */
+
+static inline int ctcm_less_than(char *id1, char *id2)
+{
+	unsigned long dev1, dev2;
+
+	id1 = id1 + 5;
+	id2 = id2 + 5;
+
+	dev1 = simple_strtoul(id1, &id1, 16);
+	dev2 = simple_strtoul(id2, &id2, 16);
+
+	return (dev1 < dev2);
+}
+
+int ctcm_ch_alloc_buffer(struct channel *ch);
+
+static inline int ctcm_checkalloc_buffer(struct channel *ch)
+{
+	if (ch->trans_skb == NULL)
+		return ctcm_ch_alloc_buffer(ch);
+	if (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED) {
+		dev_kfree_skb(ch->trans_skb);
+		return ctcm_ch_alloc_buffer(ch);
+	}
+	return 0;
+}
+
+struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
+
+/* test if protocol attribute (of struct ctcm_priv or struct channel)
+ * has MPC protocol setting. Type is not checked
+ */
+#define IS_MPC(p) ((p)->protocol == CTCM_PROTO_MPC)
+
+/* test if struct ctcm_priv of struct net_device has MPC protocol setting */
+#define IS_MPCDEV(dev) IS_MPC((struct ctcm_priv *)dev->ml_priv)
+
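+/*
+ * Pick a non-sleeping allocation type when called from interrupt or
+ * softirq context; GFP_KERNEL is only safe in process context.
+ */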
+static inline gfp_t gfp_type(void)
+{
+	return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+}
+
+/*
+ * Definition of our link level header.
+ */
+struct ll_header {
+	__u16 length;
+	__u16 type;
+	__u16 unused;
+};
+#define LL_HEADER_LENGTH (sizeof(struct ll_header))
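+/*
+ * Note: mtu and hard_header_len calculations use LL_HEADER_LENGTH + 2;
+ * the extra 2 bytes appear to account for the block length field that
+ * precedes each transmitted block (see CTCM_INITIAL_BLOCKLEN).
+ */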
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_mpc.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_mpc.c
new file mode 100644
index 0000000..ac7975b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_mpc.c
@@ -0,0 +1,2179 @@
+/*
+ *	drivers/s390/net/ctcm_mpc.c
+ *
+ *	Copyright IBM Corp. 2004, 2007
+ *	Authors:	Belinda Thompson (belindat@us.ibm.com)
+ *			Andy Richter (richtera@us.ibm.com)
+ *			Peter Tiedemann (ptiedem@de.ibm.com)
+ */
+
+/*
+	This module exports functions to be used by CCS:
+	EXPORT_SYMBOL(ctc_mpc_alloc_channel);
+	EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
+	EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
+	EXPORT_SYMBOL(ctc_mpc_flow_control);
+*/
+
+#undef DEBUG
+#undef DEBUGDATA
+#undef DEBUGCCW
+
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+#include <linux/proc_fs.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <linux/netdevice.h>
+#include <net/dst.h>
+
+#include <linux/io.h>		/* instead of <asm/io.h> ok ? */
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <linux/bitops.h>	/* instead of <asm/bitops.h> ok ? */
+#include <linux/uaccess.h>	/* instead of <asm/uaccess.h> ok ? */
+#include <linux/wait.h>
+#include <linux/moduleparam.h>
+#include <asm/idals.h>
+
+#include "ctcm_main.h"
+#include "ctcm_mpc.h"
+#include "ctcm_fsms.h"
+
+static const struct xid2 init_xid = {
+	.xid2_type_id	=	XID_FM2,
+	.xid2_len	=	0x45,
+	.xid2_adj_id	=	0,
+	.xid2_rlen	=	0x31,
+	.xid2_resv1	=	0,
+	.xid2_flag1	=	0,
+	.xid2_fmtt	=	0,
+	.xid2_flag4	=	0x80,
+	.xid2_resv2	=	0,
+	.xid2_tgnum	=	0,
+	.xid2_sender_id	=	0,
+	.xid2_flag2	=	0,
+	.xid2_option	=	XID2_0,
+	.xid2_resv3	=	"\x00",
+	.xid2_resv4	=	0,
+	.xid2_dlc_type	=	XID2_READ_SIDE,
+	.xid2_resv5	=	0,
+	.xid2_mpc_flag	=	0,
+	.xid2_resv6	=	0,
+	.xid2_buf_len	=	(MPC_BUFSIZE_DEFAULT - 35),
+};
+
+static const struct th_header thnorm = {
+	.th_seg		=	0x00,
+	.th_ch_flag	=	TH_IS_XID,
+	.th_blk_flag	=	TH_DATA_IS_XID,
+	.th_is_xid	=	0x01,
+	.th_seq_num	=	0x00000000,
+};
+
+static const struct th_header thdummy = {
+	.th_seg		=	0x00,
+	.th_ch_flag	=	0x00,
+	.th_blk_flag	=	TH_DATA_IS_XID,
+	.th_is_xid	=	0x01,
+	.th_seq_num	=	0x00000000,
+};
+
+/*
+ * Definition of one MPC group
+ */
+
+/*
+ * Compatibility macros for busy handling
+ * of network devices.
+ */
+
+static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb);
+
+/*
+ * MPC Group state machine actions (static prototypes)
+ */
+static void mpc_action_nop(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg);
+static void mpc_action_timeout(fsm_instance *fi, int event, void *arg);
+static int  mpc_validate_xid(struct mpcg_info *mpcginfo);
+static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg);
+
+#ifdef DEBUGDATA
+/*-------------------------------------------------------------------*
+* Dump buffer format						     *
+*								     *
+*--------------------------------------------------------------------*/
+void ctcmpc_dumpit(char *buf, int len)
+{
+	__u32	ct, sw, rm, dup;
+	char	*ptr, *rptr;
+	char	tbuf[82], tdup[82];
+	#ifdef CONFIG_64BIT
+	char	addr[22];
+	#else
+	char	addr[12];
+	#endif
+	char	boff[12];
+	char	bhex[82], duphex[82];
+	char	basc[40];
+
+	sw  = 0;
+	rptr = ptr = buf;
+	rm  = 16;
+	duphex[0] = 0x00;
+	dup = 0;
+
+	for (ct = 0; ct < len; ct++, ptr++, rptr++) {
+		if (sw == 0) {
+			#ifdef CONFIG_64BIT
+			sprintf(addr, "%16.16llx", (__u64)rptr);
+			#else
+			sprintf(addr, "%8.8X", (__u32)rptr);
+			#endif
+
+			sprintf(boff, "%4.4X", (__u32)ct);
+			bhex[0] = '\0';
+			basc[0] = '\0';
+		}
+		if ((sw == 4) || (sw == 12))
+			strcat(bhex, " ");
+		if (sw == 8)
+			strcat(bhex, "	");
+
+		#ifdef CONFIG_64BIT
+		sprintf(tbuf, "%2.2llX", (__u64)*ptr);
+		#else
+		sprintf(tbuf, "%2.2X", (__u32)*ptr);
+		#endif
+
+		tbuf[2] = '\0';
+		strcat(bhex, tbuf);
+		if ((0 != isprint(*ptr)) && (*ptr >= 0x20))
+			basc[sw] = *ptr;
+		else
+			basc[sw] = '.';
+
+		basc[sw+1] = '\0';
+		sw++;
+		rm--;
+		if (sw != 16)
+			continue;
+		if ((strcmp(duphex, bhex)) != 0) {
+			if (dup != 0) {
+				sprintf(tdup,
+					"Duplicate as above to %s", addr);
+				ctcm_pr_debug("		       --- %s ---\n",
+						tdup);
+			}
+			ctcm_pr_debug("   %s (+%s) : %s  [%s]\n",
+					addr, boff, bhex, basc);
+			dup = 0;
+			strcpy(duphex, bhex);
+		} else
+			dup++;
+
+		sw = 0;
+		rm = 16;
+	}  /* endfor */
+
+	if (sw != 0) {
+		for ( ; rm > 0; rm--, sw++) {
+			if ((sw == 4) || (sw == 12))
+				strcat(bhex, " ");
+			if (sw == 8)
+				strcat(bhex, "	");
+			strcat(bhex, "	");
+			strcat(basc, " ");
+		}
+		if (dup != 0) {
+			sprintf(tdup, "Duplicate as above to %s", addr);
+			ctcm_pr_debug("		       --- %s ---\n", tdup);
+		}
+		ctcm_pr_debug("   %s (+%s) : %s  [%s]\n",
+					addr, boff, bhex, basc);
+	} else {
+		if (dup >= 1) {
+			sprintf(tdup, "Duplicate as above to %s", addr);
+			ctcm_pr_debug("		       --- %s ---\n", tdup);
+		}
+		if (dup != 0) {
+			ctcm_pr_debug("   %s (+%s) : %s  [%s]\n",
+				addr, boff, bhex, basc);
+		}
+	}
+
+	return;
+
+}   /*	 end of ctcmpc_dumpit  */
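+/*
+ * Each line of the dump above has roughly the form (hypothetical data):
+ *
+ *   <address> (+<offset>) : <16 bytes as hex words>  [<ASCII>]
+ *
+ * Runs of identical lines are collapsed into a single
+ * "Duplicate as above to <address>" marker.
+ */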
+#endif
+
+#ifdef DEBUGDATA
+/*
+ * Dump header and first 16 bytes of an sk_buff for debugging purposes.
+ *
+ * skb		The sk_buff to dump.
+ * offset	Offset relative to skb-data, where to start the dump.
+ */
+void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
+{
+	__u8 *p = skb->data;
+	struct th_header *header;
+	struct pdu *pheader;
+	int bl = skb->len;
+	int i;
+
+	if (p == NULL)
+		return;
+
+	p += offset;
+	header = (struct th_header *)p;
+
+	ctcm_pr_debug("dump:\n");
+	ctcm_pr_debug("skb len=%d \n", skb->len);
+	if (skb->len > 2) {
+		switch (header->th_ch_flag) {
+		case TH_HAS_PDU:
+			break;
+		case 0x00:
+		case TH_IS_XID:
+			if ((header->th_blk_flag == TH_DATA_IS_XID) &&
+			   (header->th_is_xid == 0x01))
+				goto dumpth;
+		case TH_SWEEP_REQ:
+				goto dumpth;
+		case TH_SWEEP_RESP:
+				goto dumpth;
+		default:
+			break;
+		}
+
+		pheader = (struct pdu *)p;
+		ctcm_pr_debug("pdu->offset: %d hex: %04x\n",
+			       pheader->pdu_offset, pheader->pdu_offset);
+		ctcm_pr_debug("pdu->flag  : %02x\n", pheader->pdu_flag);
+		ctcm_pr_debug("pdu->proto : %02x\n", pheader->pdu_proto);
+		ctcm_pr_debug("pdu->seq   : %02x\n", pheader->pdu_seq);
+					goto dumpdata;
+
+dumpth:
+		ctcm_pr_debug("th->seg     : %02x\n", header->th_seg);
+		ctcm_pr_debug("th->ch      : %02x\n", header->th_ch_flag);
+		ctcm_pr_debug("th->blk_flag: %02x\n", header->th_blk_flag);
+		ctcm_pr_debug("th->type    : %s\n",
+			       (header->th_is_xid) ? "DATA" : "XID");
+		ctcm_pr_debug("th->seqnum  : %04x\n", header->th_seq_num);
+
+	}
+dumpdata:
+	if (bl > 32)
+		bl = 32;
+	ctcm_pr_debug("data: ");
+	for (i = 0; i < bl; i++)
+		ctcm_pr_debug("%02x%s", *p++, (i % 16) ? " " : "\n");
+	ctcm_pr_debug("\n");
+}
+#endif
+
+static struct net_device *ctcmpc_get_dev(int port_num)
+{
+	char device[20];
+	struct net_device *dev;
+	struct ctcm_priv *priv;
+
+	sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num);
+
+	dev = __dev_get_by_name(&init_net, device);
+
+	if (dev == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s: Device not found by name: %s",
+					CTCM_FUNTAIL, device);
+		return NULL;
+	}
+	priv = dev->ml_priv;
+	if (priv == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): dev->ml_priv is NULL",
+					CTCM_FUNTAIL, device);
+		return NULL;
+	}
+	if (priv->mpcg == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): priv->mpcg is NULL",
+					CTCM_FUNTAIL, device);
+		return NULL;
+	}
+	return dev;
+}
+
+/*
+ * ctc_mpc_alloc_channel
+ *	(exported interface)
+ *
+ * Device Initialization :
+ *	ACTPATH  driven IO operations
+ */
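+/*
+ * Rough usage by an exploiting layer (sketch, the callback name is
+ * hypothetical): ctc_mpc_alloc_channel(port, my_alloc_done) registers
+ * the callback; once the group reaches READY it is invoked as
+ * my_alloc_done(port_num, group_max_buflen).
+ */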
+int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int))
+{
+	struct net_device *dev;
+	struct mpc_group *grp;
+	struct ctcm_priv *priv;
+
+	dev = ctcmpc_get_dev(port_num);
+	if (dev == NULL)
+		return 1;
+	priv = dev->ml_priv;
+	grp = priv->mpcg;
+
+	grp->allochanfunc = callback;
+	grp->port_num = port_num;
+	grp->port_persist = 1;
+
+	CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
+			"%s(%s): state=%s",
+			CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm));
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_INOP:
+		/* Group is in the process of terminating */
+		grp->alloc_called = 1;
+		break;
+	case MPCG_STATE_RESET:
+		/* MPC Group will transition to state		  */
+		/* MPCG_STATE_XID2INITW iff the minimum number	  */
+		/* of 1 read and 1 write channel have successfully */
+		/* activated					  */
+		/*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/
+		if (callback)
+			grp->send_qllc_disc = 1;
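+		/* note: falls through into the XID0IOWAIT handling below */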
+	case MPCG_STATE_XID0IOWAIT:
+		fsm_deltimer(&grp->timer);
+		grp->outstanding_xid2 = 0;
+		grp->outstanding_xid7 = 0;
+		grp->outstanding_xid7_p2 = 0;
+		grp->saved_xid2 = NULL;
+		if (callback)
+			ctcm_open(dev);
+		fsm_event(priv->fsm, DEV_EVENT_START, dev);
+		break;
+	case MPCG_STATE_READY:
+		/* XID exchanges completed after PORT was activated */
+		/* Link station already active			    */
+		/* Maybe timing issue...retry callback		    */
+		grp->allocchan_callback_retries++;
+		if (grp->allocchan_callback_retries < 4) {
+			if (grp->allochanfunc)
+				grp->allochanfunc(grp->port_num,
+						  grp->group_max_buflen);
+		} else {
+			/* there are problems...bail out	    */
+			/* there may be a state mismatch so restart */
+			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+			grp->allocchan_callback_retries = 0;
+		}
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ctc_mpc_alloc_channel);
+
+/*
+ * ctc_mpc_establish_connectivity
+ *	(exported interface)
+ */
+void ctc_mpc_establish_connectivity(int port_num,
+				void (*callback)(int, int, int))
+{
+	struct net_device *dev;
+	struct mpc_group *grp;
+	struct ctcm_priv *priv;
+	struct channel *rch, *wch;
+
+	dev = ctcmpc_get_dev(port_num);
+	if (dev == NULL)
+		return;
+	priv = dev->ml_priv;
+	grp = priv->mpcg;
+	rch = priv->channel[CTCM_READ];
+	wch = priv->channel[CTCM_WRITE];
+
+	CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
+			"%s(%s): state=%s",
+			CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm));
+
+	grp->estconnfunc = callback;
+	grp->port_num = port_num;
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_READY:
+		/* XID exchanges completed after PORT was activated */
+		/* Link station already active			    */
+		/* Maybe timing issue...retry callback		    */
+		fsm_deltimer(&grp->timer);
+		grp->estconn_callback_retries++;
+		if (grp->estconn_callback_retries < 4) {
+			if (grp->estconnfunc) {
+				grp->estconnfunc(grp->port_num, 0,
+						grp->group_max_buflen);
+				grp->estconnfunc = NULL;
+			}
+		} else {
+			/* there are problems...bail out	 */
+			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+			grp->estconn_callback_retries = 0;
+		}
+		break;
+	case MPCG_STATE_INOP:
+	case MPCG_STATE_RESET:
+		/* MPC Group is not ready to start XID - min num of */
+		/* 1 read and 1 write channel have not been acquired*/
+
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): REJECTED - inactive channels",
+					CTCM_FUNTAIL, dev->name);
+		if (grp->estconnfunc) {
+			grp->estconnfunc(grp->port_num, -1, 0);
+			grp->estconnfunc = NULL;
+		}
+		break;
+	case MPCG_STATE_XID2INITW:
+		/* alloc channel was called but no XID exchange    */
+		/* has occurred. initiate xside XID exchange	   */
+		/* make sure yside XID0 processing has not started */
+
+		if ((fsm_getstate(rch->fsm) > CH_XID0_PENDING) ||
+			(fsm_getstate(wch->fsm) > CH_XID0_PENDING)) {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): ABORT - PASSIVE XID",
+					CTCM_FUNTAIL, dev->name);
+			break;
+		}
+		grp->send_qllc_disc = 1;
+		fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIT);
+		fsm_deltimer(&grp->timer);
+		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
+						MPCG_EVENT_TIMER, dev);
+		grp->outstanding_xid7 = 0;
+		grp->outstanding_xid7_p2 = 0;
+		grp->saved_xid2 = NULL;
+		if ((rch->in_mpcgroup) &&
+				(fsm_getstate(rch->fsm) == CH_XID0_PENDING))
+			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, rch);
+		else {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): RX-%s not ready for ACTIVE XID0",
+					CTCM_FUNTAIL, dev->name, rch->id);
+			if (grp->estconnfunc) {
+				grp->estconnfunc(grp->port_num, -1, 0);
+				grp->estconnfunc = NULL;
+			}
+			fsm_deltimer(&grp->timer);
+				goto done;
+		}
+		if ((wch->in_mpcgroup) &&
+				(fsm_getstate(wch->fsm) == CH_XID0_PENDING))
+			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, wch);
+		else {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): WX-%s not ready for ACTIVE XID0",
+					CTCM_FUNTAIL, dev->name, wch->id);
+			if (grp->estconnfunc) {
+				grp->estconnfunc(grp->port_num, -1, 0);
+				grp->estconnfunc = NULL;
+			}
+			fsm_deltimer(&grp->timer);
+				goto done;
+			}
+		break;
+	case MPCG_STATE_XID0IOWAIT:
+		/* already in active XID negotiations */
+	default:
+		break;
+	}
+
+done:
+	CTCM_PR_DEBUG("Exit %s()\n", __func__);
+	return;
+}
+EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
+
+/*
+ * ctc_mpc_dealloc_ch
+ *	(exported interface)
+ */
+void ctc_mpc_dealloc_ch(int port_num)
+{
+	struct net_device *dev;
+	struct ctcm_priv *priv;
+	struct mpc_group *grp;
+
+	dev = ctcmpc_get_dev(port_num);
+	if (dev == NULL)
+		return;
+	priv = dev->ml_priv;
+	grp = priv->mpcg;
+
+	CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG,
+			"%s: %s: refcount = %d\n",
+			CTCM_FUNTAIL, dev->name, netdev_refcnt_read(dev));
+
+	fsm_deltimer(&priv->restart_timer);
+	grp->channels_terminating = 0;
+	fsm_deltimer(&grp->timer);
+	grp->allochanfunc = NULL;
+	grp->estconnfunc = NULL;
+	grp->port_persist = 0;
+	grp->send_qllc_disc = 0;
+	fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+
+	ctcm_close(dev);
+	return;
+}
+EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
+
+/*
+ * ctc_mpc_flow_control
+ *	(exported interface)
+ */
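+/*
+ * flowc == 1 suspends delivery of received data (the group moves to
+ * MPCG_STATE_FLOWC); flowc == 0 resumes delivery and reschedules the
+ * read channel tasklet so queued data is unpacked.
+ */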
+void ctc_mpc_flow_control(int port_num, int flowc)
+{
+	struct ctcm_priv *priv;
+	struct mpc_group *grp;
+	struct net_device *dev;
+	struct channel *rch;
+	int mpcg_state;
+
+	dev = ctcmpc_get_dev(port_num);
+	if (dev == NULL)
+		return;
+	priv = dev->ml_priv;
+	grp = priv->mpcg;
+
+	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
+			"%s: %s: flowc = %d",
+				CTCM_FUNTAIL, dev->name, flowc);
+
+	rch = priv->channel[CTCM_READ];
+
+	mpcg_state = fsm_getstate(grp->fsm);
+	switch (flowc) {
+	case 1:
+		if (mpcg_state == MPCG_STATE_FLOWC)
+			break;
+		if (mpcg_state == MPCG_STATE_READY) {
+			if (grp->flow_off_called == 1)
+				grp->flow_off_called = 0;
+			else
+				fsm_newstate(grp->fsm, MPCG_STATE_FLOWC);
+			break;
+		}
+		break;
+	case 0:
+		if (mpcg_state == MPCG_STATE_FLOWC) {
+			fsm_newstate(grp->fsm, MPCG_STATE_READY);
+			/* ensure any data that has accumulated */
+			/* on the io_queue will now be sent	*/
+			tasklet_schedule(&rch->ch_tasklet);
+		}
+		/* possible race condition			*/
+		if (mpcg_state == MPCG_STATE_READY) {
+			grp->flow_off_called = 1;
+			break;
+		}
+		break;
+	}
+
+}
+EXPORT_SYMBOL(ctc_mpc_flow_control);
+
+static int mpc_send_qllc_discontact(struct net_device *);
+
+/*
+ * helper function of ctcmpc_unpack_skb
+*/
+static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
+{
+	struct channel	  *rch = mpcginfo->ch;
+	struct net_device *dev = rch->netdev;
+	struct ctcm_priv   *priv = dev->ml_priv;
+	struct mpc_group  *grp = priv->mpcg;
+	struct channel	  *ch = priv->channel[CTCM_WRITE];
+
+	CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id);
+	CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
+
+	grp->sweep_rsp_pend_num--;
+
+	if ((grp->sweep_req_pend_num == 0) &&
+			(grp->sweep_rsp_pend_num == 0)) {
+		fsm_deltimer(&ch->sweep_timer);
+		grp->in_sweep = 0;
+		rch->th_seq_num = 0x00;
+		ch->th_seq_num = 0x00;
+		ctcm_clear_busy_do(dev);
+	}
+
+	kfree(mpcginfo);
+
+	return;
+
+}
+
+/*
+ * helper function of mpc_rcvd_sweep_req
+ * which is a helper of ctcmpc_unpack_skb
+ */
+static void ctcmpc_send_sweep_resp(struct channel *rch)
+{
+	struct net_device *dev = rch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp = priv->mpcg;
+	struct th_sweep *header;
+	struct sk_buff *sweep_skb;
+	struct channel *ch  = priv->channel[CTCM_WRITE];
+
+	CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id);
+
+	sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA);
+	if (sweep_skb == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): sweep_skb allocation ERROR\n",
+			CTCM_FUNTAIL, rch->id);
+		goto done;
+	}
+
+	header = kmalloc(sizeof(struct th_sweep), gfp_type());
+
+	if (!header) {
+		dev_kfree_skb_any(sweep_skb);
+		goto done;
+	}
+
+	header->th.th_seg	= 0x00 ;
+	header->th.th_ch_flag	= TH_SWEEP_RESP;
+	header->th.th_blk_flag	= 0x00;
+	header->th.th_is_xid	= 0x00;
+	header->th.th_seq_num	= 0x00;
+	header->sw.th_last_seq	= ch->th_seq_num;
+
+	memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);
+
+	kfree(header);
+
+	dev->trans_start = jiffies;
+	skb_queue_tail(&ch->sweep_queue, sweep_skb);
+
+	fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
+
+	return;
+
+done:
+	grp->in_sweep = 0;
+	ctcm_clear_busy_do(dev);
+	fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+
+	return;
+}
+
+/*
+ * helper function of ctcmpc_unpack_skb
+ */
+static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
+{
+	struct channel	  *rch     = mpcginfo->ch;
+	struct net_device *dev     = rch->netdev;
+	struct ctcm_priv  *priv = dev->ml_priv;
+	struct mpc_group  *grp  = priv->mpcg;
+	struct channel	  *ch	   = priv->channel[CTCM_WRITE];
+
+	if (do_debug)
+		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
+			" %s(): ch=0x%p id=%s\n", __func__, ch, ch->id);
+
+	if (grp->in_sweep == 0) {
+		grp->in_sweep = 1;
+		ctcm_test_and_set_busy(dev);
+		grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
+		grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
+	}
+
+	CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
+
+	grp->sweep_req_pend_num--;
+	ctcmpc_send_sweep_resp(ch);
+	kfree(mpcginfo);
+	return;
+}
+
+/*
+ * MPC Group Station FSM definitions
+ */
+static const char *mpcg_event_names[] = {
+	[MPCG_EVENT_INOP]	= "INOP Condition",
+	[MPCG_EVENT_DISCONC]	= "Discontact Received",
+	[MPCG_EVENT_XID0DO]	= "Channel Active - Start XID",
+	[MPCG_EVENT_XID2]	= "XID2 Received",
+	[MPCG_EVENT_XID2DONE]	= "XID0 Complete",
+	[MPCG_EVENT_XID7DONE]	= "XID7 Complete",
+	[MPCG_EVENT_TIMER]	= "XID Setup Timer",
+	[MPCG_EVENT_DOIO]	= "XID DoIO",
+};
+
+static const char *mpcg_state_names[] = {
+	[MPCG_STATE_RESET]	= "Reset",
+	[MPCG_STATE_INOP]	= "INOP",
+	[MPCG_STATE_XID2INITW]	= "Passive XID- XID0 Pending Start",
+	[MPCG_STATE_XID2INITX]	= "Passive XID- XID0 Pending Complete",
+	[MPCG_STATE_XID7INITW]	= "Passive XID- XID7 Pending P1 Start",
+	[MPCG_STATE_XID7INITX]	= "Passive XID- XID7 Pending P2 Complete",
+	[MPCG_STATE_XID0IOWAIT]	= "Active  XID- XID0 Pending Start",
+	[MPCG_STATE_XID0IOWAIX]	= "Active  XID- XID0 Pending Complete",
+	[MPCG_STATE_XID7INITI]	= "Active  XID- XID7 Pending Start",
+	[MPCG_STATE_XID7INITZ]	= "Active  XID- XID7 Pending Complete ",
+	[MPCG_STATE_XID7INITF]	= "XID        - XID7 Complete ",
+	[MPCG_STATE_FLOWC]	= "FLOW CONTROL ON",
+	[MPCG_STATE_READY]	= "READY",
+};
+
+/*
+ * The MPC Group Station FSM
+ *   22 events
+ */
+static const fsm_node mpcg_fsm[] = {
+	{ MPCG_STATE_RESET,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_INOP,	MPCG_EVENT_INOP,	mpc_action_nop        },
+	{ MPCG_STATE_FLOWC,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+
+	{ MPCG_STATE_READY,	MPCG_EVENT_DISCONC,	mpc_action_discontact },
+	{ MPCG_STATE_READY,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+
+	{ MPCG_STATE_XID2INITW,	MPCG_EVENT_XID0DO,	mpc_action_doxid0     },
+	{ MPCG_STATE_XID2INITW,	MPCG_EVENT_XID2,	mpc_action_rcvd_xid0  },
+	{ MPCG_STATE_XID2INITW,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID2INITW,	MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID2INITW,	MPCG_EVENT_DOIO,	mpc_action_yside_xid  },
+
+	{ MPCG_STATE_XID2INITX,	MPCG_EVENT_XID0DO,	mpc_action_doxid0     },
+	{ MPCG_STATE_XID2INITX,	MPCG_EVENT_XID2,	mpc_action_rcvd_xid0  },
+	{ MPCG_STATE_XID2INITX,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID2INITX,	MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID2INITX,	MPCG_EVENT_DOIO,	mpc_action_yside_xid  },
+
+	{ MPCG_STATE_XID7INITW,	MPCG_EVENT_XID2DONE,	mpc_action_doxid7     },
+	{ MPCG_STATE_XID7INITW,	MPCG_EVENT_DISCONC,	mpc_action_discontact },
+	{ MPCG_STATE_XID7INITW,	MPCG_EVENT_XID2,	mpc_action_rcvd_xid7  },
+	{ MPCG_STATE_XID7INITW,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID7INITW,	MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID7INITW,	MPCG_EVENT_XID7DONE,	mpc_action_doxid7     },
+	{ MPCG_STATE_XID7INITW,	MPCG_EVENT_DOIO,	mpc_action_yside_xid  },
+
+	{ MPCG_STATE_XID7INITX,	MPCG_EVENT_DISCONC,	mpc_action_discontact },
+	{ MPCG_STATE_XID7INITX,	MPCG_EVENT_XID2,	mpc_action_rcvd_xid7  },
+	{ MPCG_STATE_XID7INITX,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID7INITX,	MPCG_EVENT_XID7DONE,	mpc_action_doxid7     },
+	{ MPCG_STATE_XID7INITX,	MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID7INITX,	MPCG_EVENT_DOIO,	mpc_action_yside_xid  },
+
+	{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID0DO,	mpc_action_doxid0     },
+	{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DISCONC,	mpc_action_discontact },
+	{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID2,	mpc_action_rcvd_xid0  },
+	{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DOIO,	mpc_action_xside_xid  },
+
+	{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID0DO,	mpc_action_doxid0     },
+	{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DISCONC,	mpc_action_discontact },
+	{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID2,	mpc_action_rcvd_xid0  },
+	{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DOIO,	mpc_action_xside_xid  },
+
+	{ MPCG_STATE_XID7INITI,	MPCG_EVENT_XID2DONE,	mpc_action_doxid7     },
+	{ MPCG_STATE_XID7INITI,	MPCG_EVENT_XID2,	mpc_action_rcvd_xid7  },
+	{ MPCG_STATE_XID7INITI,	MPCG_EVENT_DISCONC,	mpc_action_discontact },
+	{ MPCG_STATE_XID7INITI,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID7INITI,	MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID7INITI,	MPCG_EVENT_XID7DONE,	mpc_action_doxid7     },
+	{ MPCG_STATE_XID7INITI,	MPCG_EVENT_DOIO,	mpc_action_xside_xid  },
+
+	{ MPCG_STATE_XID7INITZ,	MPCG_EVENT_XID2,	mpc_action_rcvd_xid7  },
+	{ MPCG_STATE_XID7INITZ,	MPCG_EVENT_XID7DONE,	mpc_action_doxid7     },
+	{ MPCG_STATE_XID7INITZ,	MPCG_EVENT_DISCONC,	mpc_action_discontact },
+	{ MPCG_STATE_XID7INITZ,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID7INITZ,	MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID7INITZ,	MPCG_EVENT_DOIO,	mpc_action_xside_xid  },
+
+	{ MPCG_STATE_XID7INITF,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID7INITF,	MPCG_EVENT_XID7DONE,	mpc_action_go_ready   },
+};
+
+static int mpcg_fsm_len = ARRAY_SIZE(mpcg_fsm);
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg)
+{
+	struct net_device *dev = arg;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp = priv->mpcg;
+
+	if (grp == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): No MPC group",
+				CTCM_FUNTAIL, dev->name);
+		return;
+	}
+
+	fsm_deltimer(&grp->timer);
+
+	if (grp->saved_xid2->xid2_flag2 == 0x40) {
+		priv->xid->xid2_flag2 = 0x00;
+		if (grp->estconnfunc) {
+			grp->estconnfunc(grp->port_num, 1,
+					grp->group_max_buflen);
+			grp->estconnfunc = NULL;
+		} else if (grp->allochanfunc)
+			grp->send_qllc_disc = 1;
+
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): fails",
+					CTCM_FUNTAIL, dev->name);
+		return;
+	}
+
+	grp->port_persist = 1;
+	grp->out_of_sequence = 0;
+	grp->estconn_called = 0;
+
+	tasklet_hi_schedule(&grp->mpc_tasklet2);
+
+	return;
+}
+
+/*
+ * helper of ctcm_init_netdevice
+ * CTCM_PROTO_MPC only
+ */
+void mpc_group_ready(unsigned long adev)
+{
+	struct net_device *dev = (struct net_device *)adev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp = priv->mpcg;
+	struct channel *ch = NULL;
+
+	if (grp == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): No MPC group",
+				CTCM_FUNTAIL, dev->name);
+		return;
+	}
+
+	CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
+		"%s: %s: GROUP TRANSITIONED TO READY, maxbuf = %d\n",
+			CTCM_FUNTAIL, dev->name, grp->group_max_buflen);
+
+	fsm_newstate(grp->fsm, MPCG_STATE_READY);
+
+	/* Put up a read on the channel */
+	ch = priv->channel[CTCM_READ];
+	ch->pdu_seq = 0;
+	CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
+			__func__, ch->pdu_seq);
+
+	ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch);
+	/* Put the write channel in idle state */
+	ch = priv->channel[CTCM_WRITE];
+	if (ch->collect_len > 0) {
+		spin_lock(&ch->collect_lock);
+		ctcm_purge_skb_queue(&ch->collect_queue);
+		ch->collect_len = 0;
+		spin_unlock(&ch->collect_lock);
+	}
+	ctcm_chx_txidle(ch->fsm, CTC_EVENT_START, ch);
+	ctcm_clear_busy(dev);
+
+	if (grp->estconnfunc) {
+		grp->estconnfunc(grp->port_num, 0,
+				    grp->group_max_buflen);
+		grp->estconnfunc = NULL;
+	} else if (grp->allochanfunc)
+		grp->allochanfunc(grp->port_num, grp->group_max_buflen);
+
+	grp->send_qllc_disc = 1;
+	grp->changed_side = 0;
+
+	return;
+
+}
+
+/*
+ * Increment the MPC Group Active Channel Counts
+ * helper of dev_action (called from channel fsm)
+ */
+void mpc_channel_action(struct channel *ch, int direction, int action)
+{
+	struct net_device  *dev  = ch->netdev;
+	struct ctcm_priv   *priv = dev->ml_priv;
+	struct mpc_group   *grp  = priv->mpcg;
+
+	if (grp == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): No MPC group",
+				CTCM_FUNTAIL, dev->name);
+		return;
+	}
+
+	CTCM_PR_DEBUG("enter %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
+
+	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
+		"%s: %i / Grp:%s total_channels=%i, active_channels: "
+		"read=%i, write=%i\n", __func__, action,
+		fsm_getstate_str(grp->fsm), grp->num_channel_paths,
+		grp->active_channels[CTCM_READ],
+		grp->active_channels[CTCM_WRITE]);
+
+	if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) {
+		grp->num_channel_paths++;
+		grp->active_channels[direction]++;
+		grp->outstanding_xid2++;
+		ch->in_mpcgroup = 1;
+
+		if (ch->xid_skb != NULL)
+			dev_kfree_skb_any(ch->xid_skb);
+
+		ch->xid_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT,
+					GFP_ATOMIC | GFP_DMA);
+		if (ch->xid_skb == NULL) {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): Couldn't alloc ch xid_skb\n",
+				CTCM_FUNTAIL, dev->name);
+			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+			return;
+		}
+		ch->xid_skb_data = ch->xid_skb->data;
+		ch->xid_th = (struct th_header *)ch->xid_skb->data;
+		skb_put(ch->xid_skb, TH_HEADER_LENGTH);
+		ch->xid = (struct xid2 *)skb_tail_pointer(ch->xid_skb);
+		skb_put(ch->xid_skb, XID2_LENGTH);
+		ch->xid_id = skb_tail_pointer(ch->xid_skb);
+		ch->xid_skb->data = ch->xid_skb_data;
+		skb_reset_tail_pointer(ch->xid_skb);
+		ch->xid_skb->len = 0;
+
+		memcpy(skb_put(ch->xid_skb, grp->xid_skb->len),
+				grp->xid_skb->data,
+				grp->xid_skb->len);
+
+		ch->xid->xid2_dlc_type =
+			((CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
+				? XID2_READ_SIDE : XID2_WRITE_SIDE);
+
+		if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE)
+			ch->xid->xid2_buf_len = 0x00;
+
+		ch->xid_skb->data = ch->xid_skb_data;
+		skb_reset_tail_pointer(ch->xid_skb);
+		ch->xid_skb->len = 0;
+
+		fsm_newstate(ch->fsm, CH_XID0_PENDING);
+
+		if ((grp->active_channels[CTCM_READ] > 0) &&
+		    (grp->active_channels[CTCM_WRITE] > 0) &&
+			(fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
+			fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
+			CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
+				"%s: %s: MPC GROUP CHANNELS ACTIVE\n",
+						__func__, dev->name);
+		}
+	} else if ((action == MPC_CHANNEL_REMOVE) &&
+			(ch->in_mpcgroup == 1)) {
+		ch->in_mpcgroup = 0;
+		grp->num_channel_paths--;
+		grp->active_channels[direction]--;
+
+		if (ch->xid_skb != NULL)
+			dev_kfree_skb_any(ch->xid_skb);
+		ch->xid_skb = NULL;
+
+		if (grp->channels_terminating)
+					goto done;
+
+		if (((grp->active_channels[CTCM_READ] == 0) &&
+					(grp->active_channels[CTCM_WRITE] > 0))
+			|| ((grp->active_channels[CTCM_WRITE] == 0) &&
+					(grp->active_channels[CTCM_READ] > 0)))
+			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+	}
+done:
+	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
+		"exit %s: %i / Grp:%s total_channels=%i, active_channels: "
+		"read=%i, write=%i\n", __func__, action,
+		fsm_getstate_str(grp->fsm), grp->num_channel_paths,
+		grp->active_channels[CTCM_READ],
+		grp->active_channels[CTCM_WRITE]);
+
+	CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
+}
+
+/**
+ * Unpack a just received skb and hand it over to
+ * upper layers.
+ * special MPC version of unpack_skb.
+ *
+ * ch		The channel where this skb has been received.
+ * pskb		The received skb.
+ */
+static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
+{
+	struct net_device *dev	= ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp = priv->mpcg;
+	struct pdu *curr_pdu;
+	struct mpcg_info *mpcginfo;
+	struct th_header *header = NULL;
+	struct th_sweep *sweep = NULL;
+	int pdu_last_seen = 0;
+	__u32 new_len;
+	struct sk_buff *skb;
+	int skblen;
+	int sendrc = 0;
+
+	CTCM_PR_DEBUG("ctcmpc enter: %s() %s cp:%i ch:%s\n",
+			__func__, dev->name, smp_processor_id(), ch->id);
+
+	header = (struct th_header *)pskb->data;
+	if ((header->th_seg == 0) &&
+		(header->th_ch_flag == 0) &&
+		(header->th_blk_flag == 0) &&
+		(header->th_seq_num == 0))
+		/* nothing for us */	goto done;
+
+	CTCM_PR_DBGDATA("%s: th_header\n", __func__);
+	CTCM_D3_DUMP((char *)header, TH_HEADER_LENGTH);
+	CTCM_PR_DBGDATA("%s: pskb len: %04x \n", __func__, pskb->len);
+
+	pskb->dev = dev;
+	pskb->ip_summed = CHECKSUM_UNNECESSARY;
+	skb_pull(pskb, TH_HEADER_LENGTH);
+
+	if (likely(header->th_ch_flag == TH_HAS_PDU)) {
+		CTCM_PR_DBGDATA("%s: came into th_has_pdu\n", __func__);
+		if ((fsm_getstate(grp->fsm) == MPCG_STATE_FLOWC) ||
+		   ((fsm_getstate(grp->fsm) == MPCG_STATE_READY) &&
+		    (header->th_seq_num != ch->th_seq_num + 1) &&
+		    (ch->th_seq_num != 0))) {
+			/* This is NOT the next segment		*
+			 * we are not the correct race winner	*
+			 * go away and let someone else win	*
+			 * BUT..this only applies if xid negot	*
+			 * is done				*
+			 */
+			grp->out_of_sequence += 1;
+			__skb_push(pskb, TH_HEADER_LENGTH);
+			skb_queue_tail(&ch->io_queue, pskb);
+			CTCM_PR_DBGDATA("%s: th_seq_num expect:%08x "
+					"got:%08x\n", __func__,
+				ch->th_seq_num + 1, header->th_seq_num);
+
+			return;
+		}
+		grp->out_of_sequence = 0;
+		ch->th_seq_num = header->th_seq_num;
+
+		CTCM_PR_DBGDATA("ctcmpc: %s() FromVTAM_th_seq=%08x\n",
+					__func__, ch->th_seq_num);
+
+		if (unlikely(fsm_getstate(grp->fsm) != MPCG_STATE_READY))
+					goto done;
+		while ((pskb->len > 0) && !pdu_last_seen) {
+			curr_pdu = (struct pdu *)pskb->data;
+
+			CTCM_PR_DBGDATA("%s: pdu_header\n", __func__);
+			CTCM_D3_DUMP((char *)pskb->data, PDU_HEADER_LENGTH);
+			CTCM_PR_DBGDATA("%s: pskb len: %04x \n",
+						__func__, pskb->len);
+
+			skb_pull(pskb, PDU_HEADER_LENGTH);
+
+			if (curr_pdu->pdu_flag & PDU_LAST)
+				pdu_last_seen = 1;
+			if (curr_pdu->pdu_flag & PDU_CNTL)
+				pskb->protocol = htons(ETH_P_SNAP);
+			else
+				pskb->protocol = htons(ETH_P_SNA_DIX);
+
+			if ((pskb->len <= 0) || (pskb->len > ch->max_bufsize)) {
+				CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+					"%s(%s): Dropping packet with "
+					"illegal size %d",
+					CTCM_FUNTAIL, dev->name, pskb->len);
+
+				priv->stats.rx_dropped++;
+				priv->stats.rx_length_errors++;
+					goto done;
+			}
+			skb_reset_mac_header(pskb);
+			new_len = curr_pdu->pdu_offset;
+			CTCM_PR_DBGDATA("%s: new_len: %04x \n",
+						__func__, new_len);
+			if ((new_len == 0) || (new_len > pskb->len)) {
+				/* should never happen		    */
+				/* pskb len must be hosed...bail out */
+				CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+					"%s(%s): non valid pdu_offset: %04x",
+					/* "data may be lost", */
+					CTCM_FUNTAIL, dev->name, new_len);
+				goto done;
+			}
+			skb = __dev_alloc_skb(new_len+4, GFP_ATOMIC);
+
+			if (!skb) {
+				CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+					"%s(%s): MEMORY allocation error",
+						CTCM_FUNTAIL, dev->name);
+				priv->stats.rx_dropped++;
+				fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+						goto done;
+			}
+			memcpy(skb_put(skb, new_len), pskb->data, new_len);
+
+			skb_reset_mac_header(skb);
+			skb->dev = pskb->dev;
+			skb->protocol = pskb->protocol;
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			*((__u32 *) skb_push(skb, 4)) = ch->pdu_seq;
+			ch->pdu_seq++;
+
+			if (do_debug_data) {
+				ctcm_pr_debug("%s: ToDCM_pdu_seq= %08x\n",
+						__func__, ch->pdu_seq);
+				ctcm_pr_debug("%s: skb:%0lx "
+					"skb len: %d \n", __func__,
+					(unsigned long)skb, skb->len);
+				ctcm_pr_debug("%s: up to 32 bytes "
+					"of pdu_data sent\n", __func__);
+				ctcmpc_dump32((char *)skb->data, skb->len);
+			}
+
+			skblen = skb->len;
+			sendrc = netif_rx(skb);
+			priv->stats.rx_packets++;
+			priv->stats.rx_bytes += skblen;
+			skb_pull(pskb, new_len); /* point to next PDU */
+		}
+	} else {
+		mpcginfo = kmalloc(sizeof(struct mpcg_info), gfp_type());
+		if (mpcginfo == NULL)
+					goto done;
+
+		mpcginfo->ch = ch;
+		mpcginfo->th = header;
+		mpcginfo->skb = pskb;
+		CTCM_PR_DEBUG("%s: Not PDU - may be control pkt\n",
+					__func__);
+		/* is this a sweep frame? */
+		sweep = (struct th_sweep *)pskb->data;
+		mpcginfo->sweep = sweep;
+		if (header->th_ch_flag == TH_SWEEP_REQ)
+			mpc_rcvd_sweep_req(mpcginfo);
+		else if (header->th_ch_flag == TH_SWEEP_RESP)
+			mpc_rcvd_sweep_resp(mpcginfo);
+		else if (header->th_blk_flag == TH_DATA_IS_XID) {
+			struct xid2 *thisxid = (struct xid2 *)pskb->data;
+			skb_pull(pskb, XID2_LENGTH);
+			mpcginfo->xid = thisxid;
+			fsm_event(grp->fsm, MPCG_EVENT_XID2, mpcginfo);
+		} else if (header->th_blk_flag == TH_DISCONTACT)
+			fsm_event(grp->fsm, MPCG_EVENT_DISCONC, mpcginfo);
+		else if (header->th_seq_num != 0) {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): control pkt expected\n",
+						CTCM_FUNTAIL, dev->name);
+			priv->stats.rx_dropped++;
+			/* mpcginfo only used for non-data transfers */
+			kfree(mpcginfo);
+			if (do_debug_data)
+				ctcmpc_dump_skb(pskb, -8);
+		}
+	}
+done:
+
+	dev_kfree_skb_any(pskb);
+	if (sendrc == NET_RX_DROP) {
+		dev_warn(&dev->dev,
+			"The network backlog for %s is exceeded, "
+			"package dropped\n", __func__);
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+	}
+
+	CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n",
+			__func__, dev->name, ch, ch->id);
+}
+
+/**
+ * Tasklet helper for mpc's skb unpacking.
+ *
+ * thischan	The channel to work on, passed as an unsigned long.
+ *
+ * Flow-control back pressure is allowed to occur here; however,
+ * throttling the channel back for too long can result in excessive
+ * channel inactivity and deactivation of the channel by the system.
+ */
+void ctcmpc_bh(unsigned long thischan)
+{
+	struct channel	  *ch	= (struct channel *)thischan;
+	struct sk_buff	  *skb;
+	struct net_device *dev	= ch->netdev;
+	struct ctcm_priv  *priv	= dev->ml_priv;
+	struct mpc_group  *grp	= priv->mpcg;
+
+	CTCM_PR_DEBUG("%s cp:%i enter:  %s() %s\n",
+	       dev->name, smp_processor_id(), __func__, ch->id);
+	/* caller has requested driver to throttle back */
+	while ((fsm_getstate(grp->fsm) != MPCG_STATE_FLOWC) &&
+			(skb = skb_dequeue(&ch->io_queue))) {
+		ctcmpc_unpack_skb(ch, skb);
+		if (grp->out_of_sequence > 20) {
+			/* assume data loss has occurred if */
+			/* missing seq_num for extended     */
+			/* period of time		    */
+			grp->out_of_sequence = 0;
+			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+			break;
+		}
+		if (skb == skb_peek(&ch->io_queue))
+			break;
+	}
+	CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n",
+			__func__, dev->name, ch, ch->id);
+	return;
+}
+
+/*
+ *  MPC Group Initializations
+ */
+struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv)
+{
+	struct mpc_group *grp;
+
+	CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
+			"Enter %s(%p)", CTCM_FUNTAIL, priv);
+
+	grp = kzalloc(sizeof(struct mpc_group), GFP_KERNEL);
+	if (grp == NULL)
+		return NULL;
+
+	grp->fsm = init_fsm("mpcg", mpcg_state_names, mpcg_event_names,
+			MPCG_NR_STATES, MPCG_NR_EVENTS, mpcg_fsm,
+			mpcg_fsm_len, GFP_KERNEL);
+	if (grp->fsm == NULL) {
+		kfree(grp);
+		return NULL;
+	}
+
+	fsm_newstate(grp->fsm, MPCG_STATE_RESET);
+	fsm_settimer(grp->fsm, &grp->timer);
+
+	grp->xid_skb =
+		 __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA);
+	if (grp->xid_skb == NULL) {
+		kfree_fsm(grp->fsm);
+		kfree(grp);
+		return NULL;
+	}
+	/*  base xid for all channels in group  */
+	grp->xid_skb_data = grp->xid_skb->data;
+	grp->xid_th = (struct th_header *)grp->xid_skb->data;
+	memcpy(skb_put(grp->xid_skb, TH_HEADER_LENGTH),
+			&thnorm, TH_HEADER_LENGTH);
+
+	grp->xid = (struct xid2 *)skb_tail_pointer(grp->xid_skb);
+	memcpy(skb_put(grp->xid_skb, XID2_LENGTH), &init_xid, XID2_LENGTH);
+	grp->xid->xid2_adj_id = jiffies | 0xfff00000;
+	grp->xid->xid2_sender_id = jiffies;
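+	/* derive a (pseudo-)unique station id from jiffies; these fields are
+	 * later compared in mpc_validate_xid() to decide X/Y side roles */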
+
+	grp->xid_id = skb_tail_pointer(grp->xid_skb);
+	memcpy(skb_put(grp->xid_skb, 4), "VTAM", 4);
+
+	grp->rcvd_xid_skb =
+		__dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
+	if (grp->rcvd_xid_skb == NULL) {
+		kfree_fsm(grp->fsm);
+		dev_kfree_skb(grp->xid_skb);
+		kfree(grp);
+		return NULL;
+	}
+	grp->rcvd_xid_data = grp->rcvd_xid_skb->data;
+	grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
+	memcpy(skb_put(grp->rcvd_xid_skb, TH_HEADER_LENGTH),
+			&thnorm, TH_HEADER_LENGTH);
+	grp->saved_xid2 = NULL;
+	priv->xid = grp->xid;
+	priv->mpcg = grp;
+	return grp;
+}
+
+/*
+ * The MPC Group Station FSM
+ */
+
+/*
+ * MPC Group Station FSM actions
+ * CTCM_PROTO_MPC only
+ */
+
+/**
+ * NOP action for statemachines
+ */
+static void mpc_action_nop(fsm_instance *fi, int event, void *arg)
+{
+}
+
+/*
+ * Invoked when the device transitions to dev_stopped.
+ * MPC will stop each individual channel if a single XID failure
+ * occurs, or will initiate stopping of all channels if a GROUP
+ * level failure occurs.
+ */
+static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
+{
+	struct net_device  *dev = arg;
+	struct ctcm_priv    *priv;
+	struct mpc_group *grp;
+	struct channel *wch;
+
+	BUG_ON(dev == NULL);
+	CTCM_PR_DEBUG("Enter %s: %s\n",	__func__, dev->name);
+
+	priv  = dev->ml_priv;
+	grp =  priv->mpcg;
+	grp->flow_off_called = 0;
+	fsm_deltimer(&grp->timer);
+	if (grp->channels_terminating)
+			return;
+
+	grp->channels_terminating = 1;
+	grp->saved_state = fsm_getstate(grp->fsm);
+	fsm_newstate(grp->fsm, MPCG_STATE_INOP);
+	if (grp->saved_state > MPCG_STATE_XID7INITF)
+		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
+			"%s(%s): MPC GROUP INOPERATIVE",
+				CTCM_FUNTAIL, dev->name);
+	if ((grp->saved_state != MPCG_STATE_RESET) ||
+		/* dealloc_channel has been called */
+		(grp->port_persist == 0))
+		fsm_deltimer(&priv->restart_timer);
+
+	wch = priv->channel[CTCM_WRITE];
+
+	switch (grp->saved_state) {
+	case MPCG_STATE_RESET:
+	case MPCG_STATE_INOP:
+	case MPCG_STATE_XID2INITW:
+	case MPCG_STATE_XID0IOWAIT:
+	case MPCG_STATE_XID2INITX:
+	case MPCG_STATE_XID7INITW:
+	case MPCG_STATE_XID7INITX:
+	case MPCG_STATE_XID0IOWAIX:
+	case MPCG_STATE_XID7INITI:
+	case MPCG_STATE_XID7INITZ:
+	case MPCG_STATE_XID7INITF:
+		break;
+	case MPCG_STATE_FLOWC:
+	case MPCG_STATE_READY:
+	default:
+		tasklet_hi_schedule(&wch->ch_disc_tasklet);
+	}
+
+	grp->xid2_tgnum = 0;
+	grp->group_max_buflen = 0;  /*min of all received */
+	grp->outstanding_xid2 = 0;
+	grp->outstanding_xid7 = 0;
+	grp->outstanding_xid7_p2 = 0;
+	grp->saved_xid2 = NULL;
+	grp->xidnogood = 0;
+	grp->changed_side = 0;
+
+	grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
+	skb_reset_tail_pointer(grp->rcvd_xid_skb);
+	grp->rcvd_xid_skb->len = 0;
+	grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
+	memcpy(skb_put(grp->rcvd_xid_skb, TH_HEADER_LENGTH), &thnorm,
+	       TH_HEADER_LENGTH);
+
+	if (grp->send_qllc_disc == 1) {
+		grp->send_qllc_disc = 0;
+		mpc_send_qllc_discontact(dev);
+	}
+
+	/* DO NOT issue DEV_EVENT_STOP directly out of this code */
+	/* This can result in INOP of VTAM PU due to halting of  */
+	/* outstanding IO which causes a sense to be returned	 */
+	/* Only about 3 senses are allowed and then IOS/VTAM will*/
+	/* become unreachable without manual intervention	 */
+	if ((grp->port_persist == 1) || (grp->alloc_called)) {
+		grp->alloc_called = 0;
+		fsm_deltimer(&priv->restart_timer);
+		fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_RESTART, dev);
+		fsm_newstate(grp->fsm, MPCG_STATE_RESET);
+		if (grp->saved_state > MPCG_STATE_XID7INITF)
+			CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS,
+				"%s(%s): MPC GROUP RECOVERY SCHEDULED",
+					CTCM_FUNTAIL, dev->name);
+	} else {
+		fsm_deltimer(&priv->restart_timer);
+		fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_STOP, dev);
+		fsm_newstate(grp->fsm, MPCG_STATE_RESET);
+		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS,
+			"%s(%s): NO MPC GROUP RECOVERY ATTEMPTED",
+						CTCM_FUNTAIL, dev->name);
+	}
+}
+
+/**
+ * Handle mpc group action timeout.
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ *
+ * fi		An instance of an mpc_group fsm.
+ * event	The event that has just happened.
+ * arg		Generic pointer, cast from net_device * upon call.
+ */
+static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
+{
+	struct net_device *dev = arg;
+	struct ctcm_priv *priv;
+	struct mpc_group *grp;
+	struct channel *wch;
+	struct channel *rch;
+
+	BUG_ON(dev == NULL);
+
+	priv = dev->ml_priv;
+	grp = priv->mpcg;
+	wch = priv->channel[CTCM_WRITE];
+	rch = priv->channel[CTCM_READ];
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_XID2INITW:
+		/* Unless there is outstanding IO on the  */
+		/* channel just return and wait for ATTN  */
+		/* interrupt to begin XID negotiations	  */
+		if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) &&
+		   (fsm_getstate(wch->fsm) == CH_XID0_PENDING))
+			break;
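+		/* otherwise fall through and treat it as an INOP condition */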
+	default:
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+	}
+
+	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
+			"%s: dev=%s exit",
+			CTCM_FUNTAIL, dev->name);
+	return;
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+void mpc_action_discontact(fsm_instance *fi, int event, void *arg)
+{
+	struct mpcg_info   *mpcginfo   = arg;
+	struct channel	   *ch	       = mpcginfo->ch;
+	struct net_device  *dev;
+	struct ctcm_priv   *priv;
+	struct mpc_group   *grp;
+
+	if (ch) {
+		dev = ch->netdev;
+		if (dev) {
+			priv = dev->ml_priv;
+			if (priv) {
+				CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
+					"%s: %s: %s\n",
+					CTCM_FUNTAIL, dev->name, ch->id);
+				grp = priv->mpcg;
+				grp->send_qllc_disc = 1;
+				fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+			}
+		}
+	}
+
+	return;
+}
+
+/*
+ * MPC Group Station - not part of FSM
+ * CTCM_PROTO_MPC only
+ * called from add_channel in ctcm_main.c
+ */
+void mpc_action_send_discontact(unsigned long thischan)
+{
+	int rc;
+	struct channel	*ch = (struct channel *)thischan;
+	unsigned long	saveflags = 0;
+
+	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+	rc = ccw_device_start(ch->cdev, &ch->ccw[15],
+					(unsigned long)ch, 0xff, 0);
+	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+
+	if (rc != 0) {
+		ctcm_ccw_check_rc(ch, rc, (char *)__func__);
+	}
+
+	return;
+}
+
+
+/*
+ * Helper function of the mpc FSM, used by mpc_action_rcvd_xid0 and
+ * mpc_action_rcvd_xid7 to validate a received XID2.
+ * CTCM_PROTO_MPC only
+ */
+static int mpc_validate_xid(struct mpcg_info *mpcginfo)
+{
+	struct channel	   *ch	 = mpcginfo->ch;
+	struct net_device  *dev  = ch->netdev;
+	struct ctcm_priv   *priv = dev->ml_priv;
+	struct mpc_group   *grp  = priv->mpcg;
+	struct xid2	   *xid  = mpcginfo->xid;
+	int	rc	 = 0;
+	__u64	our_id   = 0;
+	__u64   their_id = 0;
+	int	len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
+
+	CTCM_PR_DEBUG("Enter %s: xid=%p\n", __func__, xid);
+
+	if (xid == NULL) {
+		rc = 1;
+		/* XID REJECTED: xid == NULL */
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): xid = NULL",
+				CTCM_FUNTAIL, ch->id);
+			goto done;
+	}
+
+	CTCM_D3_DUMP((char *)xid, XID2_LENGTH);
+
+	/*the received direction should be the opposite of ours  */
+	if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? XID2_WRITE_SIDE :
+				XID2_READ_SIDE) != xid->xid2_dlc_type) {
+		rc = 2;
+		/* XID REJECTED: r/w channel pairing mismatch */
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): r/w channel pairing mismatch",
+				CTCM_FUNTAIL, ch->id);
+			goto done;
+	}
+
+	if (xid->xid2_dlc_type == XID2_READ_SIDE) {
+		CTCM_PR_DEBUG("%s: grpmaxbuf:%d xid2buflen:%d\n", __func__,
+				grp->group_max_buflen, xid->xid2_buf_len);
+
+		if (grp->group_max_buflen == 0 || grp->group_max_buflen >
+						xid->xid2_buf_len - len)
+			grp->group_max_buflen = xid->xid2_buf_len - len;
+	}
+
+	if (grp->saved_xid2 == NULL) {
+		grp->saved_xid2 =
+			(struct xid2 *)skb_tail_pointer(grp->rcvd_xid_skb);
+
+		memcpy(skb_put(grp->rcvd_xid_skb,
+					XID2_LENGTH), xid, XID2_LENGTH);
+		grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
+
+		skb_reset_tail_pointer(grp->rcvd_xid_skb);
+		grp->rcvd_xid_skb->len = 0;
+
+		/* convert two 32 bit numbers into 1 64 bit for id compare */
+		our_id = (__u64)priv->xid->xid2_adj_id;
+		our_id = our_id << 32;
+		our_id = our_id + priv->xid->xid2_sender_id;
+		their_id = (__u64)xid->xid2_adj_id;
+		their_id = their_id << 32;
+		their_id = their_id + xid->xid2_sender_id;
+		/* lower id assume the xside role */
+		if (our_id < their_id) {
+			grp->roll = XSIDE;
+			CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
+				"%s(%s): WE HAVE LOW ID - TAKE XSIDE",
+					CTCM_FUNTAIL, ch->id);
+		} else {
+			grp->roll = YSIDE;
+			CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
+				"%s(%s): WE HAVE HIGH ID - TAKE YSIDE",
+					CTCM_FUNTAIL, ch->id);
+		}
+
+	} else {
+		if (xid->xid2_flag4 != grp->saved_xid2->xid2_flag4) {
+			rc = 3;
+			/* XID REJECTED: xid flag byte4 mismatch */
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): xid flag byte4 mismatch",
+					CTCM_FUNTAIL, ch->id);
+		}
+		if (xid->xid2_flag2 == 0x40) {
+			rc = 4;
+			/* XID REJECTED - xid NOGOOD */
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): xid NOGOOD",
+					CTCM_FUNTAIL, ch->id);
+		}
+		if (xid->xid2_adj_id != grp->saved_xid2->xid2_adj_id) {
+			rc = 5;
+			/* XID REJECTED - Adjacent Station ID Mismatch */
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): Adjacent Station ID Mismatch",
+					CTCM_FUNTAIL, ch->id);
+		}
+		if (xid->xid2_sender_id != grp->saved_xid2->xid2_sender_id) {
+			rc = 6;
+			/* XID REJECTED - Sender Address Mismatch */
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): Sender Address Mismatch",
+					CTCM_FUNTAIL, ch->id);
+		}
+	}
+done:
+	if (rc) {
+		dev_warn(&dev->dev,
+			"The XID used in the MPC protocol is not valid, "
+			"rc = %d\n", rc);
+		priv->xid->xid2_flag2 = 0x40;
+		grp->saved_xid2->xid2_flag2 = 0x40;
+	}
+
+	return rc;
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side)
+{
+	struct channel *ch = arg;
+	int rc = 0;
+	int gotlock = 0;
+	unsigned long saveflags = 0;	/* avoids compiler warning with
+					   spin_unlock_irqrestore */
+
+	CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
+			__func__, smp_processor_id(), ch, ch->id);
+
+	if (ctcm_checkalloc_buffer(ch))
+					goto done;
+
+	/*
+	 * skb data-buffer referencing:
+	 */
+	ch->trans_skb->data = ch->trans_skb_data;
+	skb_reset_tail_pointer(ch->trans_skb);
+	ch->trans_skb->len = 0;
+	/* result of the previous 3 statements is NOT always
+	 * already set after ctcm_checkalloc_buffer
+	 * because of possible reuse of the trans_skb
+	 */
+	memset(ch->trans_skb->data, 0, 16);
+	ch->rcvd_xid_th =  (struct th_header *)ch->trans_skb_data;
+	/* check is main purpose here: */
+	skb_put(ch->trans_skb, TH_HEADER_LENGTH);
+	ch->rcvd_xid = (struct xid2 *)skb_tail_pointer(ch->trans_skb);
+	/* check is main purpose here: */
+	skb_put(ch->trans_skb, XID2_LENGTH);
+	ch->rcvd_xid_id = skb_tail_pointer(ch->trans_skb);
+	/* cleanup back to startpoint */
+	ch->trans_skb->data = ch->trans_skb_data;
+	skb_reset_tail_pointer(ch->trans_skb);
+	ch->trans_skb->len = 0;
+
+	/* non-checking rewrite of above skb data-buffer referencing: */
+	/*
+	memset(ch->trans_skb->data, 0, 16);
+	ch->rcvd_xid_th =  (struct th_header *)ch->trans_skb_data;
+	ch->rcvd_xid = (struct xid2 *)(ch->trans_skb_data + TH_HEADER_LENGTH);
+	ch->rcvd_xid_id = ch->trans_skb_data + TH_HEADER_LENGTH + XID2_LENGTH;
+	 */
+
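+	/*
+	 * Build the XID channel program in ccw[8]..ccw[14]: ccw[8] is the
+	 * lead-in (its command code is set by the caller, e.g.
+	 * mpc_action_doxid0), ccw[9]..ccw[13] carry the exchange of TH
+	 * header, XID2 and id -- writes of ours and reads of the partner's,
+	 * in side-dependent order -- and ccw[14] is a terminating NOP.
+	 */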
+	ch->ccw[8].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+	ch->ccw[8].count	= 0;
+	ch->ccw[8].cda		= 0x00;
+
+	if (!(ch->xid_th && ch->xid && ch->xid_id))
+		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
+			"%s(%s): xid_th=%p, xid=%p, xid_id=%p",
+			CTCM_FUNTAIL, ch->id, ch->xid_th, ch->xid, ch->xid_id);
+
+	if (side == XSIDE) {
+		/* mpc_action_xside_xid */
+		if (ch->xid_th == NULL)
+				goto done;
+		ch->ccw[9].cmd_code	= CCW_CMD_WRITE;
+		ch->ccw[9].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[9].count	= TH_HEADER_LENGTH;
+		ch->ccw[9].cda		= virt_to_phys(ch->xid_th);
+
+		if (ch->xid == NULL)
+				goto done;
+		ch->ccw[10].cmd_code	= CCW_CMD_WRITE;
+		ch->ccw[10].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[10].count	= XID2_LENGTH;
+		ch->ccw[10].cda		= virt_to_phys(ch->xid);
+
+		ch->ccw[11].cmd_code	= CCW_CMD_READ;
+		ch->ccw[11].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[11].count	= TH_HEADER_LENGTH;
+		ch->ccw[11].cda		= virt_to_phys(ch->rcvd_xid_th);
+
+		ch->ccw[12].cmd_code	= CCW_CMD_READ;
+		ch->ccw[12].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[12].count	= XID2_LENGTH;
+		ch->ccw[12].cda		= virt_to_phys(ch->rcvd_xid);
+
+		ch->ccw[13].cmd_code	= CCW_CMD_READ;
+		ch->ccw[13].cda		= virt_to_phys(ch->rcvd_xid_id);
+
+	} else { /* side == YSIDE : mpc_action_yside_xid */
+		ch->ccw[9].cmd_code	= CCW_CMD_READ;
+		ch->ccw[9].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[9].count	= TH_HEADER_LENGTH;
+		ch->ccw[9].cda		= virt_to_phys(ch->rcvd_xid_th);
+
+		ch->ccw[10].cmd_code	= CCW_CMD_READ;
+		ch->ccw[10].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[10].count	= XID2_LENGTH;
+		ch->ccw[10].cda		= virt_to_phys(ch->rcvd_xid);
+
+		if (ch->xid_th == NULL)
+				goto done;
+		ch->ccw[11].cmd_code	= CCW_CMD_WRITE;
+		ch->ccw[11].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[11].count	= TH_HEADER_LENGTH;
+		ch->ccw[11].cda		= virt_to_phys(ch->xid_th);
+
+		if (ch->xid == NULL)
+				goto done;
+		ch->ccw[12].cmd_code	= CCW_CMD_WRITE;
+		ch->ccw[12].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[12].count	= XID2_LENGTH;
+		ch->ccw[12].cda		= virt_to_phys(ch->xid);
+
+		if (ch->xid_id == NULL)
+				goto done;
+		ch->ccw[13].cmd_code	= CCW_CMD_WRITE;
+		ch->ccw[13].cda		= virt_to_phys(ch->xid_id);
+
+	}
+	ch->ccw[13].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+	ch->ccw[13].count	= 4;
+
+	ch->ccw[14].cmd_code	= CCW_CMD_NOOP;
+	ch->ccw[14].flags	= CCW_FLAG_SLI;
+	ch->ccw[14].count	= 0;
+	ch->ccw[14].cda		= 0;
+
+	CTCM_CCW_DUMP((char *)&ch->ccw[8], sizeof(struct ccw1) * 7);
+	CTCM_D3_DUMP((char *)ch->xid_th, TH_HEADER_LENGTH);
+	CTCM_D3_DUMP((char *)ch->xid, XID2_LENGTH);
+	CTCM_D3_DUMP((char *)ch->xid_id, 4);
+
+	if (!in_irq()) {
+		/* Such conditional locking is a known problem for
+		 * sparse because it is statically non-deterministic.
+		 * Warnings should be ignored here. */
+		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+		gotlock = 1;
+	}
+
+	fsm_addtimer(&ch->timer, 5000 , CTC_EVENT_TIMER, ch);
+	rc = ccw_device_start(ch->cdev, &ch->ccw[8],
+				(unsigned long)ch, 0xff, 0);
+
+	if (gotlock)	/* see remark above about conditional locking */
+		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+
+	if (rc != 0) {
+		ctcm_ccw_check_rc(ch, rc,
+				(side == XSIDE) ? "x-side XID" : "y-side XID");
+	}
+
+done:
+	CTCM_PR_DEBUG("Exit %s: ch=0x%p id=%s\n",
+				__func__, ch, ch->id);
+	return;
+
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg)
+{
+	mpc_action_side_xid(fsm, arg, XSIDE);
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg)
+{
+	mpc_action_side_xid(fsm, arg, YSIDE);
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg)
+{
+	struct channel	   *ch   = arg;
+	struct net_device  *dev  = ch->netdev;
+	struct ctcm_priv   *priv = dev->ml_priv;
+	struct mpc_group   *grp  = priv->mpcg;
+
+	CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
+			__func__, smp_processor_id(), ch, ch->id);
+
+	if (ch->xid == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): ch->xid == NULL",
+				CTCM_FUNTAIL, dev->name);
+		return;
+	}
+
+	fsm_newstate(ch->fsm, CH_XID0_INPROGRESS);
+
+	ch->xid->xid2_option =	XID2_0;
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_XID2INITW:
+	case MPCG_STATE_XID2INITX:
+		ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
+		break;
+	case MPCG_STATE_XID0IOWAIT:
+	case MPCG_STATE_XID0IOWAIX:
+		ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
+		break;
+	}
+
+	fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
+
+	return;
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
+{
+	struct net_device *dev = arg;
+	struct ctcm_priv  *priv = dev->ml_priv;
+	struct mpc_group  *grp  = NULL;
+	int direction;
+	int send = 0;
+
+	if (priv)
+		grp = priv->mpcg;
+	if (grp == NULL)
+		return;
+
+	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
+		struct channel *ch = priv->channel[direction];
+		struct xid2 *thisxid = ch->xid;
+		ch->xid_skb->data = ch->xid_skb_data;
+		skb_reset_tail_pointer(ch->xid_skb);
+		ch->xid_skb->len = 0;
+		thisxid->xid2_option = XID2_7;
+		send = 0;
+
+		/* xid7 phase 1 */
+		if (grp->outstanding_xid7_p2 > 0) {
+			if (grp->roll == YSIDE) {
+				if (fsm_getstate(ch->fsm) == CH_XID7_PENDING1) {
+					fsm_newstate(ch->fsm, CH_XID7_PENDING2);
+					ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
+					memcpy(skb_put(ch->xid_skb,
+							TH_HEADER_LENGTH),
+					       &thdummy, TH_HEADER_LENGTH);
+					send = 1;
+				}
+			} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING2) {
+					fsm_newstate(ch->fsm, CH_XID7_PENDING2);
+					ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
+					memcpy(skb_put(ch->xid_skb,
+						       TH_HEADER_LENGTH),
+					       &thnorm, TH_HEADER_LENGTH);
+					send = 1;
+			}
+		} else {
+			/* xid7 phase 2 */
+			if (grp->roll == YSIDE) {
+				if (fsm_getstate(ch->fsm) < CH_XID7_PENDING4) {
+					fsm_newstate(ch->fsm, CH_XID7_PENDING4);
+					memcpy(skb_put(ch->xid_skb,
+						       TH_HEADER_LENGTH),
+					       &thnorm, TH_HEADER_LENGTH);
+					ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
+					send = 1;
+				}
+			} else if (fsm_getstate(ch->fsm) == CH_XID7_PENDING3) {
+				fsm_newstate(ch->fsm, CH_XID7_PENDING4);
+				ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
+				memcpy(skb_put(ch->xid_skb, TH_HEADER_LENGTH),
+						&thdummy, TH_HEADER_LENGTH);
+				send = 1;
+			}
+		}
+
+		if (send)
+			fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
+	}
+
+	return;
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg)
+{
+
+	struct mpcg_info   *mpcginfo  = arg;
+	struct channel	   *ch   = mpcginfo->ch;
+	struct net_device  *dev  = ch->netdev;
+	struct ctcm_priv   *priv = dev->ml_priv;
+	struct mpc_group   *grp  = priv->mpcg;
+
+	CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n",
+			__func__, ch->id, grp->outstanding_xid2,
+			grp->outstanding_xid7, grp->outstanding_xid7_p2);
+
+	if (fsm_getstate(ch->fsm) < CH_XID7_PENDING)
+		fsm_newstate(ch->fsm, CH_XID7_PENDING);
+
+	grp->outstanding_xid2--;
+	grp->outstanding_xid7++;
+	grp->outstanding_xid7_p2++;
+
+	/* must change state before validating xid to */
+	/* properly handle interim interrupts received*/
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_XID2INITW:
+		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITX);
+		mpc_validate_xid(mpcginfo);
+		break;
+	case MPCG_STATE_XID0IOWAIT:
+		fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIX);
+		mpc_validate_xid(mpcginfo);
+		break;
+	case MPCG_STATE_XID2INITX:
+		if (grp->outstanding_xid2 == 0) {
+			fsm_newstate(grp->fsm, MPCG_STATE_XID7INITW);
+			mpc_validate_xid(mpcginfo);
+			fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
+		}
+		break;
+	case MPCG_STATE_XID0IOWAIX:
+		if (grp->outstanding_xid2 == 0) {
+			fsm_newstate(grp->fsm, MPCG_STATE_XID7INITI);
+			mpc_validate_xid(mpcginfo);
+			fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
+		}
+		break;
+	}
+	kfree(mpcginfo);
+
+	CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n",
+		__func__, ch->id, grp->outstanding_xid2,
+		grp->outstanding_xid7, grp->outstanding_xid7_p2);
+	CTCM_PR_DEBUG("ctcmpc:%s() %s grpstate: %s chanstate: %s \n",
+		__func__, ch->id,
+		fsm_getstate_str(grp->fsm), fsm_getstate_str(ch->fsm));
+	return;
+
+}
+
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
+{
+	struct mpcg_info   *mpcginfo   = arg;
+	struct channel	   *ch	       = mpcginfo->ch;
+	struct net_device  *dev        = ch->netdev;
+	struct ctcm_priv   *priv    = dev->ml_priv;
+	struct mpc_group   *grp     = priv->mpcg;
+
+	CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
+		__func__, smp_processor_id(), ch, ch->id);
+	CTCM_PR_DEBUG("%s: outstanding_xid7: %i, outstanding_xid7_p2: %i\n",
+		__func__, grp->outstanding_xid7, grp->outstanding_xid7_p2);
+
+	grp->outstanding_xid7--;
+	ch->xid_skb->data = ch->xid_skb_data;
+	skb_reset_tail_pointer(ch->xid_skb);
+	ch->xid_skb->len = 0;
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_XID7INITI:
+		fsm_newstate(grp->fsm, MPCG_STATE_XID7INITZ);
+		mpc_validate_xid(mpcginfo);
+		break;
+	case MPCG_STATE_XID7INITW:
+		fsm_newstate(grp->fsm, MPCG_STATE_XID7INITX);
+		mpc_validate_xid(mpcginfo);
+		break;
+	case MPCG_STATE_XID7INITZ:
+	case MPCG_STATE_XID7INITX:
+		if (grp->outstanding_xid7 == 0) {
+			if (grp->outstanding_xid7_p2 > 0) {
+				grp->outstanding_xid7 =
+					grp->outstanding_xid7_p2;
+				grp->outstanding_xid7_p2 = 0;
+			} else
+				fsm_newstate(grp->fsm, MPCG_STATE_XID7INITF);
+
+			mpc_validate_xid(mpcginfo);
+			fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
+			break;
+		}
+		mpc_validate_xid(mpcginfo);
+		break;
+	}
+	kfree(mpcginfo);
+	return;
+}
+
+/*
+ * mpc_action helper of an MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static int mpc_send_qllc_discontact(struct net_device *dev)
+{
+	__u32	new_len	= 0;
+	struct sk_buff   *skb;
+	struct qllc      *qllcptr;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp = priv->mpcg;
+
+	CTCM_PR_DEBUG("%s: GROUP STATE: %s\n",
+		__func__, mpcg_state_names[grp->saved_state]);
+
+	switch (grp->saved_state) {
+	/*
+	 * establish conn callback function is
+	 * preferred method to report failure
+	 */
+	case MPCG_STATE_XID0IOWAIT:
+	case MPCG_STATE_XID0IOWAIX:
+	case MPCG_STATE_XID7INITI:
+	case MPCG_STATE_XID7INITZ:
+	case MPCG_STATE_XID2INITW:
+	case MPCG_STATE_XID2INITX:
+	case MPCG_STATE_XID7INITW:
+	case MPCG_STATE_XID7INITX:
+		if (grp->estconnfunc) {
+			grp->estconnfunc(grp->port_num, -1, 0);
+			grp->estconnfunc = NULL;
+			break;
+		}
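+		/* no estconnfunc callback registered: fall through and
+		 * send the QLLC discontact instead */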
+	case MPCG_STATE_FLOWC:
+	case MPCG_STATE_READY:
+		grp->send_qllc_disc = 2;
+		new_len = sizeof(struct qllc);
+		qllcptr = kzalloc(new_len, gfp_type() | GFP_DMA);
+		if (qllcptr == NULL) {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): qllcptr allocation error",
+						CTCM_FUNTAIL, dev->name);
+			return -ENOMEM;
+		}
+
+		qllcptr->qllc_address = 0xcc;
+		qllcptr->qllc_commands = 0x03;
+
+		skb = __dev_alloc_skb(new_len, GFP_ATOMIC);
+
+		if (skb == NULL) {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): skb allocation error",
+						CTCM_FUNTAIL, dev->name);
+			priv->stats.rx_dropped++;
+			kfree(qllcptr);
+			return -ENOMEM;
+		}
+
+		memcpy(skb_put(skb, new_len), qllcptr, new_len);
+		kfree(qllcptr);
+
+		if (skb_headroom(skb) < 4) {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): skb_headroom error",
+						CTCM_FUNTAIL, dev->name);
+			dev_kfree_skb_any(skb);
+			return -ENOMEM;
+		}
+
+		*((__u32 *)skb_push(skb, 4)) =
+			priv->channel[CTCM_READ]->pdu_seq;
+		priv->channel[CTCM_READ]->pdu_seq++;
+		CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n",
+				__func__, priv->channel[CTCM_READ]->pdu_seq);
+
+		/* receipt of CC03 resets the anticipated sequence number on
+		 * the receiving side */
+		priv->channel[CTCM_READ]->pdu_seq = 0x00;
+		skb_reset_mac_header(skb);
+		skb->dev = dev;
+		skb->protocol = htons(ETH_P_SNAP);
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		CTCM_D3_DUMP(skb->data, (sizeof(struct qllc) + 4));
+
+		netif_rx(skb);
+		break;
+	default:
+		break;
+
+	}
+
+	return 0;
+}
+/* --- This is the END my friend --- */
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_mpc.h b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_mpc.h
new file mode 100644
index 0000000..1fa07b0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_mpc.h
@@ -0,0 +1,240 @@
+/*
+ * drivers/s390/net/ctcm_mpc.h
+ *
+ * Copyright IBM Corp. 2007
+ * Authors:	Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ * 	MPC additions:
+ *		Belinda Thompson (belindat@us.ibm.com)
+ *		Andy Richter (richtera@us.ibm.com)
+ */
+
+#ifndef _CTC_MPC_H_
+#define _CTC_MPC_H_
+
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include "fsm.h"
+
+/*
+ * MPC external interface
+ * Note that ctc_mpc_xyz are called with a lock on ................
+ */
+
+/*  port_number is the mpc device 0, 1, 2 etc mpc2 is port_number 2 */
+
+/*  passive open  Just wait for XID2 exchange */
+extern int ctc_mpc_alloc_channel(int port,
+		void (*callback)(int port_num, int max_write_size));
+/* active open  Alloc then send XID2 */
+extern void ctc_mpc_establish_connectivity(int port,
+		void (*callback)(int port_num, int rc, int max_write_size));
+
+extern void ctc_mpc_dealloc_ch(int port);
+extern void ctc_mpc_flow_control(int port, int flowc);
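+
+/*
+ * Illustrative call sequence for a user of this interface (sketch only;
+ * port 2 and the callback names are examples, not part of the driver):
+ *
+ *	ctc_mpc_alloc_channel(2, my_alloc_cb);		// passive open
+ *	ctc_mpc_establish_connectivity(2, my_conn_cb);	// active open
+ *	...				// exchange data while connected
+ *	ctc_mpc_dealloc_ch(2);		// release the MPC group
+ */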
+
+/*
+ * other MPC Group prototypes and structures
+ */
+
+#define ETH_P_SNA_DIX	0x80D5
+
+/*
+ * Declaration of an XID2
+ *
+ */
+#define ALLZEROS 0x0000000000000000
+
+#define XID_FM2		0x20
+#define XID2_0		0x00
+#define XID2_7		0x07
+#define XID2_WRITE_SIDE 0x04
+#define XID2_READ_SIDE	0x05
+
+struct xid2 {
+	__u8	xid2_type_id;
+	__u8	xid2_len;
+	__u32	xid2_adj_id;
+	__u8	xid2_rlen;
+	__u8	xid2_resv1;
+	__u8	xid2_flag1;
+	__u8	xid2_fmtt;
+	__u8	xid2_flag4;
+	__u16	xid2_resv2;
+	__u8	xid2_tgnum;
+	__u32	xid2_sender_id;
+	__u8	xid2_flag2;
+	__u8	xid2_option;
+	char  xid2_resv3[8];
+	__u16	xid2_resv4;
+	__u8	xid2_dlc_type;
+	__u16	xid2_resv5;
+	__u8	xid2_mpc_flag;
+	__u8	xid2_resv6;
+	__u16	xid2_buf_len;
+	char xid2_buffer[255 - (13 * sizeof(__u8) +
+				2 * sizeof(__u32) +
+				4 * sizeof(__u16) +
+				8 * sizeof(char))];
+} __attribute__ ((packed));
+
+#define XID2_LENGTH  (sizeof(struct xid2))
+
+struct th_header {
+	__u8	th_seg;
+	__u8	th_ch_flag;
+#define TH_HAS_PDU	0xf0
+#define TH_IS_XID	0x01
+#define TH_SWEEP_REQ	0xfe
+#define TH_SWEEP_RESP	0xff
+	__u8	th_blk_flag;
+#define TH_DATA_IS_XID	0x80
+#define TH_RETRY	0x40
+#define TH_DISCONTACT	0xc0
+#define TH_SEG_BLK	0x20
+#define TH_LAST_SEG	0x10
+#define TH_PDU_PART	0x08
+	__u8	th_is_xid;	/* is 0x01 if this is XID  */
+	__u32	th_seq_num;
+} __attribute__ ((packed));
+
+struct th_addon {
+	__u32	th_last_seq;
+	__u32	th_resvd;
+} __attribute__ ((packed));
+
+struct th_sweep {
+	struct th_header th;
+	struct th_addon sw;
+} __attribute__ ((packed));
+
+#define TH_HEADER_LENGTH (sizeof(struct th_header))
+#define TH_SWEEP_LENGTH (sizeof(struct th_sweep))
+
+#define PDU_LAST	0x80
+#define PDU_CNTL	0x40
+#define PDU_FIRST	0x20
+
+struct pdu {
+	__u32	pdu_offset;
+	__u8	pdu_flag;
+	__u8	pdu_proto;   /*  0x01 is APPN SNA  */
+	__u16	pdu_seq;
+} __attribute__ ((packed));
+
+#define PDU_HEADER_LENGTH  (sizeof(struct pdu))
+
+struct qllc {
+	__u8	qllc_address;
+#define QLLC_REQ	0xFF
+#define QLLC_RESP	0x00
+	__u8	qllc_commands;
+#define QLLC_DISCONNECT 0x53
+#define QLLC_UNSEQACK	0x73
+#define QLLC_SETMODE	0x93
+#define QLLC_EXCHID	0xBF
+} __attribute__ ((packed));
+
+
+/*
+ * Definition of one MPC group
+ */
+
+#define MAX_MPCGCHAN		10
+#define MPC_XID_TIMEOUT_VALUE	10000
+#define MPC_CHANNEL_ADD		0
+#define MPC_CHANNEL_REMOVE	1
+#define MPC_CHANNEL_ATTN	2
+#define XSIDE	1
+#define YSIDE	0
+
+struct mpcg_info {
+	struct sk_buff	*skb;
+	struct channel	*ch;
+	struct xid2	*xid;
+	struct th_sweep	*sweep;
+	struct th_header *th;
+};
+
+struct mpc_group {
+	struct tasklet_struct mpc_tasklet;
+	struct tasklet_struct mpc_tasklet2;
+	int	changed_side;
+	int	saved_state;
+	int	channels_terminating;
+	int	out_of_sequence;
+	int	flow_off_called;
+	int	port_num;
+	int	port_persist;
+	int	alloc_called;
+	__u32	xid2_adj_id;
+	__u8	xid2_tgnum;
+	__u32	xid2_sender_id;
+	int	num_channel_paths;
+	int	active_channels[2];
+	__u16	group_max_buflen;
+	int	outstanding_xid2;
+	int	outstanding_xid7;
+	int	outstanding_xid7_p2;
+	int	sweep_req_pend_num;
+	int	sweep_rsp_pend_num;
+	struct sk_buff	*xid_skb;
+	char		*xid_skb_data;
+	struct th_header *xid_th;
+	struct xid2	*xid;
+	char		*xid_id;
+	struct th_header *rcvd_xid_th;
+	struct sk_buff	*rcvd_xid_skb;
+	char		*rcvd_xid_data;
+	__u8		in_sweep;
+	__u8		roll;
+	struct xid2	*saved_xid2;
+	void 		(*allochanfunc)(int, int);
+	int		allocchan_callback_retries;
+	void 		(*estconnfunc)(int, int, int);
+	int		estconn_callback_retries;
+	int		estconn_called;
+	int		xidnogood;
+	int		send_qllc_disc;
+	fsm_timer	timer;
+	fsm_instance	*fsm; /* group xid fsm */
+};
+
+#ifdef DEBUGDATA
+void ctcmpc_dumpit(char *buf, int len);
+#else
+static inline void ctcmpc_dumpit(char *buf, int len)
+{
+}
+#endif
+
+#ifdef DEBUGDATA
+/*
+ * Dump header and first 16 bytes of an sk_buff for debugging purposes.
+ *
+ * skb	 The struct sk_buff to dump.
+ * offset Offset relative to skb-data, where to start the dump.
+ */
+void ctcmpc_dump_skb(struct sk_buff *skb, int offset);
+#else
+static inline void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
+{}
+#endif
+
+static inline void ctcmpc_dump32(char *buf, int len)
+{
+	if (len < 32)
+		ctcmpc_dumpit(buf, len);
+	else
+		ctcmpc_dumpit(buf, 32);
+}
+
+int ctcmpc_open(struct net_device *);
+void ctcm_ccw_check_rc(struct channel *, int, char *);
+void mpc_group_ready(unsigned long adev);
+void mpc_channel_action(struct channel *ch, int direction, int action);
+void mpc_action_send_discontact(unsigned long thischan);
+void mpc_action_discontact(fsm_instance *fi, int event, void *arg);
+void ctcmpc_bh(unsigned long thischan);
+#endif
+/* --- This is the END my friend --- */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_sysfs.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_sysfs.c
new file mode 100644
index 0000000..650aec1
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/ctcm_sysfs.c
@@ -0,0 +1,223 @@
+/*
+ * drivers/s390/net/ctcm_sysfs.c
+ *
+ * Copyright IBM Corp. 2007, 2007
+ * Authors:	Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ */
+
+#undef DEBUG
+#undef DEBUGDATA
+#undef DEBUGCCW
+
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+#include "ctcm_main.h"
+
+/*
+ * sysfs attributes
+ */
+
+static ssize_t ctcm_buffer_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct ctcm_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv)
+		return -ENODEV;
+	return sprintf(buf, "%d\n", priv->buffer_size);
+}
+
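+/*
+ * Handle writes to the 'buffer' sysfs attribute (see DEVICE_ATTR(buffer)
+ * below).  The requested size must not exceed CTCM_BUFSIZE_LIMIT and must
+ * leave room for the LL header, e.g. (the device path is illustrative only):
+ *	echo 32768 > /sys/devices/.../<ctcm device>/buffer
+ */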
+static ssize_t ctcm_buffer_write(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct net_device *ndev;
+	int bs1;
+	struct ctcm_priv *priv = dev_get_drvdata(dev);
+
+	/* check priv before dereferencing it to get the net_device */
+	if (!priv || !priv->channel[CTCM_READ] ||
+	    !priv->channel[CTCM_READ]->netdev) {
+		CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
+		return -ENODEV;
+	}
+	ndev = priv->channel[CTCM_READ]->netdev;
+
+	sscanf(buf, "%u", &bs1);
+	if (bs1 > CTCM_BUFSIZE_LIMIT)
+					goto einval;
+	if (bs1 < (576 + LL_HEADER_LENGTH + 2))
+					goto einval;
+	priv->buffer_size = bs1;	/* just to overwrite the default */
+
+	if ((ndev->flags & IFF_RUNNING) &&
+	    (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
+					goto einval;
+
+	priv->channel[CTCM_READ]->max_bufsize = bs1;
+	priv->channel[CTCM_WRITE]->max_bufsize = bs1;
+	if (!(ndev->flags & IFF_RUNNING))
+		ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
+	priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+	priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+
+	CTCM_DBF_DEV(SETUP, ndev, buf);
+	return count;
+
+einval:
+	CTCM_DBF_DEV(SETUP, ndev, "buff_err");
+	return -EINVAL;
+}
+
+static void ctcm_print_statistics(struct ctcm_priv *priv)
+{
+	char *sbuf;
+	char *p;
+
+	if (!priv)
+		return;
+	sbuf = kmalloc(2048, GFP_KERNEL);
+	if (sbuf == NULL)
+		return;
+	p = sbuf;
+
+	p += sprintf(p, "  Device FSM state: %s\n",
+		     fsm_getstate_str(priv->fsm));
+	p += sprintf(p, "  RX channel FSM state: %s\n",
+		     fsm_getstate_str(priv->channel[CTCM_READ]->fsm));
+	p += sprintf(p, "  TX channel FSM state: %s\n",
+		     fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm));
+	p += sprintf(p, "  Max. TX buffer used: %ld\n",
+		     priv->channel[CTCM_WRITE]->prof.maxmulti);
+	p += sprintf(p, "  Max. chained SKBs: %ld\n",
+		     priv->channel[CTCM_WRITE]->prof.maxcqueue);
+	p += sprintf(p, "  TX single write ops: %ld\n",
+		     priv->channel[CTCM_WRITE]->prof.doios_single);
+	p += sprintf(p, "  TX multi write ops: %ld\n",
+		     priv->channel[CTCM_WRITE]->prof.doios_multi);
+	p += sprintf(p, "  Net bytes written: %ld\n",
+		     priv->channel[CTCM_WRITE]->prof.txlen);
+	p += sprintf(p, "  Max. TX IO-time: %ld\n",
+		     priv->channel[CTCM_WRITE]->prof.tx_time);
+
+	printk(KERN_INFO "Statistics for %s:\n%s",
+				priv->channel[CTCM_WRITE]->netdev->name, sbuf);
+	kfree(sbuf);
+	return;
+}
+
+static ssize_t stats_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct ctcm_priv *priv = dev_get_drvdata(dev);
+	if (!priv)
+		return -ENODEV;
+	ctcm_print_statistics(priv);
+	return sprintf(buf, "0\n");
+}
+
+static ssize_t stats_write(struct device *dev, struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct ctcm_priv *priv = dev_get_drvdata(dev);
+	if (!priv)
+		return -ENODEV;
+	/* Reset statistics */
+	memset(&priv->channel[CTCM_WRITE]->prof, 0,
+				sizeof(priv->channel[CTCM_WRITE]->prof));
+	return count;
+}
+
+static ssize_t ctcm_proto_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct ctcm_priv *priv = dev_get_drvdata(dev);
+	if (!priv)
+		return -ENODEV;
+
+	return sprintf(buf, "%d\n", priv->protocol);
+}
+
+static ssize_t ctcm_proto_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int value;
+	struct ctcm_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv)
+		return -ENODEV;
+	sscanf(buf, "%u", &value);
+	if (!((value == CTCM_PROTO_S390)  ||
+	      (value == CTCM_PROTO_LINUX) ||
+	      (value == CTCM_PROTO_MPC) ||
+	      (value == CTCM_PROTO_OS390)))
+		return -EINVAL;
+	priv->protocol = value;
+	CTCM_DBF_DEV(SETUP, dev, buf);
+
+	return count;
+}
+
+static const char *ctcm_type[] = {
+	"not a channel",
+	"CTC/A",
+	"FICON channel",
+	"ESCON channel",
+	"unknown channel type",
+	"unsupported channel type",
+};
+
+static ssize_t ctcm_type_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct ccwgroup_device *cgdev;
+
+	cgdev = to_ccwgroupdev(dev);
+	if (!cgdev)
+		return -ENODEV;
+
+	return sprintf(buf, "%s\n",
+			ctcm_type[cgdev->cdev[0]->id.driver_info]);
+}
+
+static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write);
+static DEVICE_ATTR(protocol, 0644, ctcm_proto_show, ctcm_proto_store);
+static DEVICE_ATTR(type, 0444, ctcm_type_show, NULL);
+static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
+
+static struct attribute *ctcm_attr[] = {
+	&dev_attr_protocol.attr,
+	&dev_attr_type.attr,
+	&dev_attr_buffer.attr,
+	NULL,
+};
+
+static struct attribute_group ctcm_attr_group = {
+	.attrs = ctcm_attr,
+};
+
+int ctcm_add_attributes(struct device *dev)
+{
+	int rc;
+
+	rc = device_create_file(dev, &dev_attr_stats);
+
+	return rc;
+}
+
+void ctcm_remove_attributes(struct device *dev)
+{
+	device_remove_file(dev, &dev_attr_stats);
+}
+
+int ctcm_add_files(struct device *dev)
+{
+	return sysfs_create_group(&dev->kobj, &ctcm_attr_group);
+}
+
+void ctcm_remove_files(struct device *dev)
+{
+	sysfs_remove_group(&dev->kobj, &ctcm_attr_group);
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/fsm.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/fsm.c
new file mode 100644
index 0000000..e5dea67
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/fsm.c
@@ -0,0 +1,214 @@
+/**
+ * A generic FSM based on fsm used in isdn4linux
+ *
+ */
+
+#include "fsm.h"
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
+MODULE_DESCRIPTION("Finite state machine helper functions");
+MODULE_LICENSE("GPL");
+
+fsm_instance *
+init_fsm(char *name, const char **state_names, const char **event_names, int nr_states,
+		int nr_events, const fsm_node *tmpl, int tmpl_len, gfp_t order)
+{
+	int i;
+	fsm_instance *this;
+	fsm_function_t *m;
+	fsm *f;
+
+	this = kzalloc(sizeof(fsm_instance), order);
+	if (this == NULL) {
+		printk(KERN_WARNING
+			"fsm(%s): init_fsm: Couldn't alloc instance\n", name);
+		return NULL;
+	}
+	strlcpy(this->name, name, sizeof(this->name));
+	init_waitqueue_head(&this->wait_q);
+
+	f = kzalloc(sizeof(fsm), order);
+	if (f == NULL) {
+		printk(KERN_WARNING
+			"fsm(%s): init_fsm: Couldn't alloc fsm\n", name);
+		kfree_fsm(this);
+		return NULL;
+	}
+	f->nr_events = nr_events;
+	f->nr_states = nr_states;
+	f->event_names = event_names;
+	f->state_names = state_names;
+	this->f = f;
+
+	m = kcalloc(nr_states*nr_events, sizeof(fsm_function_t), order);
+	if (m == NULL) {
+		printk(KERN_WARNING
+			"fsm(%s): init_fsm: Couldn't alloc jumptable\n", name);
+		kfree_fsm(this);
+		return NULL;
+	}
+	f->jumpmatrix = m;
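+	/* the jump table is event-major: the handler for (state, event) is
+	 * stored at jumpmatrix[nr_states * event + state], see fsm_event() */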
+
+	for (i = 0; i < tmpl_len; i++) {
+		if ((tmpl[i].cond_state >= nr_states) ||
+		    (tmpl[i].cond_event >= nr_events)   ) {
+			printk(KERN_ERR
+				"fsm(%s): init_fsm: Bad template l=%d st(%ld/%ld) ev(%ld/%ld)\n",
+				name, i, (long)tmpl[i].cond_state, (long)f->nr_states,
+				(long)tmpl[i].cond_event, (long)f->nr_events);
+			kfree_fsm(this);
+			return NULL;
+		} else
+			m[nr_states * tmpl[i].cond_event + tmpl[i].cond_state] =
+				tmpl[i].function;
+	}
+	return this;
+}
+
+void
+kfree_fsm(fsm_instance *this)
+{
+	if (this) {
+		if (this->f) {
+			kfree(this->f->jumpmatrix);
+			kfree(this->f);
+		}
+		kfree(this);
+	} else
+		printk(KERN_WARNING
+			"fsm: kfree_fsm called with NULL argument\n");
+}
+
+#if FSM_DEBUG_HISTORY
+void
+fsm_print_history(fsm_instance *fi)
+{
+	int idx = 0;
+	int i;
+
+	if (fi->history_size >= FSM_HISTORY_SIZE)
+		idx = fi->history_index;
+
+	printk(KERN_DEBUG "fsm(%s): History:\n", fi->name);
+	for (i = 0; i < fi->history_size; i++) {
+		int e = fi->history[idx].event;
+		int s = fi->history[idx++].state;
+		idx %= FSM_HISTORY_SIZE;
+		if (e == -1)
+			printk(KERN_DEBUG "  S=%s\n",
+			       fi->f->state_names[s]);
+		else
+			printk(KERN_DEBUG "  S=%s E=%s\n",
+			       fi->f->state_names[s],
+			       fi->f->event_names[e]);
+	}
+	fi->history_size = fi->history_index = 0;
+}
+
+void
+fsm_record_history(fsm_instance *fi, int state, int event)
+{
+	fi->history[fi->history_index].state = state;
+	fi->history[fi->history_index++].event = event;
+	fi->history_index %= FSM_HISTORY_SIZE;
+	if (fi->history_size < FSM_HISTORY_SIZE)
+		fi->history_size++;
+}
+#endif
+
+const char *
+fsm_getstate_str(fsm_instance *fi)
+{
+	int st = atomic_read(&fi->state);
+	if (st >= fi->f->nr_states)
+		return "Invalid";
+	return fi->f->state_names[st];
+}
+
+static void
+fsm_expire_timer(fsm_timer *this)
+{
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Timer %p expired\n",
+	       this->fi->name, this);
+#endif
+	fsm_event(this->fi, this->expire_event, this->event_arg);
+}
+
+void
+fsm_settimer(fsm_instance *fi, fsm_timer *this)
+{
+	this->fi = fi;
+	this->tl.function = (void *)fsm_expire_timer;
+	this->tl.data = (long)this;
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name,
+	       this);
+#endif
+	init_timer(&this->tl);
+}
+
+void
+fsm_deltimer(fsm_timer *this)
+{
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Delete timer %p\n", this->fi->name,
+		this);
+#endif
+	del_timer(&this->tl);
+}
+
+int
+fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg)
+{
+
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Add timer %p %dms\n",
+	       this->fi->name, this, millisec);
+#endif
+
+	init_timer(&this->tl);
+	this->tl.function = (void *)fsm_expire_timer;
+	this->tl.data = (long)this;
+	this->expire_event = event;
+	this->event_arg = arg;
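+	/* convert the millisecond timeout into jiffies relative to now */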
+	this->tl.expires = jiffies + (millisec * HZ) / 1000;
+	add_timer(&this->tl);
+	return 0;
+}
+
+/* FIXME: this function is never used, why */
+void
+fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg)
+{
+
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Restart timer %p %dms\n",
+		this->fi->name, this, millisec);
+#endif
+
+	del_timer(&this->tl);
+	init_timer(&this->tl);
+	this->tl.function = (void *)fsm_expire_timer;
+	this->tl.data = (long)this;
+	this->expire_event = event;
+	this->event_arg = arg;
+	this->tl.expires = jiffies + (millisec * HZ) / 1000;
+	add_timer(&this->tl);
+}
+
+EXPORT_SYMBOL(init_fsm);
+EXPORT_SYMBOL(kfree_fsm);
+EXPORT_SYMBOL(fsm_settimer);
+EXPORT_SYMBOL(fsm_deltimer);
+EXPORT_SYMBOL(fsm_addtimer);
+EXPORT_SYMBOL(fsm_modtimer);
+EXPORT_SYMBOL(fsm_getstate_str);
+
+#if FSM_DEBUG_HISTORY
+EXPORT_SYMBOL(fsm_print_history);
+EXPORT_SYMBOL(fsm_record_history);
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/fsm.h b/ap/os/linux/linux-3.4.x/drivers/s390/net/fsm.h
new file mode 100644
index 0000000..a4510cf
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/fsm.h
@@ -0,0 +1,265 @@
+#ifndef _FSM_H_
+#define _FSM_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/atomic.h>
+
+/**
+ * Define this to get debugging messages.
+ */
+#define FSM_DEBUG         0
+
+/**
+ * Define this to get debugging messages for
+ * timer handling.
+ */
+#define FSM_TIMER_DEBUG   0
+
+/**
+ * Define these to record a history of
+ * events/state changes and print it if an
+ * action_function is not found.
+ */
+#define FSM_DEBUG_HISTORY 0
+#define FSM_HISTORY_SIZE  40
+
+struct fsm_instance_t;
+
+/**
+ * Definition of an action function, called by a FSM
+ */
+typedef void (*fsm_function_t)(struct fsm_instance_t *, int, void *);
+
+/**
+ * Internal jump table for a FSM
+ */
+typedef struct {
+	fsm_function_t *jumpmatrix;
+	int nr_events;
+	int nr_states;
+	const char **event_names;
+	const char **state_names;
+} fsm;
+
+#if FSM_DEBUG_HISTORY
+/**
+ * Element of State/Event history used for debugging.
+ */
+typedef struct {
+	int state;
+	int event;
+} fsm_history;
+#endif
+
+/**
+ * Representation of a FSM
+ */
+typedef struct fsm_instance_t {
+	fsm *f;
+	atomic_t state;
+	char name[16];
+	void *userdata;
+	int userint;
+	wait_queue_head_t wait_q;
+#if FSM_DEBUG_HISTORY
+	int         history_index;
+	int         history_size;
+	fsm_history history[FSM_HISTORY_SIZE];
+#endif
+} fsm_instance;
+
+/**
+ * Description of a state-event combination
+ */
+typedef struct {
+	int cond_state;
+	int cond_event;
+	fsm_function_t function;
+} fsm_node;
+
+/**
+ * Description of a FSM Timer.
+ */
+typedef struct {
+	fsm_instance *fi;
+	struct timer_list tl;
+	int expire_event;
+	void *event_arg;
+} fsm_timer;
+
+/**
+ * Creates an FSM
+ *
+ * @param name        Name of this instance for logging purposes.
+ * @param state_names An array of names for all states for logging purposes.
+ * @param event_names An array of names for all events for logging purposes.
+ * @param nr_states   Number of states for this instance.
+ * @param nr_events   Number of events for this instance.
+ * @param tmpl        An array of fsm_nodes, describing this FSM.
+ * @param tmpl_len    Length of the describing array.
+ * @param order       Parameter for allocation of the FSM data structs.
+ */
+extern fsm_instance *
+init_fsm(char *name, const char **state_names,
+	 const char **event_names,
+	 int nr_states, int nr_events, const fsm_node *tmpl,
+	 int tmpl_len, gfp_t order);
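+
+/*
+ * Illustrative sketch (not part of the driver) of defining and creating
+ * a minimal FSM with init_fsm():
+ *
+ *	enum { ST_IDLE, ST_RUNNING, NR_STATES };
+ *	enum { EV_START, EV_STOP, NR_EVENTS };
+ *	static const char *st_names[] = { "Idle", "Running" };
+ *	static const char *ev_names[] = { "Start", "Stop" };
+ *	static void do_start(fsm_instance *fi, int event, void *arg)
+ *	{
+ *		fsm_newstate(fi, ST_RUNNING);
+ *	}
+ *	static const fsm_node nodes[] = {
+ *		{ ST_IDLE, EV_START, do_start },
+ *	};
+ *
+ *	fsm_instance *fi = init_fsm("demo", st_names, ev_names, NR_STATES,
+ *				    NR_EVENTS, nodes, ARRAY_SIZE(nodes),
+ *				    GFP_KERNEL);
+ *	fsm_event(fi, EV_START, NULL);
+ *	kfree_fsm(fi);
+ */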
+
+/**
+ * Releases an FSM
+ *
+ * @param fi Pointer to an FSM, previously created with init_fsm.
+ */
+extern void kfree_fsm(fsm_instance *fi);
+
+#if FSM_DEBUG_HISTORY
+extern void
+fsm_print_history(fsm_instance *fi);
+
+extern void
+fsm_record_history(fsm_instance *fi, int state, int event);
+#endif
+
+/**
+ * Emits an event to a FSM.
+ * If an action function is defined for the current state/event combination,
+ * this function is called.
+ *
+ * @param fi    Pointer to FSM which should receive the event.
+ * @param event The event do be delivered.
+ * @param arg   A generic argument, handed to the action function.
+ *
+ * @return      0  on success,
+ *              1  if current state or event is out of range
+ *              !0 if state and event in range, but no action defined.
+ */
+static inline int
+fsm_event(fsm_instance *fi, int event, void *arg)
+{
+	fsm_function_t r;
+	int state = atomic_read(&fi->state);
+
+	if ((state >= fi->f->nr_states) ||
+	    (event >= fi->f->nr_events)       ) {
+		printk(KERN_ERR "fsm(%s): Invalid state st(%ld/%ld) ev(%d/%ld)\n",
+			fi->name, (long)state,(long)fi->f->nr_states, event,
+			(long)fi->f->nr_events);
+#if FSM_DEBUG_HISTORY
+		fsm_print_history(fi);
+#endif
+		return 1;
+	}
+	r = fi->f->jumpmatrix[fi->f->nr_states * event + state];
+	if (r) {
+#if FSM_DEBUG
+		printk(KERN_DEBUG "fsm(%s): state %s event %s\n",
+		       fi->name, fi->f->state_names[state],
+		       fi->f->event_names[event]);
+#endif
+#if FSM_DEBUG_HISTORY
+		fsm_record_history(fi, state, event);
+#endif
+		r(fi, event, arg);
+		return 0;
+	} else {
+#if FSM_DEBUG || FSM_DEBUG_HISTORY
+		printk(KERN_DEBUG "fsm(%s): no function for event %s in state %s\n",
+		       fi->name, fi->f->event_names[event],
+		       fi->f->state_names[state]);
+#endif
+#if FSM_DEBUG_HISTORY
+		fsm_print_history(fi);
+#endif
+		return !0;
+	}
+}
+
+/**
+ * Modifies the state of an FSM.
+ * This does <em>not</em> trigger an event or call an action function.
+ *
+ * @param fi    Pointer to FSM
+ * @param state The new state for this FSM.
+ */
+static inline void
+fsm_newstate(fsm_instance *fi, int newstate)
+{
+	atomic_set(&fi->state,newstate);
+#if FSM_DEBUG_HISTORY
+	fsm_record_history(fi, newstate, -1);
+#endif
+#if FSM_DEBUG
+	printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name,
+		fi->f->state_names[newstate]);
+#endif
+	wake_up(&fi->wait_q);
+}
+
+/**
+ * Retrieves the state of an FSM
+ *
+ * @param fi Pointer to FSM
+ *
+ * @return The current state of the FSM.
+ */
+static inline int
+fsm_getstate(fsm_instance *fi)
+{
+	return atomic_read(&fi->state);
+}
+
+/**
+ * Retrieves the name of the state of an FSM
+ *
+ * @param fi Pointer to FSM
+ *
+ * @return The current state of the FSM in a human readable form.
+ */
+extern const char *fsm_getstate_str(fsm_instance *fi);
+
+/**
+ * Initializes a timer for an FSM.
+ * This prepares an fsm_timer for usage with fsm_addtimer.
+ *
+ * @param fi    Pointer to FSM
+ * @param timer The timer to be initialized.
+ */
+extern void fsm_settimer(fsm_instance *fi, fsm_timer *);
+
+/**
+ * Clears a pending timer of an FSM instance.
+ *
+ * @param timer The timer to clear.
+ */
+extern void fsm_deltimer(fsm_timer *timer);
+
+/**
+ * Adds and starts a timer to an FSM instance.
+ *
+ * @param timer    The timer to be added. The field fi of that timer
+ *                 must have been set to point to the instance.
+ * @param millisec Duration, after which the timer should expire.
+ * @param event    Event, to trigger if timer expires.
+ * @param arg      Generic argument, provided to expiry function.
+ *
+ * @return         0 on success, -1 if timer is already active.
+ */
+extern int fsm_addtimer(fsm_timer *timer, int millisec, int event, void *arg);
+
+/**
+ * Modifies a timer of an FSM.
+ *
+ * @param timer    The timer to modify.
+ * @param millisec Duration, after which the timer should expire.
+ * @param event    Event, to trigger if timer expires.
+ * @param arg      Generic argument, provided to expiry function.
+ */
+extern void fsm_modtimer(fsm_timer *timer, int millisec, int event, void *arg);
+
+#endif /* _FSM_H_ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/lcs.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/lcs.c
new file mode 100644
index 0000000..687efe4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/lcs.c
@@ -0,0 +1,2512 @@
+/*
+ *  Linux for S/390 Lan Channel Station Network Driver
+ *
+ *  Copyright IBM Corp. 1999, 2009
+ *  Author(s): Original Code written by
+ *			DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com>
+ *	       Rewritten by
+ *			Frank Pavlic <fpavlic@de.ibm.com> and
+ *			Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define KMSG_COMPONENT		"lcs"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/trdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/inetdevice.h>
+#include <linux/in.h>
+#include <linux/igmp.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <net/arp.h>
+#include <net/ip.h>
+
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <asm/timex.h>
+#include <linux/device.h>
+#include <asm/ccwgroup.h>
+
+#include "lcs.h"
+
+
+#if !defined(CONFIG_ETHERNET) && \
+    !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
+#error Cannot compile lcs.c without some net devices switched on.
+#endif
+
+/**
+ * initialization string for output
+ */
+
+static char version[] __initdata = "LCS driver";
+
+/**
+  * the root device for lcs group devices
+  */
+static struct device *lcs_root_dev;
+
+/**
+ * Some prototypes.
+ */
+static void lcs_tasklet(unsigned long);
+static void lcs_start_kernel_thread(struct work_struct *);
+static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
+#ifdef CONFIG_IP_MULTICAST
+static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
+#endif /* CONFIG_IP_MULTICAST */
+static int lcs_recovery(void *ptr);
+
+/**
+ * Debug Facility Stuff
+ */
+static char debug_buffer[255];
+static debug_info_t *lcs_dbf_setup;
+static debug_info_t *lcs_dbf_trace;
+
+/**
+ *  LCS Debug Facility functions
+ */
+static void
+lcs_unregister_debug_facility(void)
+{
+	if (lcs_dbf_setup)
+		debug_unregister(lcs_dbf_setup);
+	if (lcs_dbf_trace)
+		debug_unregister(lcs_dbf_trace);
+}
+
+static int
+lcs_register_debug_facility(void)
+{
+	lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8);
+	lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8);
+	if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
+		pr_err("Not enough memory for debug facility.\n");
+		lcs_unregister_debug_facility();
+		return -ENOMEM;
+	}
+	debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
+	debug_set_level(lcs_dbf_setup, 2);
+	debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
+	debug_set_level(lcs_dbf_trace, 2);
+	return 0;
+}
+
+/**
+ * Allocate io buffers.
+ */
+static int
+lcs_alloc_channel(struct lcs_channel *channel)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(2, setup, "ichalloc");
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		/* allocate memory for the io buffer */
+		channel->iob[cnt].data =
+			kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
+		if (channel->iob[cnt].data == NULL)
+			break;
+		channel->iob[cnt].state = LCS_BUF_STATE_EMPTY;
+	}
+	if (cnt < LCS_NUM_BUFFS) {
+		/* Not all io buffers could be allocated. */
+		LCS_DBF_TEXT(2, setup, "echalloc");
+		while (cnt-- > 0)
+			kfree(channel->iob[cnt].data);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * Free io buffers.
+ */
+static void
+lcs_free_channel(struct lcs_channel *channel)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(2, setup, "ichfree");
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		kfree(channel->iob[cnt].data);
+		channel->iob[cnt].data = NULL;
+	}
+}
+
+/*
+ * Cleanup channel.
+ */
+static void
+lcs_cleanup_channel(struct lcs_channel *channel)
+{
+	LCS_DBF_TEXT(3, setup, "cleanch");
+	/* Kill write channel tasklets. */
+	tasklet_kill(&channel->irq_tasklet);
+	/* Free channel buffers. */
+	lcs_free_channel(channel);
+}
+
+/**
+ * LCS free memory for card and channels.
+ */
+static void
+lcs_free_card(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(2, setup, "remcard");
+	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+	kfree(card);
+}
+
+/**
+ * LCS alloc memory for card and channels
+ */
+static struct lcs_card *
+lcs_alloc_card(void)
+{
+	struct lcs_card *card;
+	int rc;
+
+	LCS_DBF_TEXT(2, setup, "alloclcs");
+
+	card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA);
+	if (card == NULL)
+		return NULL;
+	card->lan_type = LCS_FRAME_TYPE_AUTO;
+	card->pkt_seq = 0;
+	card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT;
+	/* Allocate io buffers for the read channel. */
+	rc = lcs_alloc_channel(&card->read);
+	if (rc) {
+		LCS_DBF_TEXT(2, setup, "iccwerr");
+		lcs_free_card(card);
+		return NULL;
+	}
+	/* Allocate io buffers for the write channel. */
+	rc = lcs_alloc_channel(&card->write);
+	if (rc) {
+		LCS_DBF_TEXT(2, setup, "iccwerr");
+		lcs_cleanup_channel(&card->read);
+		lcs_free_card(card);
+		return NULL;
+	}
+
+#ifdef CONFIG_IP_MULTICAST
+	INIT_LIST_HEAD(&card->ipm_list);
+#endif
+	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+	return card;
+}
+
+/*
+ * Setup read channel.
+ */
+static void
+lcs_setup_read_ccws(struct lcs_card *card)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(2, setup, "ireadccw");
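+	/*
+	 * The read channel program is a ring: LCS_NUM_BUFFS read CCWs, one
+	 * per I/O buffer, followed by a TIC that branches back to the first
+	 * CCW. The SUSPEND and PCI flags set up below control where the
+	 * channel pauses and when an intermediate interrupt is raised.
+	 */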
+	/* Setup read ccws. */
+	memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1));
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		card->read.ccws[cnt].cmd_code = LCS_CCW_READ;
+		card->read.ccws[cnt].count = LCS_IOBUFFERSIZE;
+		card->read.ccws[cnt].flags =
+			CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI;
+		/*
+		 * Note: we have allocated the buffer with GFP_DMA, so
+		 * we do not need to do set_normalized_cda.
+		 */
+		card->read.ccws[cnt].cda =
+			(__u32) __pa(card->read.iob[cnt].data);
+		((struct lcs_header *)
+		 card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
+		card->read.iob[cnt].callback = lcs_get_frames_cb;
+		card->read.iob[cnt].state = LCS_BUF_STATE_READY;
+		card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
+	}
+	card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
+	card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI;
+	card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
+	/* Last ccw is a tic (transfer in channel). */
+	card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
+	card->read.ccws[LCS_NUM_BUFFS].cda =
+		(__u32) __pa(card->read.ccws);
+	/* Set initial state of the read channel. */
+	card->read.state = LCS_CH_STATE_INIT;
+
+	card->read.io_idx = 0;
+	card->read.buf_idx = 0;
+}
+
+static void
+lcs_setup_read(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(3, setup, "initread");
+
+	lcs_setup_read_ccws(card);
+	/* Initialize read channel tasklet. */
+	card->read.irq_tasklet.data = (unsigned long) &card->read;
+	card->read.irq_tasklet.func = lcs_tasklet;
+	/* Initialize waitqueue. */
+	init_waitqueue_head(&card->read.wait_q);
+}
+
+/*
+ * Setup write channel.
+ */
+static void
+lcs_setup_write_ccws(struct lcs_card *card)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(3, setup, "iwritccw");
+	/* Setup write ccws. */
+	memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
+		card->write.ccws[cnt].count = 0;
+		card->write.ccws[cnt].flags =
+			CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI;
+		/*
+		 * Note: we have allocated the buffer with GFP_DMA, so
+		 * we do not need to do set_normalized_cda.
+		 */
+		card->write.ccws[cnt].cda =
+			(__u32) __pa(card->write.iob[cnt].data);
+	}
+	/* Last ccw is a tic (transfer in channel). */
+	card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
+	card->write.ccws[LCS_NUM_BUFFS].cda =
+		(__u32) __pa(card->write.ccws);
+	/* Set initial state of the write channel. */
+	card->write.state = LCS_CH_STATE_INIT;
+
+	card->write.io_idx = 0;
+	card->write.buf_idx = 0;
+}
+
+static void
+lcs_setup_write(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(3, setup, "initwrit");
+
+	lcs_setup_write_ccws(card);
+	/* Initialize write channel tasklet. */
+	card->write.irq_tasklet.data = (unsigned long) &card->write;
+	card->write.irq_tasklet.func = lcs_tasklet;
+	/* Initialize waitqueue. */
+	init_waitqueue_head(&card->write.wait_q);
+}
+
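+/*
+ * Helpers for kernel thread bookkeeping. Three bit masks are kept per
+ * card, all protected by mask_lock: thread_allowed_mask says which
+ * threads may be started at all, thread_start_mask marks threads that
+ * have been requested, and thread_running_mask marks threads that are
+ * currently active. card->wait_q is used to wait for mask changes.
+ */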
+static void
+lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	card->thread_allowed_mask = threads;
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	wake_up(&card->wait_q);
+}
+static inline int
+lcs_threads_running(struct lcs_card *card, unsigned long threads)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	rc = (card->thread_running_mask & threads);
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	return rc;
+}
+
+static int
+lcs_wait_for_threads(struct lcs_card *card, unsigned long threads)
+{
+	return wait_event_interruptible(card->wait_q,
+			lcs_threads_running(card, threads) == 0);
+}
+
+static inline int
+lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	if (!(card->thread_allowed_mask & thread) ||
+	    (card->thread_start_mask & thread)) {
+		spin_unlock_irqrestore(&card->mask_lock, flags);
+		return -EPERM;
+	}
+	card->thread_start_mask |= thread;
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	return 0;
+}
+
+static void
+lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	card->thread_running_mask &= ~thread;
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	wake_up(&card->wait_q);
+}
+
+static inline int
+__lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	if (card->thread_start_mask & thread) {
+		if ((card->thread_allowed_mask & thread) &&
+		    !(card->thread_running_mask & thread)) {
+			rc = 1;
+			card->thread_start_mask &= ~thread;
+			card->thread_running_mask |= thread;
+		} else
+			rc = -EPERM;
+	}
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	return rc;
+}
+
+static int
+lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
+{
+	int rc = 0;
+
+	wait_event(card->wait_q,
+		   (rc = __lcs_do_run_thread(card, thread)) >= 0);
+	return rc;
+}
+
+static int
+lcs_do_start_thread(struct lcs_card *card, unsigned long thread)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	LCS_DBF_TEXT_(4, trace, "  %02x%02x%02x",
+		      (u8) card->thread_start_mask,
+		      (u8) card->thread_allowed_mask,
+		      (u8) card->thread_running_mask);
+	rc = (card->thread_start_mask & thread);
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	return rc;
+}
+
+/**
+ * Initialize channels, card and state machines.
+ */
+static void
+lcs_setup_card(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(2, setup, "initcard");
+	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+
+	lcs_setup_read(card);
+	lcs_setup_write(card);
+	/* Set cards initial state. */
+	card->state = DEV_STATE_DOWN;
+	card->tx_buffer = NULL;
+	card->tx_emitted = 0;
+
+	init_waitqueue_head(&card->wait_q);
+	spin_lock_init(&card->lock);
+	spin_lock_init(&card->ipm_lock);
+	spin_lock_init(&card->mask_lock);
+#ifdef CONFIG_IP_MULTICAST
+	INIT_LIST_HEAD(&card->ipm_list);
+#endif
+	INIT_LIST_HEAD(&card->lancmd_waiters);
+}
+
+static inline void
+lcs_clear_multicast_list(struct lcs_card *card)
+{
+#ifdef	CONFIG_IP_MULTICAST
+	struct lcs_ipm_list *ipm;
+	unsigned long flags;
+
+	/* Free multicast list. */
+	LCS_DBF_TEXT(3, setup, "clmclist");
+	spin_lock_irqsave(&card->ipm_lock, flags);
+	while (!list_empty(&card->ipm_list)){
+		ipm = list_entry(card->ipm_list.next,
+				 struct lcs_ipm_list, list);
+		list_del(&ipm->list);
+		if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){
+			spin_unlock_irqrestore(&card->ipm_lock, flags);
+			lcs_send_delipm(card, ipm);
+			spin_lock_irqsave(&card->ipm_lock, flags);
+		}
+		kfree(ipm);
+	}
+	spin_unlock_irqrestore(&card->ipm_lock, flags);
+#endif
+}
+
+/**
+ * Cleanup channels, card and state machines.
+ */
+static void
+lcs_cleanup_card(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(3, setup, "cleancrd");
+	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+
+	if (card->dev != NULL)
+		free_netdev(card->dev);
+	/* Cleanup channels. */
+	lcs_cleanup_channel(&card->write);
+	lcs_cleanup_channel(&card->read);
+}
+
+/**
+ * Start channel.
+ */
+static int
+lcs_start_channel(struct lcs_channel *channel)
+{
+	unsigned long flags;
+	int rc;
+
+	LCS_DBF_TEXT_(4, trace,"ssch%s", dev_name(&channel->ccwdev->dev));
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_start(channel->ccwdev,
+			      channel->ccws + channel->io_idx, 0, 0,
+			      DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
+	if (rc == 0)
+		channel->state = LCS_CH_STATE_RUNNING;
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (rc) {
+		LCS_DBF_TEXT_(4,trace,"essh%s",
+			      dev_name(&channel->ccwdev->dev));
+		dev_err(&channel->ccwdev->dev,
+			"Starting an LCS device resulted in an error,"
+			" rc=%d!\n", rc);
+	}
+	return rc;
+}
+
+static int
+lcs_clear_channel(struct lcs_channel *channel)
+{
+	unsigned long flags;
+	int rc;
+
+	LCS_DBF_TEXT(4,trace,"clearch");
+	LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_clear(channel->ccwdev, (addr_t) channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (rc) {
+		LCS_DBF_TEXT_(4, trace, "ecsc%s",
+			      dev_name(&channel->ccwdev->dev));
+		return rc;
+	}
+	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
+	channel->state = LCS_CH_STATE_STOPPED;
+	return rc;
+}
+
+/**
+ * Stop channel.
+ */
+static int
+lcs_stop_channel(struct lcs_channel *channel)
+{
+	unsigned long flags;
+	int rc;
+
+	if (channel->state == LCS_CH_STATE_STOPPED)
+		return 0;
+	LCS_DBF_TEXT(4,trace,"haltsch");
+	LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
+	channel->state = LCS_CH_STATE_INIT;
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (rc) {
+		LCS_DBF_TEXT_(4, trace, "ehsc%s",
+			      dev_name(&channel->ccwdev->dev));
+		return rc;
+	}
+	/* Asynchronous halt initiated. Wait for its completion. */
+	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED));
+	lcs_clear_channel(channel);
+	return 0;
+}
+
+/**
+ * start read and write channel
+ */
+static int
+lcs_start_channels(struct lcs_card *card)
+{
+	int rc;
+
+	LCS_DBF_TEXT(2, trace, "chstart");
+	/* start read channel */
+	rc = lcs_start_channel(&card->read);
+	if (rc)
+		return rc;
+	/* start write channel */
+	rc = lcs_start_channel(&card->write);
+	if (rc)
+		lcs_stop_channel(&card->read);
+	return rc;
+}
+
+/**
+ * stop read and write channel
+ */
+static int
+lcs_stop_channels(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(2, trace, "chhalt");
+	lcs_stop_channel(&card->read);
+	lcs_stop_channel(&card->write);
+	return 0;
+}
+
+/**
+ * Get empty buffer.
+ */
+static struct lcs_buffer *
+__lcs_get_buffer(struct lcs_channel *channel)
+{
+	int index;
+
+	LCS_DBF_TEXT(5, trace, "_getbuff");
+	index = channel->io_idx;
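+	/*
+	 * Scan the buffer ring starting at io_idx; the wrap-around below
+	 * relies on LCS_NUM_BUFFS being a power of two.
+	 */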
+	do {
+		if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) {
+			channel->iob[index].state = LCS_BUF_STATE_LOCKED;
+			return channel->iob + index;
+		}
+		index = (index + 1) & (LCS_NUM_BUFFS - 1);
+	} while (index != channel->io_idx);
+	return NULL;
+}
+
+static struct lcs_buffer *
+lcs_get_buffer(struct lcs_channel *channel)
+{
+	struct lcs_buffer *buffer;
+	unsigned long flags;
+
+	LCS_DBF_TEXT(5, trace, "getbuff");
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	buffer = __lcs_get_buffer(channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	return buffer;
+}
+
+/**
+ * Resume channel program if the channel is suspended.
+ */
+static int
+__lcs_resume_channel(struct lcs_channel *channel)
+{
+	int rc;
+
+	if (channel->state != LCS_CH_STATE_SUSPENDED)
+		return 0;
+	if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
+		return 0;
+	LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev));
+	rc = ccw_device_resume(channel->ccwdev);
+	if (rc) {
+		LCS_DBF_TEXT_(4, trace, "ersc%s",
+			      dev_name(&channel->ccwdev->dev));
+		dev_err(&channel->ccwdev->dev,
+			"Sending data from the LCS device to the LAN failed"
+			" with rc=%d\n",rc);
+	} else
+		channel->state = LCS_CH_STATE_RUNNING;
+	return rc;
+}
+
+/**
+ * Make a buffer ready for processing.
+ */
+static inline void
+__lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
+{
+	int prev, next;
+
+	LCS_DBF_TEXT(5, trace, "rdybits");
+	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
+	next = (index + 1) & (LCS_NUM_BUFFS - 1);
+	/* Check if we may clear the suspend bit of this buffer. */
+	if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) {
+		/* Check if we have to set the PCI bit. */
+		if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND))
+			/* Suspend bit of the previous buffer is not set. */
+			channel->ccws[index].flags |= CCW_FLAG_PCI;
+		/* Suspend bit of the next buffer is set. */
+		channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND;
+	}
+}
+
+static int
+lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+	unsigned long flags;
+	int index, rc;
+
+	LCS_DBF_TEXT(5, trace, "rdybuff");
+	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
+	       buffer->state != LCS_BUF_STATE_PROCESSED);
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	buffer->state = LCS_BUF_STATE_READY;
+	index = buffer - channel->iob;
+	/* Set length. */
+	channel->ccws[index].count = buffer->count;
+	/* Check relevant PCI/suspend bits. */
+	__lcs_ready_buffer_bits(channel, index);
+	rc = __lcs_resume_channel(channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	return rc;
+}
+
+/**
+ * Mark the buffer as processed. Take care of the suspend bit
+ * of the previous buffer. This function is called from
+ * interrupt context, so the lock must not be taken.
+ */
+static int
+__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+	int index, prev, next;
+
+	LCS_DBF_TEXT(5, trace, "prcsbuff");
+	BUG_ON(buffer->state != LCS_BUF_STATE_READY);
+	buffer->state = LCS_BUF_STATE_PROCESSED;
+	index = buffer - channel->iob;
+	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
+	next = (index + 1) & (LCS_NUM_BUFFS - 1);
+	/* Set the suspend bit and clear the PCI bit of this buffer. */
+	channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
+	channel->ccws[index].flags &= ~CCW_FLAG_PCI;
+	/* Check the suspend bit of the previous buffer. */
+	if (channel->iob[prev].state == LCS_BUF_STATE_READY) {
+		/*
+		 * Previous buffer is in state ready. It might have
+		 * happened in lcs_ready_buffer that the suspend bit
+		 * has not been cleared to avoid an endless loop.
+		 * Do it now.
+		 */
+		__lcs_ready_buffer_bits(channel, prev);
+	}
+	/* Clear PCI bit of next buffer. */
+	channel->ccws[next].flags &= ~CCW_FLAG_PCI;
+	return __lcs_resume_channel(channel);
+}
+
+/**
+ * Put a processed buffer back to state empty.
+ */
+static void
+lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+	unsigned long flags;
+
+	LCS_DBF_TEXT(5, trace, "relbuff");
+	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
+	       buffer->state != LCS_BUF_STATE_PROCESSED);
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	buffer->state = LCS_BUF_STATE_EMPTY;
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+}
+
+/**
+ * Get buffer for a lan command.
+ */
+static struct lcs_buffer *
+lcs_get_lancmd(struct lcs_card *card, int count)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(4, trace, "getlncmd");
+	/* Get buffer and wait if none is available. */
+	wait_event(card->write.wait_q,
+		   ((buffer = lcs_get_buffer(&card->write)) != NULL));
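+	/*
+	 * Reserve room for the LCS header in front of the command and
+	 * terminate the buffer with a zero 16-bit offset word.
+	 */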
+	count += sizeof(struct lcs_header);
+	*(__u16 *)(buffer->data + count) = 0;
+	buffer->count = count + sizeof(__u16);
+	buffer->callback = lcs_release_buffer;
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->offset = count;
+	cmd->type = LCS_FRAME_TYPE_CONTROL;
+	cmd->slot = 0;
+	return buffer;
+}
+
+static void
+lcs_get_reply(struct lcs_reply *reply)
+{
+	WARN_ON(atomic_read(&reply->refcnt) <= 0);
+	atomic_inc(&reply->refcnt);
+}
+
+static void
+lcs_put_reply(struct lcs_reply *reply)
+{
+	WARN_ON(atomic_read(&reply->refcnt) <= 0);
+	if (atomic_dec_and_test(&reply->refcnt))
+		kfree(reply);
+}
+
+static struct lcs_reply *
+lcs_alloc_reply(struct lcs_cmd *cmd)
+{
+	struct lcs_reply *reply;
+
+	LCS_DBF_TEXT(4, trace, "getreply");
+
+	reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
+	if (!reply)
+		return NULL;
+	atomic_set(&reply->refcnt,1);
+	reply->sequence_no = cmd->sequence_no;
+	reply->received = 0;
+	reply->rc = 0;
+	init_waitqueue_head(&reply->wait_q);
+
+	return reply;
+}
+
+/**
+ * Notifier function for lancmd replies. Called from read irq.
+ */
+static void
+lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	struct list_head *l, *n;
+	struct lcs_reply *reply;
+
+	LCS_DBF_TEXT(4, trace, "notiwait");
+	spin_lock(&card->lock);
+	list_for_each_safe(l, n, &card->lancmd_waiters) {
+		reply = list_entry(l, struct lcs_reply, list);
+		if (reply->sequence_no == cmd->sequence_no) {
+			lcs_get_reply(reply);
+			list_del_init(&reply->list);
+			if (reply->callback != NULL)
+				reply->callback(card, cmd);
+			reply->received = 1;
+			reply->rc = cmd->return_code;
+			wake_up(&reply->wait_q);
+			lcs_put_reply(reply);
+			break;
+		}
+	}
+	spin_unlock(&card->lock);
+}
+
+/**
+ * Timeout function for lan commands: remove the pending reply from the
+ * waiter list and wake up the sleeper with -ETIME.
+ */
+static void
+lcs_lancmd_timeout(unsigned long data)
+{
+	struct lcs_reply *reply, *list_reply, *r;
+	unsigned long flags;
+
+	LCS_DBF_TEXT(4, trace, "timeout");
+	reply = (struct lcs_reply *) data;
+	spin_lock_irqsave(&reply->card->lock, flags);
+	list_for_each_entry_safe(list_reply, r,
+				 &reply->card->lancmd_waiters,list) {
+		if (reply == list_reply) {
+			lcs_get_reply(reply);
+			list_del_init(&reply->list);
+			spin_unlock_irqrestore(&reply->card->lock, flags);
+			reply->received = 1;
+			reply->rc = -ETIME;
+			wake_up(&reply->wait_q);
+			lcs_put_reply(reply);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&reply->card->lock, flags);
+}
+
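+/*
+ * Emit the buffer of a lan command and wait for the reply: the command
+ * gets a sequence number, a reply structure is queued on lancmd_waiters,
+ * the buffer is handed to the write channel and a timeout timer is armed.
+ * lcs_notify_lancmd_waiters() (called from the read channel) or the
+ * timeout handler wakes us up again.
+ */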
+static int
+lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
+		void (*reply_callback)(struct lcs_card *, struct lcs_cmd *))
+{
+	struct lcs_reply *reply;
+	struct lcs_cmd *cmd;
+	struct timer_list timer;
+	unsigned long flags;
+	int rc;
+
+	LCS_DBF_TEXT(4, trace, "sendcmd");
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->return_code = 0;
+	cmd->sequence_no = card->sequence_no++;
+	reply = lcs_alloc_reply(cmd);
+	if (!reply)
+		return -ENOMEM;
+	reply->callback = reply_callback;
+	reply->card = card;
+	spin_lock_irqsave(&card->lock, flags);
+	list_add_tail(&reply->list, &card->lancmd_waiters);
+	spin_unlock_irqrestore(&card->lock, flags);
+
+	buffer->callback = lcs_release_buffer;
+	rc = lcs_ready_buffer(&card->write, buffer);
+	if (rc)
+		return rc;
+	init_timer_on_stack(&timer);
+	timer.function = lcs_lancmd_timeout;
+	timer.data = (unsigned long) reply;
+	timer.expires = jiffies + HZ*card->lancmd_timeout;
+	add_timer(&timer);
+	wait_event(reply->wait_q, reply->received);
+	del_timer_sync(&timer);
+	LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc);
+	rc = reply->rc;
+	lcs_put_reply(reply);
+	return rc ? -EIO : 0;
+}
+
+/**
+ * LCS startup command
+ */
+static int
+lcs_send_startup(struct lcs_card *card, __u8 initiator)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "startup");
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_STARTUP;
+	cmd->initiator = initiator;
+	cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE;
+	return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * LCS shutdown command
+ */
+static int
+lcs_send_shutdown(struct lcs_card *card)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "shutdown");
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_SHUTDOWN;
+	cmd->initiator = LCS_INITIATOR_TCPIP;
+	return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * LCS lanstat command
+ */
+static void
+__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	LCS_DBF_TEXT(2, trace, "statcb");
+	memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH);
+}
+
+static int
+lcs_send_lanstat(struct lcs_card *card)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2,trace, "cmdstat");
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	/* Setup lanstat command. */
+	cmd->cmd_code = LCS_CMD_LANSTAT;
+	cmd->initiator = LCS_INITIATOR_TCPIP;
+	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
+	cmd->cmd.lcs_std_cmd.portno = card->portno;
+	return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
+}
+
+/**
+ * send stoplan command
+ */
+static int
+lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "cmdstpln");
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_STOPLAN;
+	cmd->initiator = initiator;
+	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
+	cmd->cmd.lcs_std_cmd.portno = card->portno;
+	return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * send startlan command
+ */
+static void
+__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	LCS_DBF_TEXT(2, trace, "srtlancb");
+	card->lan_type = cmd->cmd.lcs_std_cmd.lan_type;
+	card->portno = cmd->cmd.lcs_std_cmd.portno;
+}
+
+static int
+lcs_send_startlan(struct lcs_card *card, __u8 initiator)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "cmdstaln");
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_STARTLAN;
+	cmd->initiator = initiator;
+	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
+	cmd->cmd.lcs_std_cmd.portno = card->portno;
+	return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb);
+}
+
+#ifdef CONFIG_IP_MULTICAST
+/**
+ * send setipm command (Multicast)
+ */
+static int
+lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "cmdsetim");
+	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_SETIPM;
+	cmd->initiator = LCS_INITIATOR_TCPIP;
+	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
+	cmd->cmd.lcs_qipassist.portno = card->portno;
+	cmd->cmd.lcs_qipassist.version = 4;
+	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
+	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
+	       &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
+	LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
+	return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * send delipm command (Multicast)
+ */
+static int
+lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "cmddelim");
+	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_DELIPM;
+	cmd->initiator = LCS_INITIATOR_TCPIP;
+	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
+	cmd->cmd.lcs_qipassist.portno = card->portno;
+	cmd->cmd.lcs_qipassist.version = 4;
+	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
+	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
+	       &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
+	LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
+	return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * check if multicast is supported by LCS
+ */
+static void
+__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	LCS_DBF_TEXT(2, trace, "chkmccb");
+	card->ip_assists_supported =
+		cmd->cmd.lcs_qipassist.ip_assists_supported;
+	card->ip_assists_enabled =
+		cmd->cmd.lcs_qipassist.ip_assists_enabled;
+}
+
+static int
+lcs_check_multicast_support(struct lcs_card *card)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+	int rc;
+
+	LCS_DBF_TEXT(2, trace, "cmdqipa");
+	/* Send query ipassist. */
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_QIPASSIST;
+	cmd->initiator = LCS_INITIATOR_TCPIP;
+	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
+	cmd->cmd.lcs_qipassist.portno = card->portno;
+	cmd->cmd.lcs_qipassist.version = 4;
+	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
+	rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb);
+	if (rc != 0) {
+		pr_err("Query IPAssist failed. Assuming unsupported!\n");
+		return -EOPNOTSUPP;
+	}
+	if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
+		return 0;
+	return -EOPNOTSUPP;
+}
+
+/**
+ * set or del multicast address on LCS card
+ */
+static void
+lcs_fix_multicast_list(struct lcs_card *card)
+{
+	struct list_head failed_list;
+	struct lcs_ipm_list *ipm, *tmp;
+	unsigned long flags;
+	int rc;
+
+	LCS_DBF_TEXT(4,trace, "fixipm");
+	INIT_LIST_HEAD(&failed_list);
+	spin_lock_irqsave(&card->ipm_lock, flags);
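+	/*
+	 * ipm_lock is dropped while talking to the card, so the list may
+	 * change underneath us; restart the walk from the beginning after
+	 * every entry that was handed to the hardware.
+	 */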
+list_modified:
+	list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){
+		switch (ipm->ipm_state) {
+		case LCS_IPM_STATE_SET_REQUIRED:
+			/* del from ipm_list so no one else can tamper with
+			 * this entry */
+			list_del_init(&ipm->list);
+			spin_unlock_irqrestore(&card->ipm_lock, flags);
+			rc = lcs_send_setipm(card, ipm);
+			spin_lock_irqsave(&card->ipm_lock, flags);
+			if (rc) {
+				pr_info("Adding multicast address failed."
+					" Table possibly full!\n");
+				/* store ipm in failed list -> will be added
+				 * to ipm_list again, so a retry will be done
+				 * during the next call of this function */
+				list_add_tail(&ipm->list, &failed_list);
+			} else {
+				ipm->ipm_state = LCS_IPM_STATE_ON_CARD;
+				/* re-insert into ipm_list */
+				list_add_tail(&ipm->list, &card->ipm_list);
+			}
+			goto list_modified;
+		case LCS_IPM_STATE_DEL_REQUIRED:
+			list_del(&ipm->list);
+			spin_unlock_irqrestore(&card->ipm_lock, flags);
+			lcs_send_delipm(card, ipm);
+			spin_lock_irqsave(&card->ipm_lock, flags);
+			kfree(ipm);
+			goto list_modified;
+		case LCS_IPM_STATE_ON_CARD:
+			break;
+		}
+	}
+	/* re-insert all entries from the failed_list into ipm_list */
+	list_for_each_entry_safe(ipm, tmp, &failed_list, list)
+		list_move_tail(&ipm->list, &card->ipm_list);
+
+	spin_unlock_irqrestore(&card->ipm_lock, flags);
+}
+
+/**
+ * get mac address for the relevant Multicast address
+ */
+static void
+lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
+{
+	LCS_DBF_TEXT(4,trace, "getmac");
+	if (dev->type == ARPHRD_IEEE802_TR)
+		ip_tr_mc_map(ipm, mac);
+	else
+		ip_eth_mc_map(ipm, mac);
+}
+
+/**
+ * Mark multicast list entries that are no longer present in the kernel's
+ * multicast list for removal from the card.
+ */
+static inline void
+lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
+{
+	struct ip_mc_list *im4;
+	struct list_head *l;
+	struct lcs_ipm_list *ipm;
+	unsigned long flags;
+	char buf[MAX_ADDR_LEN];
+
+	LCS_DBF_TEXT(4, trace, "remmclst");
+	spin_lock_irqsave(&card->ipm_lock, flags);
+	list_for_each(l, &card->ipm_list) {
+		ipm = list_entry(l, struct lcs_ipm_list, list);
+		for (im4 = rcu_dereference(in4_dev->mc_list);
+		     im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) {
+			lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
+			if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
+			     (memcmp(buf, &ipm->ipm.mac_addr,
+				     LCS_MAC_LENGTH) == 0) )
+				break;
+		}
+		if (im4 == NULL)
+			ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED;
+	}
+	spin_unlock_irqrestore(&card->ipm_lock, flags);
+}
+
+static inline struct lcs_ipm_list *
+lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf)
+{
+	struct lcs_ipm_list *tmp, *ipm = NULL;
+	struct list_head *l;
+	unsigned long flags;
+
+	LCS_DBF_TEXT(4, trace, "chkmcent");
+	spin_lock_irqsave(&card->ipm_lock, flags);
+	list_for_each(l, &card->ipm_list) {
+		tmp = list_entry(l, struct lcs_ipm_list, list);
+		if ( (tmp->ipm.ip_addr == im4->multiaddr) &&
+		     (memcmp(buf, &tmp->ipm.mac_addr,
+			     LCS_MAC_LENGTH) == 0) ) {
+			ipm = tmp;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&card->ipm_lock, flags);
+	return ipm;
+}
+
+static inline void
+lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
+{
+
+	struct ip_mc_list *im4;
+	struct lcs_ipm_list *ipm;
+	char buf[MAX_ADDR_LEN];
+	unsigned long flags;
+
+	LCS_DBF_TEXT(4, trace, "setmclst");
+	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
+	     im4 = rcu_dereference(im4->next_rcu)) {
+		lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
+		ipm = lcs_check_addr_entry(card, im4, buf);
+		if (ipm != NULL)
+			continue;	/* Address already in list. */
+		ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
+		if (ipm == NULL) {
+			pr_info("Not enough memory to add"
+				" new multicast entry!\n");
+			break;
+		}
+		memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH);
+		ipm->ipm.ip_addr = im4->multiaddr;
+		ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED;
+		spin_lock_irqsave(&card->ipm_lock, flags);
+		LCS_DBF_HEX(2,trace,&ipm->ipm.ip_addr,4);
+		list_add(&ipm->list, &card->ipm_list);
+		spin_unlock_irqrestore(&card->ipm_lock, flags);
+	}
+}
+
+static int
+lcs_register_mc_addresses(void *data)
+{
+	struct lcs_card *card;
+	struct in_device *in4_dev;
+
+	card = (struct lcs_card *) data;
+
+	if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD))
+		return 0;
+	LCS_DBF_TEXT(4, trace, "regmulti");
+
+	in4_dev = in_dev_get(card->dev);
+	if (in4_dev == NULL)
+		goto out;
+	rcu_read_lock();
+	lcs_remove_mc_addresses(card,in4_dev);
+	lcs_set_mc_addresses(card, in4_dev);
+	rcu_read_unlock();
+	in_dev_put(in4_dev);
+
+	netif_carrier_off(card->dev);
+	netif_tx_disable(card->dev);
+	wait_event(card->write.wait_q,
+			(card->write.state != LCS_CH_STATE_RUNNING));
+	lcs_fix_multicast_list(card);
+	if (card->state == DEV_STATE_UP) {
+		netif_carrier_on(card->dev);
+		netif_wake_queue(card->dev);
+	}
+out:
+	lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
+	return 0;
+}
+#endif /* CONFIG_IP_MULTICAST */
+
+/**
+ * Called by the network stack whenever the multicast address list of the
+ * device changes.
+ */
+static void
+lcs_set_multicast_list(struct net_device *dev)
+{
+#ifdef CONFIG_IP_MULTICAST
+	struct lcs_card *card;
+
+	LCS_DBF_TEXT(4, trace, "setmulti");
+	card = (struct lcs_card *) dev->ml_priv;
+
+	if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
+		schedule_work(&card->kernel_thread_starter);
+#endif /* CONFIG_IP_MULTICAST */
+}
+
+static long
+lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
+{
+	if (!IS_ERR(irb))
+		return 0;
+
+	switch (PTR_ERR(irb)) {
+	case -EIO:
+		dev_warn(&cdev->dev,
+			"An I/O-error occurred on the LCS device\n");
+		LCS_DBF_TEXT(2, trace, "ckirberr");
+		LCS_DBF_TEXT_(2, trace, "  rc%d", -EIO);
+		break;
+	case -ETIMEDOUT:
+		dev_warn(&cdev->dev,
+			"A command timed out on the LCS device\n");
+		LCS_DBF_TEXT(2, trace, "ckirberr");
+		LCS_DBF_TEXT_(2, trace, "  rc%d", -ETIMEDOUT);
+		break;
+	default:
+		dev_warn(&cdev->dev,
+			"An error occurred on the LCS device, rc=%ld\n",
+			PTR_ERR(irb));
+		LCS_DBF_TEXT(2, trace, "ckirberr");
+		LCS_DBF_TEXT(2, trace, "  rc???");
+	}
+	return PTR_ERR(irb);
+}
+
+static int
+lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
+{
+	int dstat, cstat;
+	char *sense;
+
+	sense = (char *) irb->ecw;
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+
+	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
+		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
+		     SCHN_STAT_PROT_CHECK   | SCHN_STAT_PROG_CHECK)) {
+		LCS_DBF_TEXT(2, trace, "CGENCHK");
+		return 1;
+	}
+	if (dstat & DEV_STAT_UNIT_CHECK) {
+		if (sense[LCS_SENSE_BYTE_1] &
+		    LCS_SENSE_RESETTING_EVENT) {
+			LCS_DBF_TEXT(2, trace, "REVIND");
+			return 1;
+		}
+		if (sense[LCS_SENSE_BYTE_0] &
+		    LCS_SENSE_CMD_REJECT) {
+			LCS_DBF_TEXT(2, trace, "CMDREJ");
+			return 0;
+		}
+		if ((!sense[LCS_SENSE_BYTE_0]) &&
+		    (!sense[LCS_SENSE_BYTE_1]) &&
+		    (!sense[LCS_SENSE_BYTE_2]) &&
+		    (!sense[LCS_SENSE_BYTE_3])) {
+			LCS_DBF_TEXT(2, trace, "ZEROSEN");
+			return 0;
+		}
+		LCS_DBF_TEXT(2, trace, "DGENCHK");
+		return 1;
+	}
+	return 0;
+}
+
+static void
+lcs_schedule_recovery(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(2, trace, "startrec");
+	if (!lcs_set_thread_start_bit(card, LCS_RECOVERY_THREAD))
+		schedule_work(&card->kernel_thread_starter);
+}
+
+/**
+ * IRQ Handler for LCS channels
+ */
+static void
+lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+{
+	struct lcs_card *card;
+	struct lcs_channel *channel;
+	int rc, index;
+	int cstat, dstat;
+
+	if (lcs_check_irb_error(cdev, irb))
+		return;
+
+	card = CARD_FROM_DEV(cdev);
+	if (card->read.ccwdev == cdev)
+		channel = &card->read;
+	else
+		channel = &card->write;
+
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+	LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev));
+	LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat,
+		      irb->scsw.cmd.dstat);
+	LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl,
+		      irb->scsw.cmd.actl);
+
+	/* Check for channel and device errors presented */
+	rc = lcs_get_problem(cdev, irb);
+	if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) {
+		dev_warn(&cdev->dev,
+			"The LCS device stopped because of an error,"
+			" dstat=0x%X, cstat=0x%X\n",
+			dstat, cstat);
+		if (rc)
+			channel->state = LCS_CH_STATE_ERROR;
+	}
+	if (channel->state == LCS_CH_STATE_ERROR) {
+		lcs_schedule_recovery(card);
+		wake_up(&card->wait_q);
+		return;
+	}
+	/* How far in the ccw chain have we processed? */
+	if ((channel->state != LCS_CH_STATE_INIT) &&
+	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
+	    (irb->scsw.cmd.cpa != 0)) {
+		index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
+			- channel->ccws;
+		if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
+		    (irb->scsw.cmd.cstat & SCHN_STAT_PCI))
+			/*
+			 * The reported cpa may point one CCW too far when
+			 * the channel is suspended or a PCI status is
+			 * pending; step back one entry.
+			 */
+			index = (index - 1) & (LCS_NUM_BUFFS - 1);
+		while (channel->io_idx != index) {
+			__lcs_processed_buffer(channel,
+					       channel->iob + channel->io_idx);
+			channel->io_idx =
+				(channel->io_idx + 1) & (LCS_NUM_BUFFS - 1);
+		}
+	}
+
+	if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) ||
+	    (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) ||
+	    (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK))
+		/* Mark channel as stopped. */
+		channel->state = LCS_CH_STATE_STOPPED;
+	else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)
+		/* CCW execution stopped on a suspend bit. */
+		channel->state = LCS_CH_STATE_SUSPENDED;
+	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
+		if (irb->scsw.cmd.cc != 0) {
+			ccw_device_halt(channel->ccwdev, (addr_t) channel);
+			return;
+		}
+		/* The channel has been stopped by halt_IO. */
+		channel->state = LCS_CH_STATE_HALTED;
+	}
+	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
+		channel->state = LCS_CH_STATE_CLEARED;
+	/* Do the rest in the tasklet. */
+	tasklet_schedule(&channel->irq_tasklet);
+}
+
+/**
+ * Tasklet for IRQ handler
+ */
+static void
+lcs_tasklet(unsigned long data)
+{
+	unsigned long flags;
+	struct lcs_channel *channel;
+	struct lcs_buffer *iob;
+	int buf_idx;
+
+	channel = (struct lcs_channel *) data;
+	LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev));
+
+	/* Check for processed buffers. */
+	iob = channel->iob;
+	buf_idx = channel->buf_idx;
+	while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) {
+		/* Do the callback thing. */
+		if (iob[buf_idx].callback != NULL)
+			iob[buf_idx].callback(channel, iob + buf_idx);
+		buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1);
+	}
+	channel->buf_idx = buf_idx;
+
+	if (channel->state == LCS_CH_STATE_STOPPED)
+		lcs_start_channel(channel);
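+	/*
+	 * Re-check under the ccw device lock whether a suspended channel
+	 * can be resumed because the next buffer is ready again.
+	 */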
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	if (channel->state == LCS_CH_STATE_SUSPENDED &&
+	    channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY)
+		__lcs_resume_channel(channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+	/* Something happened on the channel. Wake up waiters. */
+	wake_up(&channel->wait_q);
+}
+
+/**
+ * Finish current tx buffer and make it ready for transmit.
+ */
+static void
+__lcs_emit_txbuffer(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(5, trace, "emittx");
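+	/*
+	 * Terminate the frame chain with a zero offset word, then hand the
+	 * buffer to the write channel.
+	 */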
+	*(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0;
+	card->tx_buffer->count += 2;
+	lcs_ready_buffer(&card->write, card->tx_buffer);
+	card->tx_buffer = NULL;
+	card->tx_emitted++;
+}
+
+/**
+ * Callback for finished tx buffers.
+ */
+static void
+lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+	struct lcs_card *card;
+
+	LCS_DBF_TEXT(5, trace, "txbuffcb");
+	/* Put buffer back to pool. */
+	lcs_release_buffer(channel, buffer);
+	card = container_of(channel, struct lcs_card, write);
+	if (netif_queue_stopped(card->dev) && netif_carrier_ok(card->dev))
+		netif_wake_queue(card->dev);
+	spin_lock(&card->lock);
+	card->tx_emitted--;
+	if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
+		/*
+		 * Last running tx buffer has finished. Submit partially
+		 * filled current buffer.
+		 */
+		__lcs_emit_txbuffer(card);
+	spin_unlock(&card->lock);
+}
+
+/**
+ * Packet transmit function called by network stack
+ */
+static int
+__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
+		 struct net_device *dev)
+{
+	struct lcs_header *header;
+	int rc = NETDEV_TX_OK;
+
+	LCS_DBF_TEXT(5, trace, "hardxmit");
+	if (skb == NULL) {
+		card->stats.tx_dropped++;
+		card->stats.tx_errors++;
+		return NETDEV_TX_OK;
+	}
+	if (card->state != DEV_STATE_UP) {
+		dev_kfree_skb(skb);
+		card->stats.tx_dropped++;
+		card->stats.tx_errors++;
+		card->stats.tx_carrier_errors++;
+		return NETDEV_TX_OK;
+	}
+	if (skb->protocol == htons(ETH_P_IPV6)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	netif_stop_queue(card->dev);
+	spin_lock(&card->lock);
+	if (card->tx_buffer != NULL &&
+	    card->tx_buffer->count + sizeof(struct lcs_header) +
+	    skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
+		/* skb too big for current tx buffer. */
+		__lcs_emit_txbuffer(card);
+	if (card->tx_buffer == NULL) {
+		/* Get new tx buffer */
+		card->tx_buffer = lcs_get_buffer(&card->write);
+		if (card->tx_buffer == NULL) {
+			card->stats.tx_dropped++;
+			rc = NETDEV_TX_BUSY;
+			goto out;
+		}
+		card->tx_buffer->callback = lcs_txbuffer_cb;
+		card->tx_buffer->count = 0;
+	}
+	header = (struct lcs_header *)
+		(card->tx_buffer->data + card->tx_buffer->count);
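+	/*
+	 * The offset field of each LCS header holds the position of the
+	 * next frame in the buffer, i.e. the fill level after this packet
+	 * has been accounted for.
+	 */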
+	card->tx_buffer->count += skb->len + sizeof(struct lcs_header);
+	header->offset = card->tx_buffer->count;
+	header->type = card->lan_type;
+	header->slot = card->portno;
+	skb_copy_from_linear_data(skb, header + 1, skb->len);
+	spin_unlock(&card->lock);
+	card->stats.tx_bytes += skb->len;
+	card->stats.tx_packets++;
+	dev_kfree_skb(skb);
+	netif_wake_queue(card->dev);
+	spin_lock(&card->lock);
+	if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
+		/* If this is the first tx buffer emit it immediately. */
+		__lcs_emit_txbuffer(card);
+out:
+	spin_unlock(&card->lock);
+	return rc;
+}
+
+static int
+lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct lcs_card *card;
+	int rc;
+
+	LCS_DBF_TEXT(5, trace, "pktxmit");
+	card = (struct lcs_card *) dev->ml_priv;
+	rc = __lcs_start_xmit(card, skb, dev);
+	return rc;
+}
+
+/**
+ * send startlan and lanstat command to make LCS device ready
+ */
+static int
+lcs_startlan_auto(struct lcs_card *card)
+{
+	int rc;
+
+	LCS_DBF_TEXT(2, trace, "strtauto");
+#ifdef CONFIG_ETHERNET
+	card->lan_type = LCS_FRAME_TYPE_ENET;
+	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
+	if (rc == 0)
+		return 0;
+
+#endif
+#ifdef CONFIG_TR
+	card->lan_type = LCS_FRAME_TYPE_TR;
+	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
+	if (rc == 0)
+		return 0;
+#endif
+#ifdef CONFIG_FDDI
+	card->lan_type = LCS_FRAME_TYPE_FDDI;
+	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
+	if (rc == 0)
+		return 0;
+#endif
+	return -EIO;
+}
+
+static int
+lcs_startlan(struct lcs_card *card)
+{
+	int rc, i;
+
+	LCS_DBF_TEXT(2, trace, "startlan");
+	rc = 0;
+	if (card->portno != LCS_INVALID_PORT_NO) {
+		if (card->lan_type == LCS_FRAME_TYPE_AUTO)
+			rc = lcs_startlan_auto(card);
+		else
+			rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
+	} else {
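+		/*
+		 * No port number given: probe the ports one after the
+		 * other until a startlan command succeeds.
+		 */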
+		for (i = 0; i <= 16; i++) {
+			card->portno = i;
+			if (card->lan_type != LCS_FRAME_TYPE_AUTO)
+				rc = lcs_send_startlan(card,
+						       LCS_INITIATOR_TCPIP);
+			else
+				/* autodetecting lan type */
+				rc = lcs_startlan_auto(card);
+			if (rc == 0)
+				break;
+		}
+	}
+	if (rc == 0)
+		return lcs_send_lanstat(card);
+	return rc;
+}
+
+/**
+ * LCS detect function
+ * setup channels and make them I/O ready
+ */
+static int
+lcs_detect(struct lcs_card *card)
+{
+	int rc = 0;
+
+	LCS_DBF_TEXT(2, setup, "lcsdetct");
+	/* start/reset card */
+	if (card->dev)
+		netif_stop_queue(card->dev);
+	rc = lcs_stop_channels(card);
+	if (rc == 0) {
+		rc = lcs_start_channels(card);
+		if (rc == 0) {
+			rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP);
+			if (rc == 0)
+				rc = lcs_startlan(card);
+		}
+	}
+	if (rc == 0) {
+		card->state = DEV_STATE_UP;
+	} else {
+		card->state = DEV_STATE_DOWN;
+		card->write.state = LCS_CH_STATE_INIT;
+		card->read.state =  LCS_CH_STATE_INIT;
+	}
+	return rc;
+}
+
+/**
+ * LCS Stop card
+ */
+static int
+lcs_stopcard(struct lcs_card *card)
+{
+	int rc;
+
+	LCS_DBF_TEXT(3, setup, "stopcard");
+
+	if (card->read.state != LCS_CH_STATE_STOPPED &&
+	    card->write.state != LCS_CH_STATE_STOPPED &&
+	    card->read.state != LCS_CH_STATE_ERROR &&
+	    card->write.state != LCS_CH_STATE_ERROR &&
+	    card->state == DEV_STATE_UP) {
+		lcs_clear_multicast_list(card);
+		rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP);
+		rc = lcs_send_shutdown(card);
+	}
+	rc = lcs_stop_channels(card);
+	card->state = DEV_STATE_DOWN;
+
+	return rc;
+}
+
+/**
+ * Kernel Thread helper functions for LGW initiated commands
+ */
+static void
+lcs_start_kernel_thread(struct work_struct *work)
+{
+	struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter);
+	LCS_DBF_TEXT(5, trace, "krnthrd");
+	if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD))
+		kthread_run(lcs_recovery, card, "lcs_recover");
+#ifdef CONFIG_IP_MULTICAST
+	if (lcs_do_start_thread(card, LCS_SET_MC_THREAD))
+		kthread_run(lcs_register_mc_addresses, card, "regipm");
+#endif
+}
+
+/**
+ * Process control frames.
+ */
+static void
+lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	LCS_DBF_TEXT(5, trace, "getctrl");
+	if (cmd->initiator == LCS_INITIATOR_LGW) {
+		switch(cmd->cmd_code) {
+		case LCS_CMD_STARTUP:
+		case LCS_CMD_STARTLAN:
+			lcs_schedule_recovery(card);
+			break;
+		case LCS_CMD_STOPLAN:
+			if (card->dev) {
+				pr_warning("Stoplan for %s initiated by LGW.\n",
+					   card->dev->name);
+				netif_carrier_off(card->dev);
+			}
+			break;
+		default:
+			LCS_DBF_TEXT(5, trace, "noLGWcmd");
+			break;
+		}
+	} else
+		lcs_notify_lancmd_waiters(card, cmd);
+}
+
+/**
+ * Unpack network packet.
+ */
+static void
+lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
+{
+	struct sk_buff *skb;
+
+	LCS_DBF_TEXT(5, trace, "getskb");
+	if (card->dev == NULL ||
+	    card->state != DEV_STATE_UP)
+		/* The card isn't up. Ignore the packet. */
+		return;
+
+	skb = dev_alloc_skb(skb_len);
+	if (skb == NULL) {
+		dev_err(&card->dev->dev,
+			" Allocating a socket buffer to interface %s failed\n",
+			  card->dev->name);
+		card->stats.rx_dropped++;
+		return;
+	}
+	memcpy(skb_put(skb, skb_len), skb_data, skb_len);
+	skb->protocol =	card->lan_type_trans(skb, card->dev);
+	card->stats.rx_bytes += skb_len;
+	card->stats.rx_packets++;
+	if (skb->protocol == htons(ETH_P_802_2))
+		*((__u32 *)skb->cb) = ++card->pkt_seq;
+	netif_rx(skb);
+}
+
+/**
+ * LCS main routine to get packets and lancmd replies from the buffers
+ */
+static void
+lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+	struct lcs_card *card;
+	struct lcs_header *lcs_hdr;
+	__u16 offset;
+
+	LCS_DBF_TEXT(5, trace, "lcsgtpkt");
+	lcs_hdr = (struct lcs_header *) buffer->data;
+	if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) {
+		LCS_DBF_TEXT(4, trace, "-eiogpkt");
+		return;
+	}
+	card = container_of(channel, struct lcs_card, read);
+	offset = 0;
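+	/*
+	 * Frames are chained within the buffer by the offset field of the
+	 * LCS header, which points to the next frame; an offset of zero
+	 * terminates the chain.
+	 */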
+	while (lcs_hdr->offset != 0) {
+		if (lcs_hdr->offset <= 0 ||
+		    lcs_hdr->offset > LCS_IOBUFFERSIZE ||
+		    lcs_hdr->offset < offset) {
+			/* Offset invalid. */
+			card->stats.rx_length_errors++;
+			card->stats.rx_errors++;
+			return;
+		}
+		/* What kind of frame is it? */
+		if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
+			/* Control frame. */
+			lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
+		else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
+			 lcs_hdr->type == LCS_FRAME_TYPE_TR ||
+			 lcs_hdr->type == LCS_FRAME_TYPE_FDDI)
+			/* Normal network packet. */
+			lcs_get_skb(card, (char *)(lcs_hdr + 1),
+				    lcs_hdr->offset - offset -
+				    sizeof(struct lcs_header));
+		else
+			/* Unknown frame type. FIXME: error message? */
+			;
+		/* Proceed to next frame. */
+		offset = lcs_hdr->offset;
+		lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
+		lcs_hdr = (struct lcs_header *) (buffer->data + offset);
+	}
+	/* The buffer is now empty. Make it ready again. */
+	lcs_ready_buffer(&card->read, buffer);
+}
+
+/**
+ * get network statistics for ifconfig and other user programs
+ */
+static struct net_device_stats *
+lcs_getstats(struct net_device *dev)
+{
+	struct lcs_card *card;
+
+	LCS_DBF_TEXT(4, trace, "netstats");
+	card = (struct lcs_card *) dev->ml_priv;
+	return &card->stats;
+}
+
+/**
+ * Stop the LCS device.
+ * Called when the interface is taken down, e.g. via "ifconfig xxx down".
+ */
+static int
+lcs_stop_device(struct net_device *dev)
+{
+	struct lcs_card *card;
+	int rc;
+
+	LCS_DBF_TEXT(2, trace, "stopdev");
+	card   = (struct lcs_card *) dev->ml_priv;
+	netif_carrier_off(dev);
+	netif_tx_disable(dev);
+	dev->flags &= ~IFF_UP;
+	wait_event(card->write.wait_q,
+		(card->write.state != LCS_CH_STATE_RUNNING));
+	rc = lcs_stopcard(card);
+	if (rc)
+		dev_err(&card->dev->dev,
+			" Shutting down the LCS device failed\n ");
+	return rc;
+}
+
+/**
+ * Start the LCS device and make it operational.
+ * Called when the interface is brought up, e.g. via "ifconfig xxx up".
+ */
+static int
+lcs_open_device(struct net_device *dev)
+{
+	struct lcs_card *card;
+	int rc;
+
+	LCS_DBF_TEXT(2, trace, "opendev");
+	card = (struct lcs_card *) dev->ml_priv;
+	/* initialize statistics */
+	rc = lcs_detect(card);
+	if (rc) {
+		pr_err("Error in opening device!\n");
+
+	} else {
+		dev->flags |= IFF_UP;
+		netif_carrier_on(dev);
+		netif_wake_queue(dev);
+		card->state = DEV_STATE_UP;
+	}
+	return rc;
+}
+
+/**
+ * Show function for the portno sysfs attribute.
+ */
+static ssize_t
+lcs_portno_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct lcs_card *card;
+
+	card = dev_get_drvdata(dev);
+
+	if (!card)
+		return 0;
+
+	return sprintf(buf, "%d\n", card->portno);
+}
+
+/**
+ * Store function for the portno sysfs attribute.
+ */
+static ssize_t
+lcs_portno_store(struct device *dev, struct device_attribute *attr,
+		 const char *buf, size_t count)
+{
+	struct lcs_card *card;
+	int value;
+
+	card = dev_get_drvdata(dev);
+
+	if (!card)
+		return 0;
+
+	sscanf(buf, "%d", &value);
+	/* TODO: sanity checks */
+	card->portno = value;
+
+	return count;
+}
+
+static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
+
+static const char *lcs_type[] = {
+	"not a channel",
+	"2216 parallel",
+	"2216 channel",
+	"OSA LCS card",
+	"unknown channel type",
+	"unsupported channel type",
+};
+
+static ssize_t
+lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct ccwgroup_device *cgdev;
+
+	cgdev = to_ccwgroupdev(dev);
+	if (!cgdev)
+		return -ENODEV;
+
+	return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]);
+}
+
+static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
+
+static ssize_t
+lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct lcs_card *card;
+
+	card = dev_get_drvdata(dev);
+
+	return card ? sprintf(buf, "%u\n", card->lancmd_timeout) : 0;
+}
+
+static ssize_t
+lcs_timeout_store(struct device *dev, struct device_attribute *attr,
+		  const char *buf, size_t count)
+{
+	struct lcs_card *card;
+	unsigned int value;
+
+	card = dev_get_drvdata(dev);
+
+	if (!card)
+		return 0;
+
+	sscanf(buf, "%u", &value);
+	/* TODO: sanity checks */
+	card->lancmd_timeout = value;
+
+	return count;
+}
+
+static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
+
+static ssize_t
+lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct lcs_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int i;
+
+	if (!card)
+		return -EINVAL;
+	if (card->state != DEV_STATE_UP)
+		return -EPERM;
+	i = simple_strtoul(buf, &tmp, 16);
+	if (i == 1)
+		lcs_schedule_recovery(card);
+	return count;
+}
+
+static DEVICE_ATTR(recover, 0200, NULL, lcs_dev_recover_store);
+
+static struct attribute * lcs_attrs[] = {
+	&dev_attr_portno.attr,
+	&dev_attr_type.attr,
+	&dev_attr_lancmd_timeout.attr,
+	&dev_attr_recover.attr,
+	NULL,
+};
+
+static struct attribute_group lcs_attr_group = {
+	.attrs = lcs_attrs,
+};
+
+/**
+ * lcs_probe_device is called on establishing a new ccwgroup_device.
+ */
+static int
+lcs_probe_device(struct ccwgroup_device *ccwgdev)
+{
+	struct lcs_card *card;
+	int ret;
+
+	if (!get_device(&ccwgdev->dev))
+		return -ENODEV;
+
+	LCS_DBF_TEXT(2, setup, "add_dev");
+	card = lcs_alloc_card();
+	if (!card) {
+		LCS_DBF_TEXT_(2, setup, "  rc%d", -ENOMEM);
+		put_device(&ccwgdev->dev);
+		return -ENOMEM;
+	}
+	ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group);
+	if (ret) {
+		lcs_free_card(card);
+		put_device(&ccwgdev->dev);
+		return ret;
+	}
+	dev_set_drvdata(&ccwgdev->dev, card);
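+	/*
+	 * The group device bundles two CCW devices: cdev[0] is used as the
+	 * read channel and cdev[1] as the write channel (see lcs_new_device).
+	 */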
+	ccwgdev->cdev[0]->handler = lcs_irq;
+	ccwgdev->cdev[1]->handler = lcs_irq;
+	card->gdev = ccwgdev;
+	INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread);
+	card->thread_start_mask = 0;
+	card->thread_allowed_mask = 0;
+	card->thread_running_mask = 0;
+	return 0;
+}
+
+static int
+lcs_register_netdev(struct ccwgroup_device *ccwgdev)
+{
+	struct lcs_card *card;
+
+	LCS_DBF_TEXT(2, setup, "regnetdv");
+	card = dev_get_drvdata(&ccwgdev->dev);
+	if (card->dev->reg_state != NETREG_UNINITIALIZED)
+		return 0;
+	SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
+	return register_netdev(card->dev);
+}
+
+/**
+ * lcs_new_device will be called by setting the group device online.
+ */
+static const struct net_device_ops lcs_netdev_ops = {
+	.ndo_open		= lcs_open_device,
+	.ndo_stop		= lcs_stop_device,
+	.ndo_get_stats		= lcs_getstats,
+	.ndo_start_xmit		= lcs_start_xmit,
+};
+
+static const struct net_device_ops lcs_mc_netdev_ops = {
+	.ndo_open		= lcs_open_device,
+	.ndo_stop		= lcs_stop_device,
+	.ndo_get_stats		= lcs_getstats,
+	.ndo_start_xmit		= lcs_start_xmit,
+	.ndo_set_rx_mode	= lcs_set_multicast_list,
+};
+
+static int
+lcs_new_device(struct ccwgroup_device *ccwgdev)
+{
+	struct lcs_card *card;
+	struct net_device *dev = NULL;
+	enum lcs_dev_states recover_state;
+	int rc;
+
+	card = dev_get_drvdata(&ccwgdev->dev);
+	if (!card)
+		return -ENODEV;
+
+	LCS_DBF_TEXT(2, setup, "newdev");
+	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+	card->read.ccwdev  = ccwgdev->cdev[0];
+	card->write.ccwdev = ccwgdev->cdev[1];
+
+	recover_state = card->state;
+	rc = ccw_device_set_online(card->read.ccwdev);
+	if (rc)
+		goto out_err;
+	rc = ccw_device_set_online(card->write.ccwdev);
+	if (rc)
+		goto out_werr;
+
+	LCS_DBF_TEXT(3, setup, "lcsnewdv");
+
+	lcs_setup_card(card);
+	rc = lcs_detect(card);
+	if (rc) {
+		LCS_DBF_TEXT(2, setup, "dtctfail");
+		dev_err(&ccwgdev->dev,
+			"Detecting a network adapter for LCS devices"
+			" failed with rc=%d (0x%x)\n", rc, rc);
+		lcs_stopcard(card);
+		goto out;
+	}
+	if (card->dev) {
+		LCS_DBF_TEXT(2, setup, "samedev");
+		LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+		goto netdev_out;
+	}
+	switch (card->lan_type) {
+#ifdef CONFIG_ETHERNET
+	case LCS_FRAME_TYPE_ENET:
+		card->lan_type_trans = eth_type_trans;
+		dev = alloc_etherdev(0);
+		break;
+#endif
+#ifdef CONFIG_TR
+	case LCS_FRAME_TYPE_TR:
+		card->lan_type_trans = tr_type_trans;
+		dev = alloc_trdev(0);
+		break;
+#endif
+#ifdef CONFIG_FDDI
+	case LCS_FRAME_TYPE_FDDI:
+		card->lan_type_trans = fddi_type_trans;
+		dev = alloc_fddidev(0);
+		break;
+#endif
+	default:
+		LCS_DBF_TEXT(3, setup, "errinit");
+		pr_err(" Initialization failed\n");
+		goto out;
+	}
+	if (!dev)
+		goto out;
+	card->dev = dev;
+	card->dev->ml_priv = card;
+	card->dev->netdev_ops = &lcs_netdev_ops;
+	memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
+#ifdef CONFIG_IP_MULTICAST
+	if (!lcs_check_multicast_support(card))
+		card->dev->netdev_ops = &lcs_mc_netdev_ops;
+#endif
+netdev_out:
+	lcs_set_allowed_threads(card,0xffffffff);
+	if (recover_state == DEV_STATE_RECOVER) {
+		lcs_set_multicast_list(card->dev);
+		card->dev->flags |= IFF_UP;
+		netif_carrier_on(card->dev);
+		netif_wake_queue(card->dev);
+		card->state = DEV_STATE_UP;
+	} else {
+		lcs_stopcard(card);
+	}
+
+	if (lcs_register_netdev(ccwgdev) != 0)
+		goto out;
+
+	/* Print out supported assists: IPv6 */
+	pr_info("LCS device %s %s IPv6 support\n", card->dev->name,
+		(card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
+		"with" : "without");
+	/* Print out supported assist: Multicast */
+	pr_info("LCS device %s %s Multicast support\n", card->dev->name,
+		(card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
+		"with" : "without");
+	return 0;
+out:
+
+	ccw_device_set_offline(card->write.ccwdev);
+out_werr:
+	ccw_device_set_offline(card->read.ccwdev);
+out_err:
+	return -ENODEV;
+}
+
+/**
+ * lcs_shutdown_device, called when setting the group device offline.
+ */
+static int
+__lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode)
+{
+	struct lcs_card *card;
+	enum lcs_dev_states recover_state;
+	int ret = 0, ret2 = 0, ret3 = 0;
+
+	LCS_DBF_TEXT(3, setup, "shtdndev");
+	card = dev_get_drvdata(&ccwgdev->dev);
+	if (!card)
+		return -ENODEV;
+	if (recovery_mode == 0) {
+		lcs_set_allowed_threads(card, 0);
+		if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD))
+			return -ERESTARTSYS;
+	}
+	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+	recover_state = card->state;
+
+	ret = lcs_stop_device(card->dev);
+	ret2 = ccw_device_set_offline(card->read.ccwdev);
+	ret3 = ccw_device_set_offline(card->write.ccwdev);
+	if (!ret)
+		ret = (ret2) ? ret2 : ret3;
+	if (ret)
+		LCS_DBF_TEXT_(3, setup, "1err:%d", ret);
+	if (recover_state == DEV_STATE_UP) {
+		card->state = DEV_STATE_RECOVER;
+	}
+	return 0;
+}
+
+static int
+lcs_shutdown_device(struct ccwgroup_device *ccwgdev)
+{
+	return __lcs_shutdown_device(ccwgdev, 0);
+}
+
+/**
+ * drive lcs recovery after startup and startlan initiated by Lan Gateway
+ */
+static int
+lcs_recovery(void *ptr)
+{
+	struct lcs_card *card;
+	struct ccwgroup_device *gdev;
+	int rc;
+
+	card = (struct lcs_card *) ptr;
+
+	LCS_DBF_TEXT(4, trace, "recover1");
+	if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD))
+		return 0;
+	LCS_DBF_TEXT(4, trace, "recover2");
+	gdev = card->gdev;
+	dev_warn(&gdev->dev,
+		"A recovery process has been started for the LCS device\n");
+	rc = __lcs_shutdown_device(gdev, 1);
+	rc = lcs_new_device(gdev);
+	if (!rc)
+		pr_info("Device %s successfully recovered!\n",
+			card->dev->name);
+	else
+		pr_info("Device %s could not be recovered!\n",
+			card->dev->name);
+	lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD);
+	return 0;
+}
+
+/**
+ * lcs_remove_device, free buffers and card
+ */
+static void
+lcs_remove_device(struct ccwgroup_device *ccwgdev)
+{
+	struct lcs_card *card;
+
+	card = dev_get_drvdata(&ccwgdev->dev);
+	if (!card)
+		return;
+
+	LCS_DBF_TEXT(3, setup, "remdev");
+	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+	if (ccwgdev->state == CCWGROUP_ONLINE) {
+		lcs_shutdown_device(ccwgdev);
+	}
+	if (card->dev)
+		unregister_netdev(card->dev);
+	sysfs_remove_group(&ccwgdev->dev.kobj, &lcs_attr_group);
+	lcs_cleanup_card(card);
+	lcs_free_card(card);
+	put_device(&ccwgdev->dev);
+}
+
+static int lcs_pm_suspend(struct lcs_card *card)
+{
+	if (card->dev)
+		netif_device_detach(card->dev);
+	lcs_set_allowed_threads(card, 0);
+	lcs_wait_for_threads(card, 0xffffffff);
+	if (card->state != DEV_STATE_DOWN)
+		__lcs_shutdown_device(card->gdev, 1);
+	return 0;
+}
+
+static int lcs_pm_resume(struct lcs_card *card)
+{
+	int rc = 0;
+
+	if (card->state == DEV_STATE_RECOVER)
+		rc = lcs_new_device(card->gdev);
+	if (card->dev)
+		netif_device_attach(card->dev);
+	if (rc) {
+		dev_warn(&card->gdev->dev, "The lcs device driver "
+			"failed to recover the device\n");
+	}
+	return rc;
+}
+
+static int lcs_prepare(struct ccwgroup_device *gdev)
+{
+	return 0;
+}
+
+static void lcs_complete(struct ccwgroup_device *gdev)
+{
+	return;
+}
+
+static int lcs_freeze(struct ccwgroup_device *gdev)
+{
+	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
+	return lcs_pm_suspend(card);
+}
+
+static int lcs_thaw(struct ccwgroup_device *gdev)
+{
+	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
+	return lcs_pm_resume(card);
+}
+
+static int lcs_restore(struct ccwgroup_device *gdev)
+{
+	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
+	return lcs_pm_resume(card);
+}
+
+static struct ccw_device_id lcs_ids[] = {
+	{CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
+	{CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
+	{CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, lcs_ids);
+
+static struct ccw_driver lcs_ccw_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "lcs",
+	},
+	.ids	= lcs_ids,
+	.probe	= ccwgroup_probe_ccwdev,
+	.remove	= ccwgroup_remove_ccwdev,
+	.int_class = IOINT_LCS,
+};
+
+/**
+ * LCS ccwgroup driver registration
+ */
+static struct ccwgroup_driver lcs_group_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "lcs",
+	},
+	.max_slaves  = 2,
+	.driver_id   = 0xD3C3E2,
+	.probe       = lcs_probe_device,
+	.remove      = lcs_remove_device,
+	.set_online  = lcs_new_device,
+	.set_offline = lcs_shutdown_device,
+	.prepare     = lcs_prepare,
+	.complete    = lcs_complete,
+	.freeze	     = lcs_freeze,
+	.thaw	     = lcs_thaw,
+	.restore     = lcs_restore,
+};
+
+static ssize_t
+lcs_driver_group_store(struct device_driver *ddrv, const char *buf,
+		       size_t count)
+{
+	int err;
+	err = ccwgroup_create_from_string(lcs_root_dev,
+					  lcs_group_driver.driver_id,
+					  &lcs_ccw_driver, 2, buf);
+	return err ? err : count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);
+
+static struct attribute *lcs_group_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+
+static struct attribute_group lcs_group_attr_group = {
+	.attrs = lcs_group_attrs,
+};
+
+static const struct attribute_group *lcs_group_attr_groups[] = {
+	&lcs_group_attr_group,
+	NULL,
+};
+
+/**
+ *  LCS Module/Kernel initialization function
+ */
+static int
+__init lcs_init_module(void)
+{
+	int rc;
+
+	pr_info("Loading %s\n", version);
+	rc = lcs_register_debug_facility();
+	LCS_DBF_TEXT(0, setup, "lcsinit");
+	if (rc)
+		goto out_err;
+	lcs_root_dev = root_device_register("lcs");
+	rc = IS_ERR(lcs_root_dev) ? PTR_ERR(lcs_root_dev) : 0;
+	if (rc)
+		goto register_err;
+	rc = ccw_driver_register(&lcs_ccw_driver);
+	if (rc)
+		goto ccw_err;
+	lcs_group_driver.driver.groups = lcs_group_attr_groups;
+	rc = ccwgroup_driver_register(&lcs_group_driver);
+	if (rc)
+		goto ccwgroup_err;
+	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&lcs_ccw_driver);
+ccw_err:
+	root_device_unregister(lcs_root_dev);
+register_err:
+	lcs_unregister_debug_facility();
+out_err:
+	pr_err("Initializing the lcs device driver failed\n");
+	return rc;
+}
+
+
+/**
+ *  LCS module cleanup function
+ */
+static void
+__exit lcs_cleanup_module(void)
+{
+	pr_info("Terminating lcs module.\n");
+	LCS_DBF_TEXT(0, trace, "cleanup");
+	driver_remove_file(&lcs_group_driver.driver,
+			   &driver_attr_group);
+	ccwgroup_driver_unregister(&lcs_group_driver);
+	ccw_driver_unregister(&lcs_ccw_driver);
+	root_device_unregister(lcs_root_dev);
+	lcs_unregister_debug_facility();
+}
+
+module_init(lcs_init_module);
+module_exit(lcs_cleanup_module);
+
+MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/lcs.h b/ap/os/linux/linux-3.4.x/drivers/s390/net/lcs.h
new file mode 100644
index 0000000..8c03392
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/lcs.h
@@ -0,0 +1,345 @@
+/*lcs.h*/
+
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <asm/ccwdev.h>
+
+#define LCS_DBF_TEXT(level, name, text) \
+	do { \
+		debug_text_event(lcs_dbf_##name, level, text); \
+	} while (0)
+
+#define LCS_DBF_HEX(level,name,addr,len) \
+do { \
+	debug_event(lcs_dbf_##name,level,(void*)(addr),len); \
+} while (0)
+
+/* Allow sorting out low debug levels early to avoid wasted sprintfs */
+static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
+{
+	return (level <= dbf_grp->level);
+}
+
+#define LCS_DBF_TEXT_(level,name,text...) \
+	do { \
+		if (lcs_dbf_passes(lcs_dbf_##name, level)) { \
+			sprintf(debug_buffer, text); \
+			debug_text_event(lcs_dbf_##name, level, debug_buffer); \
+		} \
+	} while (0)
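+
+/*
+ * Illustrative note (not part of the original comments): these macros are
+ * how debug data reaches the lcs debug areas; typical calls seen in lcs.c
+ * are for example
+ *
+ *	LCS_DBF_TEXT(2, setup, "samedev");
+ *	LCS_DBF_HEX(3, setup, &card, sizeof(void *));
+ *	LCS_DBF_TEXT_(3, setup, "1err:%d", ret);
+ *
+ * LCS_DBF_TEXT_ only formats into debug_buffer when lcs_dbf_passes()
+ * accepts the level, avoiding wasted sprintfs.
+ */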
+
+/**
+ *	sysfs related stuff
+ */
+#define CARD_FROM_DEV(cdev) \
+	(struct lcs_card *) dev_get_drvdata( \
+		&((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev);
+
+/**
+ * Enum for classifying detected devices.
+ */
+enum lcs_channel_types {
+	/* Device is not a channel  */
+	lcs_channel_type_none,
+
+	/* Device is a parallel channel */
+	lcs_channel_type_parallel,
+
+	/* Device is a 2216 channel */
+	lcs_channel_type_2216,
+
+	/* Device is an OSA2 card */
+	lcs_channel_type_osa2
+};
+
+/**
+ * CCW commands used in this driver
+ */
+#define LCS_CCW_WRITE		0x01
+#define LCS_CCW_READ		0x02
+#define LCS_CCW_TRANSFER	0x08
+
+/**
+ * LCS device status primitives
+ */
+#define LCS_CMD_STARTLAN	0x01
+#define LCS_CMD_STOPLAN		0x02
+#define LCS_CMD_LANSTAT		0x04
+#define LCS_CMD_STARTUP		0x07
+#define LCS_CMD_SHUTDOWN	0x08
+#define LCS_CMD_QIPASSIST	0xb2
+#define LCS_CMD_SETIPM		0xb4
+#define LCS_CMD_DELIPM		0xb5
+
+#define LCS_INITIATOR_TCPIP	0x00
+#define LCS_INITIATOR_LGW	0x01
+#define LCS_STD_CMD_SIZE	16
+#define LCS_MULTICAST_CMD_SIZE	404
+
+/**
+ * LCS IPASSIST MASKS, only used when multicast is switched on
+ */
+/* Not supported by LCS */
+#define LCS_IPASS_ARP_PROCESSING	0x0001
+#define LCS_IPASS_IN_CHECKSUM_SUPPORT	0x0002
+#define LCS_IPASS_OUT_CHECKSUM_SUPPORT	0x0004
+#define LCS_IPASS_IP_FRAG_REASSEMBLY	0x0008
+#define LCS_IPASS_IP_FILTERING		0x0010
+/* Supported by lcs 3172 */
+#define LCS_IPASS_IPV6_SUPPORT		0x0020
+#define LCS_IPASS_MULTICAST_SUPPORT	0x0040
+
+/**
+ * LCS sense byte definitions
+ */
+#define LCS_SENSE_BYTE_0 		0
+#define LCS_SENSE_BYTE_1 		1
+#define LCS_SENSE_BYTE_2 		2
+#define LCS_SENSE_BYTE_3 		3
+#define LCS_SENSE_INTERFACE_DISCONNECT	0x01
+#define LCS_SENSE_EQUIPMENT_CHECK	0x10
+#define LCS_SENSE_BUS_OUT_CHECK		0x20
+#define LCS_SENSE_INTERVENTION_REQUIRED 0x40
+#define LCS_SENSE_CMD_REJECT		0x80
+#define LCS_SENSE_RESETTING_EVENT	0x80
+#define LCS_SENSE_DEVICE_ONLINE		0x20
+
+/**
+ * LCS packet type definitions
+ */
+#define LCS_FRAME_TYPE_CONTROL		0
+#define LCS_FRAME_TYPE_ENET		1
+#define LCS_FRAME_TYPE_TR		2
+#define LCS_FRAME_TYPE_FDDI		7
+#define LCS_FRAME_TYPE_AUTO		-1
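+
+/*
+ * Illustrative note: the non-control frame types correspond to the
+ * net_device allocation done in the set_online path (lcs_new_device() in
+ * lcs.c), roughly:
+ *
+ *	LCS_FRAME_TYPE_ENET -> alloc_etherdev() / eth_type_trans
+ *	LCS_FRAME_TYPE_TR   -> alloc_trdev()   / tr_type_trans
+ *	LCS_FRAME_TYPE_FDDI -> alloc_fddidev() / fddi_type_trans
+ */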
+
+/**
+ * some more definitions, we will sort them later
+ */
+#define LCS_ILLEGAL_OFFSET		0xffff
+#define LCS_IOBUFFERSIZE		0x5000
+#define LCS_NUM_BUFFS			32	/* needs to be power of 2 */
+#define LCS_MAC_LENGTH			6
+#define LCS_INVALID_PORT_NO		-1
+#define LCS_LANCMD_TIMEOUT_DEFAULT      5
+
+/**
+ * Multicast state
+ */
+#define	 LCS_IPM_STATE_SET_REQUIRED	0
+#define	 LCS_IPM_STATE_DEL_REQUIRED	1
+#define	 LCS_IPM_STATE_ON_CARD		2
+
+/**
+ * LCS IP Assist declarations
+ * seems to be only used for multicast
+ */
+#define	 LCS_IPASS_ARP_PROCESSING	0x0001
+#define	 LCS_IPASS_INBOUND_CSUM_SUPP	0x0002
+#define	 LCS_IPASS_OUTBOUND_CSUM_SUPP	0x0004
+#define	 LCS_IPASS_IP_FRAG_REASSEMBLY	0x0008
+#define	 LCS_IPASS_IP_FILTERING		0x0010
+#define	 LCS_IPASS_IPV6_SUPPORT		0x0020
+#define	 LCS_IPASS_MULTICAST_SUPPORT	0x0040
+
+/**
+ * LCS Buffer states
+ */
+enum lcs_buffer_states {
+	LCS_BUF_STATE_EMPTY,	/* buffer is empty */
+	LCS_BUF_STATE_LOCKED,	/* buffer is locked, don't touch */
+	LCS_BUF_STATE_READY,	/* buffer is ready for read/write */
+	LCS_BUF_STATE_PROCESSED,
+};
+
+/**
+ * LCS Channel State Machine declarations
+ */
+enum lcs_channel_states {
+	LCS_CH_STATE_INIT,
+	LCS_CH_STATE_HALTED,
+	LCS_CH_STATE_STOPPED,
+	LCS_CH_STATE_RUNNING,
+	LCS_CH_STATE_SUSPENDED,
+	LCS_CH_STATE_CLEARED,
+	LCS_CH_STATE_ERROR,
+};
+
+/**
+ * LCS device state machine
+ */
+enum lcs_dev_states {
+	DEV_STATE_DOWN,
+	DEV_STATE_UP,
+	DEV_STATE_RECOVER,
+};
+
+enum lcs_threads {
+	LCS_SET_MC_THREAD 	= 1,
+	LCS_RECOVERY_THREAD 	= 2,
+};
+
+/**
+ * LCS struct declarations
+ */
+struct lcs_header {
+	__u16  offset;
+	__u8   type;
+	__u8   slot;
+}  __attribute__ ((packed));
+
+struct lcs_ip_mac_pair {
+	__be32  ip_addr;
+	__u8   mac_addr[LCS_MAC_LENGTH];
+	__u8   reserved[2];
+}  __attribute__ ((packed));
+
+struct lcs_ipm_list {
+	struct list_head list;
+	struct lcs_ip_mac_pair ipm;
+	__u8 ipm_state;
+};
+
+struct lcs_cmd {
+	__u16  offset;
+	__u8   type;
+	__u8   slot;
+	__u8   cmd_code;
+	__u8   initiator;
+	__u16  sequence_no;
+	__u16  return_code;
+	union {
+		struct {
+			__u8   lan_type;
+			__u8   portno;
+			__u16  parameter_count;
+			__u8   operator_flags[3];
+			__u8   reserved[3];
+		} lcs_std_cmd;
+		struct {
+			__u16  unused1;
+			__u16  buff_size;
+			__u8   unused2[6];
+		} lcs_startup;
+		struct {
+			__u8   lan_type;
+			__u8   portno;
+			__u8   unused[10];
+			__u8   mac_addr[LCS_MAC_LENGTH];
+			__u32  num_packets_deblocked;
+			__u32  num_packets_blocked;
+			__u32  num_packets_tx_on_lan;
+			__u32  num_tx_errors_detected;
+			__u32  num_tx_packets_disgarded;
+			__u32  num_packets_rx_from_lan;
+			__u32  num_rx_errors_detected;
+			__u32  num_rx_discarded_nobuffs_avail;
+			__u32  num_rx_packets_too_large;
+		} lcs_lanstat_cmd;
+#ifdef CONFIG_IP_MULTICAST
+		struct {
+			__u8   lan_type;
+			__u8   portno;
+			__u16  num_ip_pairs;
+			__u16  ip_assists_supported;
+			__u16  ip_assists_enabled;
+			__u16  version;
+			struct {
+				struct lcs_ip_mac_pair
+				ip_mac_pair[32];
+				__u32	  response_data;
+			} lcs_ipass_ctlmsg __attribute ((packed));
+		} lcs_qipassist __attribute__ ((packed));
+#endif /*CONFIG_IP_MULTICAST */
+	} cmd __attribute__ ((packed));
+}  __attribute__ ((packed));
+
+/**
+ * Forward declarations.
+ */
+struct lcs_card;
+struct lcs_channel;
+
+/**
+ * Definition of an lcs buffer.
+ */
+struct lcs_buffer {
+	enum lcs_buffer_states state;
+	void *data;
+	int count;
+	/* Callback for completion notification. */
+	void (*callback)(struct lcs_channel *, struct lcs_buffer *);
+};
+
+struct lcs_reply {
+	struct list_head list;
+	__u16 sequence_no;
+	atomic_t refcnt;
+	/* Callback for completion notification. */
+	void (*callback)(struct lcs_card *, struct lcs_cmd *);
+	wait_queue_head_t wait_q;
+	struct lcs_card *card;
+	int received;
+	int rc;
+};
+
+/**
+ * Definition of an lcs channel
+ */
+struct lcs_channel {
+	enum lcs_channel_states state;
+	struct ccw_device *ccwdev;
+	struct ccw1 ccws[LCS_NUM_BUFFS + 1];
+	wait_queue_head_t wait_q;
+	struct tasklet_struct irq_tasklet;
+	struct lcs_buffer iob[LCS_NUM_BUFFS];
+	int io_idx;
+	int buf_idx;
+};
+
+
+/**
+ * definition of the lcs card
+ */
+struct lcs_card {
+	spinlock_t lock;
+	spinlock_t ipm_lock;
+	enum lcs_dev_states state;
+	struct net_device *dev;
+	struct net_device_stats stats;
+	__be16 (*lan_type_trans)(struct sk_buff *skb,
+					 struct net_device *dev);
+	struct ccwgroup_device *gdev;
+	struct lcs_channel read;
+	struct lcs_channel write;
+	struct lcs_buffer *tx_buffer;
+	int tx_emitted;
+	struct list_head lancmd_waiters;
+	int lancmd_timeout;
+
+	struct work_struct kernel_thread_starter;
+	spinlock_t mask_lock;
+	unsigned long thread_start_mask;
+	unsigned long thread_running_mask;
+	unsigned long thread_allowed_mask;
+	wait_queue_head_t wait_q;
+
+#ifdef CONFIG_IP_MULTICAST
+	struct list_head ipm_list;
+#endif
+	__u8 mac[LCS_MAC_LENGTH];
+	__u16 ip_assists_supported;
+	__u16 ip_assists_enabled;
+	__s8 lan_type;
+	__u32 pkt_seq;
+	__u16 sequence_no;
+	__s16 portno;
+	/* Some info copied from probeinfo */
+	u8 device_forced;
+	u8 max_port_no;
+	u8 hint_port_no;
+	s16 port_protocol_no;
+}  __attribute__ ((aligned(8)));
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/netiucv.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/netiucv.c
new file mode 100644
index 0000000..8160591
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/netiucv.c
@@ -0,0 +1,2299 @@
+/*
+ * IUCV network driver
+ *
+ * Copyright IBM Corp. 2001, 2009
+ *
+ * Author(s):
+ *	Original netiucv driver:
+ *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
+ *	Sysfs integration and all bugs therein:
+ *		Cornelia Huck (cornelia.huck@de.ibm.com)
+ *	PM functions:
+ *		Ursula Braun (ursula.braun@de.ibm.com)
+ *
+ * Documentation used:
+ *  the source of the original IUCV driver by:
+ *    Stefan Hegewald <hegewald@de.ibm.com>
+ *    Hartmut Penner <hpenner@de.ibm.com>
+ *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *    Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#define KMSG_COMPONENT "netiucv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+#include <linux/device.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/dst.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/ebcdic.h>
+
+#include <net/iucv/iucv.h>
+#include "fsm.h"
+
+MODULE_AUTHOR
+    ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
+MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
+
+/**
+ * Debug Facility stuff
+ */
+#define IUCV_DBF_SETUP_NAME "iucv_setup"
+#define IUCV_DBF_SETUP_LEN 64
+#define IUCV_DBF_SETUP_PAGES 2
+#define IUCV_DBF_SETUP_NR_AREAS 1
+#define IUCV_DBF_SETUP_LEVEL 3
+
+#define IUCV_DBF_DATA_NAME "iucv_data"
+#define IUCV_DBF_DATA_LEN 128
+#define IUCV_DBF_DATA_PAGES 2
+#define IUCV_DBF_DATA_NR_AREAS 1
+#define IUCV_DBF_DATA_LEVEL 2
+
+#define IUCV_DBF_TRACE_NAME "iucv_trace"
+#define IUCV_DBF_TRACE_LEN 16
+#define IUCV_DBF_TRACE_PAGES 4
+#define IUCV_DBF_TRACE_NR_AREAS 1
+#define IUCV_DBF_TRACE_LEVEL 3
+
+#define IUCV_DBF_TEXT(name,level,text) \
+	do { \
+		debug_text_event(iucv_dbf_##name,level,text); \
+	} while (0)
+
+#define IUCV_DBF_HEX(name,level,addr,len) \
+	do { \
+		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
+	} while (0)
+
+DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
+
+/* Allow sorting out low debug levels early to avoid wasted sprintfs */
+static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
+{
+	return (level <= dbf_grp->level);
+}
+
+#define IUCV_DBF_TEXT_(name, level, text...) \
+	do { \
+		if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
+			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
+			sprintf(__buf, text); \
+			debug_text_event(iucv_dbf_##name, level, __buf); \
+			put_cpu_var(iucv_dbf_txt_buf); \
+		} \
+	} while (0)
+
+#define IUCV_DBF_SPRINTF(name,level,text...) \
+	do { \
+		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
+		debug_sprintf_event(iucv_dbf_trace, level, text ); \
+	} while (0)
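+
+/*
+ * Illustrative note: typical uses of the macros above, as seen throughout
+ * this file, are for example
+ *
+ *	IUCV_DBF_TEXT(trace, 3, __func__);
+ *	IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
+ *
+ * IUCV_DBF_TEXT_ formats into the per-cpu iucv_dbf_txt_buf, and only does
+ * so when iucv_dbf_passes() accepts the level.
+ */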
+
+/**
+ * some more debug stuff
+ */
+#define IUCV_HEXDUMP16(importance,header,ptr) \
+PRINT_##importance(header "%02x %02x %02x %02x  %02x %02x %02x %02x  " \
+		   "%02x %02x %02x %02x  %02x %02x %02x %02x\n", \
+		   *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
+		   *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
+		   *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
+		   *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
+		   *(((char*)ptr)+12),*(((char*)ptr)+13), \
+		   *(((char*)ptr)+14),*(((char*)ptr)+15)); \
+PRINT_##importance(header "%02x %02x %02x %02x  %02x %02x %02x %02x  " \
+		   "%02x %02x %02x %02x  %02x %02x %02x %02x\n", \
+		   *(((char*)ptr)+16),*(((char*)ptr)+17), \
+		   *(((char*)ptr)+18),*(((char*)ptr)+19), \
+		   *(((char*)ptr)+20),*(((char*)ptr)+21), \
+		   *(((char*)ptr)+22),*(((char*)ptr)+23), \
+		   *(((char*)ptr)+24),*(((char*)ptr)+25), \
+		   *(((char*)ptr)+26),*(((char*)ptr)+27), \
+		   *(((char*)ptr)+28),*(((char*)ptr)+29), \
+		   *(((char*)ptr)+30),*(((char*)ptr)+31));
+
+#define PRINTK_HEADER " iucv: "       /* for debugging */
+
+/* dummy device to make sure netiucv_pm functions are called */
+static struct device *netiucv_dev;
+
+static int netiucv_pm_prepare(struct device *);
+static void netiucv_pm_complete(struct device *);
+static int netiucv_pm_freeze(struct device *);
+static int netiucv_pm_restore_thaw(struct device *);
+
+static const struct dev_pm_ops netiucv_pm_ops = {
+	.prepare = netiucv_pm_prepare,
+	.complete = netiucv_pm_complete,
+	.freeze = netiucv_pm_freeze,
+	.thaw = netiucv_pm_restore_thaw,
+	.restore = netiucv_pm_restore_thaw,
+};
+
+static struct device_driver netiucv_driver = {
+	.owner = THIS_MODULE,
+	.name = "netiucv",
+	.bus  = &iucv_bus,
+	.pm = &netiucv_pm_ops,
+};
+
+static int netiucv_callback_connreq(struct iucv_path *,
+				    u8 ipvmid[8], u8 ipuser[16]);
+static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
+static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
+static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
+static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
+static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
+static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
+
+static struct iucv_handler netiucv_handler = {
+	.path_pending	  = netiucv_callback_connreq,
+	.path_complete	  = netiucv_callback_connack,
+	.path_severed	  = netiucv_callback_connrej,
+	.path_quiesced	  = netiucv_callback_connsusp,
+	.path_resumed	  = netiucv_callback_connres,
+	.message_pending  = netiucv_callback_rx,
+	.message_complete = netiucv_callback_txdone
+};
+
+/**
+ * Per connection profiling data
+ */
+struct connection_profile {
+	unsigned long maxmulti;
+	unsigned long maxcqueue;
+	unsigned long doios_single;
+	unsigned long doios_multi;
+	unsigned long txlen;
+	unsigned long tx_time;
+	struct timespec send_stamp;
+	unsigned long tx_pending;
+	unsigned long tx_max_pending;
+};
+
+/**
+ * Representation of one iucv connection
+ */
+struct iucv_connection {
+	struct list_head	  list;
+	struct iucv_path	  *path;
+	struct sk_buff            *rx_buff;
+	struct sk_buff            *tx_buff;
+	struct sk_buff_head       collect_queue;
+	struct sk_buff_head	  commit_queue;
+	spinlock_t                collect_lock;
+	int                       collect_len;
+	int                       max_buffsize;
+	fsm_timer                 timer;
+	fsm_instance              *fsm;
+	struct net_device         *netdev;
+	struct connection_profile prof;
+	char                      userid[9];
+	char			  userdata[17];
+};
+
+/**
+ * Linked list of all connection structs.
+ */
+static LIST_HEAD(iucv_connection_list);
+static DEFINE_RWLOCK(iucv_connection_rwlock);
+
+/**
+ * Representation of event-data for the
+ * connection state machine.
+ */
+struct iucv_event {
+	struct iucv_connection *conn;
+	void                   *data;
+};
+
+/**
+ * Private part of the network device structure
+ */
+struct netiucv_priv {
+	struct net_device_stats stats;
+	unsigned long           tbusy;
+	fsm_instance            *fsm;
+        struct iucv_connection  *conn;
+	struct device           *dev;
+	int			 pm_state;
+};
+
+/**
+ * Link level header for a packet.
+ */
+struct ll_header {
+	u16 next;
+};
+
+#define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
+#define NETIUCV_BUFSIZE_MAX	 65537
+#define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
+#define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
+#define NETIUCV_MTU_DEFAULT      9216
+#define NETIUCV_QUEUELEN_DEFAULT 50
+#define NETIUCV_TIMEOUT_5SEC     5000
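+
+/*
+ * Illustrative note: with NETIUCV_HDRLEN = sizeof(struct ll_header) = 2,
+ * NETIUCV_MTU_MAX works out to 65537 - 2 = 65535, while the default MTU
+ * stays at the more conservative 9216.
+ */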
+
+/**
+ * Compatibility macros for busy handling
+ * of network devices.
+ */
+static inline void netiucv_clear_busy(struct net_device *dev)
+{
+	struct netiucv_priv *priv = netdev_priv(dev);
+	clear_bit(0, &priv->tbusy);
+	netif_wake_queue(dev);
+}
+
+static inline int netiucv_test_and_set_busy(struct net_device *dev)
+{
+	struct netiucv_priv *priv = netdev_priv(dev);
+	netif_stop_queue(dev);
+	return test_and_set_bit(0, &priv->tbusy);
+}
+
+static u8 iucvMagic_ascii[16] = {
+	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
+};
+
+static u8 iucvMagic_ebcdic[16] = {
+	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
+};
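+
+/*
+ * Illustrative note: both arrays encode the same 16-character default
+ * userdata "0       0       " (a '0' followed by seven blanks, twice);
+ * 0x30/0x20 are ASCII '0'/' ', 0xF0/0x40 their EBCDIC equivalents.
+ * netiucv_printuser() compares against the EBCDIC form to detect whether a
+ * connection still carries this default userdata.
+ */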
+
+/**
+ * Convert an iucv userId to its printable
+ * form (strip trailing whitespace).
+ *
+ * @param name An iucv userId
+ * @param len  Length of the userId
+ *
+ * @returns The printable string (static data!!)
+ */
+static char *netiucv_printname(char *name, int len)
+{
+	static char tmp[17];
+	char *p = tmp;
+	memcpy(tmp, name, len);
+	tmp[len] = '\0';
+	while (*p && ((p - tmp) < len) && (!isspace(*p)))
+		p++;
+	*p = '\0';
+	return tmp;
+}
+
+static char *netiucv_printuser(struct iucv_connection *conn)
+{
+	static char tmp_uid[9];
+	static char tmp_udat[17];
+	static char buf[100];
+
+	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
+		tmp_uid[8] = '\0';
+		tmp_udat[16] = '\0';
+		memcpy(tmp_uid, conn->userid, 8);
+		memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
+		memcpy(tmp_udat, conn->userdata, 16);
+		EBCASC(tmp_udat, 16);
+		memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
+		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
+		return buf;
+	} else
+		return netiucv_printname(conn->userid, 8);
+}
+
+/**
+ * States of the interface statemachine.
+ */
+enum dev_states {
+	DEV_STATE_STOPPED,
+	DEV_STATE_STARTWAIT,
+	DEV_STATE_STOPWAIT,
+	DEV_STATE_RUNNING,
+	/**
+	 * MUST always be the last element!
+	 */
+	NR_DEV_STATES
+};
+
+static const char *dev_state_names[] = {
+	"Stopped",
+	"StartWait",
+	"StopWait",
+	"Running",
+};
+
+/**
+ * Events of the interface statemachine.
+ */
+enum dev_events {
+	DEV_EVENT_START,
+	DEV_EVENT_STOP,
+	DEV_EVENT_CONUP,
+	DEV_EVENT_CONDOWN,
+	/**
+	 * MUST always be the last element!
+	 */
+	NR_DEV_EVENTS
+};
+
+static const char *dev_event_names[] = {
+	"Start",
+	"Stop",
+	"Connection up",
+	"Connection down",
+};
+
+/**
+ * Events of the connection statemachine
+ */
+enum conn_events {
+	/**
+	 * Events representing callbacks from
+	 * the lowlevel iucv layer
+	 */
+	CONN_EVENT_CONN_REQ,
+	CONN_EVENT_CONN_ACK,
+	CONN_EVENT_CONN_REJ,
+	CONN_EVENT_CONN_SUS,
+	CONN_EVENT_CONN_RES,
+	CONN_EVENT_RX,
+	CONN_EVENT_TXDONE,
+
+	/**
+	 * Events representing error return codes from
+	 * calls to the lowlevel iucv layer
+	 */
+
+	/**
+	 * Event, representing timer expiry.
+	 */
+	CONN_EVENT_TIMER,
+
+	/**
+	 * Events representing commands from upper levels.
+	 */
+	CONN_EVENT_START,
+	CONN_EVENT_STOP,
+
+	/**
+	 * MUST always be the last element!
+	 */
+	NR_CONN_EVENTS,
+};
+
+static const char *conn_event_names[] = {
+	"Remote connection request",
+	"Remote connection acknowledge",
+	"Remote connection reject",
+	"Connection suspended",
+	"Connection resumed",
+	"Data received",
+	"Data sent",
+
+	"Timer",
+
+	"Start",
+	"Stop",
+};
+
+/**
+ * States of the connection statemachine.
+ */
+enum conn_states {
+	/**
+	 * Connection not assigned to any device,
+	 * initial state, invalid
+	 */
+	CONN_STATE_INVALID,
+
+	/**
+	 * Userid assigned but not operating
+	 */
+	CONN_STATE_STOPPED,
+
+	/**
+	 * Connection registered,
+	 * no connection request sent yet,
+	 * no connection request received
+	 */
+	CONN_STATE_STARTWAIT,
+
+	/**
+	 * Connection registered and connection request sent,
+	 * no acknowledge and no connection request received yet.
+	 */
+	CONN_STATE_SETUPWAIT,
+
+	/**
+	 * Connection up and running idle
+	 */
+	CONN_STATE_IDLE,
+
+	/**
+	 * Data sent, awaiting CONN_EVENT_TXDONE
+	 */
+	CONN_STATE_TX,
+
+	/**
+	 * Error during registration.
+	 */
+	CONN_STATE_REGERR,
+
+	/**
+	 * Error during connection setup.
+	 */
+	CONN_STATE_CONNERR,
+
+	/**
+	 * MUST always be the last element!
+	 */
+	NR_CONN_STATES,
+};
+
+static const char *conn_state_names[] = {
+	"Invalid",
+	"Stopped",
+	"StartWait",
+	"SetupWait",
+	"Idle",
+	"TX",
+	"Terminating",
+	"Registration error",
+	"Connect error",
+};
+
+
+/**
+ * Debug Facility Stuff
+ */
+static debug_info_t *iucv_dbf_setup = NULL;
+static debug_info_t *iucv_dbf_data = NULL;
+static debug_info_t *iucv_dbf_trace = NULL;
+
+DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
+
+static void iucv_unregister_dbf_views(void)
+{
+	if (iucv_dbf_setup)
+		debug_unregister(iucv_dbf_setup);
+	if (iucv_dbf_data)
+		debug_unregister(iucv_dbf_data);
+	if (iucv_dbf_trace)
+		debug_unregister(iucv_dbf_trace);
+}
+static int iucv_register_dbf_views(void)
+{
+	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
+					IUCV_DBF_SETUP_PAGES,
+					IUCV_DBF_SETUP_NR_AREAS,
+					IUCV_DBF_SETUP_LEN);
+	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
+				       IUCV_DBF_DATA_PAGES,
+				       IUCV_DBF_DATA_NR_AREAS,
+				       IUCV_DBF_DATA_LEN);
+	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
+					IUCV_DBF_TRACE_PAGES,
+					IUCV_DBF_TRACE_NR_AREAS,
+					IUCV_DBF_TRACE_LEN);
+
+	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
+	    (iucv_dbf_trace == NULL)) {
+		iucv_unregister_dbf_views();
+		return -ENOMEM;
+	}
+	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
+	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
+
+	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
+	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
+
+	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
+	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
+
+	return 0;
+}
+
+/*
+ * Callback-wrappers, called from lowlevel iucv layer.
+ */
+
+static void netiucv_callback_rx(struct iucv_path *path,
+				struct iucv_message *msg)
+{
+	struct iucv_connection *conn = path->private;
+	struct iucv_event ev;
+
+	ev.conn = conn;
+	ev.data = msg;
+	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
+}
+
+static void netiucv_callback_txdone(struct iucv_path *path,
+				    struct iucv_message *msg)
+{
+	struct iucv_connection *conn = path->private;
+	struct iucv_event ev;
+
+	ev.conn = conn;
+	ev.data = msg;
+	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
+}
+
+static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
+{
+	struct iucv_connection *conn = path->private;
+
+	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
+}
+
+static int netiucv_callback_connreq(struct iucv_path *path,
+				    u8 ipvmid[8], u8 ipuser[16])
+{
+	struct iucv_connection *conn = path->private;
+	struct iucv_event ev;
+	static char tmp_user[9];
+	static char tmp_udat[17];
+	int rc;
+
+	rc = -EINVAL;
+	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
+	memcpy(tmp_udat, ipuser, 16);
+	EBCASC(tmp_udat, 16);
+	read_lock_bh(&iucv_connection_rwlock);
+	list_for_each_entry(conn, &iucv_connection_list, list) {
+		if (strncmp(ipvmid, conn->userid, 8) ||
+		    strncmp(ipuser, conn->userdata, 16))
+			continue;
+		/* Found a matching connection for this path. */
+		conn->path = path;
+		ev.conn = conn;
+		ev.data = path;
+		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
+		rc = 0;
+	}
+	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
+		       tmp_user, netiucv_printname(tmp_udat, 16));
+	read_unlock_bh(&iucv_connection_rwlock);
+	return rc;
+}
+
+static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
+{
+	struct iucv_connection *conn = path->private;
+
+	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
+}
+
+static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
+{
+	struct iucv_connection *conn = path->private;
+
+	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
+}
+
+static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
+{
+	struct iucv_connection *conn = path->private;
+
+	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
+}
+
+/**
+ * NOP action for statemachines
+ */
+static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
+{
+}
+
+/*
+ * Actions of the connection statemachine
+ */
+
+/**
+ * netiucv_unpack_skb
+ * @conn: The connection where this skb has been received.
+ * @pskb: The received skb.
+ *
+ * Unpack a just received skb and hand it over to upper layers.
+ * Helper function for conn_action_rx.
+ */
+static void netiucv_unpack_skb(struct iucv_connection *conn,
+			       struct sk_buff *pskb)
+{
+	struct net_device     *dev = conn->netdev;
+	struct netiucv_priv   *privptr = netdev_priv(dev);
+	u16 offset = 0;
+
+	skb_put(pskb, NETIUCV_HDRLEN);
+	pskb->dev = dev;
+	pskb->ip_summed = CHECKSUM_NONE;
+	pskb->protocol = ntohs(ETH_P_IP);
+
+	while (1) {
+		struct sk_buff *skb;
+		struct ll_header *header = (struct ll_header *) pskb->data;
+
+		if (!header->next)
+			break;
+
+		skb_pull(pskb, NETIUCV_HDRLEN);
+		header->next -= offset;
+		offset += header->next;
+		header->next -= NETIUCV_HDRLEN;
+		if (skb_tailroom(pskb) < header->next) {
+			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
+				header->next, skb_tailroom(pskb));
+			return;
+		}
+		skb_put(pskb, header->next);
+		skb_reset_mac_header(pskb);
+		skb = dev_alloc_skb(pskb->len);
+		if (!skb) {
+			IUCV_DBF_TEXT(data, 2,
+				"Out of memory in netiucv_unpack_skb\n");
+			privptr->stats.rx_dropped++;
+			return;
+		}
+		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
+					  pskb->len);
+		skb_reset_mac_header(skb);
+		skb->dev = pskb->dev;
+		skb->protocol = pskb->protocol;
+		pskb->ip_summed = CHECKSUM_UNNECESSARY;
+		privptr->stats.rx_packets++;
+		privptr->stats.rx_bytes += skb->len;
+		/*
+		 * Since receiving is always initiated from a tasklet (in iucv.c),
+		 * we must use netif_rx_ni() instead of netif_rx()
+		 */
+		netif_rx_ni(skb);
+		skb_pull(pskb, header->next);
+		skb_put(pskb, NETIUCV_HDRLEN);
+	}
+}
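+
+/*
+ * Illustrative note on the buffer layout handled above (a sketch inferred
+ * from the unpack and transmit code, not from a formal spec): an IUCV
+ * buffer carries a chain of packets, each preceded by a struct ll_header
+ * whose 'next' field holds the offset of the following header; a header
+ * with next == 0 terminates the chain.  conn_action_txdone() and
+ * netiucv_transmit_skb() build the same layout on the send side.
+ */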
+
+static void conn_action_rx(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_event *ev = arg;
+	struct iucv_connection *conn = ev->conn;
+	struct iucv_message *msg = ev->data;
+	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
+	int rc;
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+
+	if (!conn->netdev) {
+		iucv_message_reject(conn->path, msg);
+		IUCV_DBF_TEXT(data, 2,
+			      "Received data for unlinked connection\n");
+		return;
+	}
+	if (msg->length > conn->max_buffsize) {
+		iucv_message_reject(conn->path, msg);
+		privptr->stats.rx_dropped++;
+		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
+			       msg->length, conn->max_buffsize);
+		return;
+	}
+	conn->rx_buff->data = conn->rx_buff->head;
+	skb_reset_tail_pointer(conn->rx_buff);
+	conn->rx_buff->len = 0;
+	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
+				  msg->length, NULL);
+	if (rc || msg->length < 5) {
+		privptr->stats.rx_errors++;
+		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
+		return;
+	}
+	netiucv_unpack_skb(conn, conn->rx_buff);
+}
+
+static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_event *ev = arg;
+	struct iucv_connection *conn = ev->conn;
+	struct iucv_message *msg = ev->data;
+	struct iucv_message txmsg;
+	struct netiucv_priv *privptr = NULL;
+	u32 single_flag = msg->tag;
+	u32 txbytes = 0;
+	u32 txpackets = 0;
+	u32 stat_maxcq = 0;
+	struct sk_buff *skb;
+	unsigned long saveflags;
+	struct ll_header header;
+	int rc;
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+
+	if (conn && conn->netdev)
+		privptr = netdev_priv(conn->netdev);
+	conn->prof.tx_pending--;
+	if (single_flag) {
+		if ((skb = skb_dequeue(&conn->commit_queue))) {
+			atomic_dec(&skb->users);
+			if (privptr) {
+				privptr->stats.tx_packets++;
+				privptr->stats.tx_bytes +=
+					(skb->len - NETIUCV_HDRLEN
+						  - NETIUCV_HDRLEN);
+			}
+			dev_kfree_skb_any(skb);
+		}
+	}
+	conn->tx_buff->data = conn->tx_buff->head;
+	skb_reset_tail_pointer(conn->tx_buff);
+	conn->tx_buff->len = 0;
+	spin_lock_irqsave(&conn->collect_lock, saveflags);
+	while ((skb = skb_dequeue(&conn->collect_queue))) {
+		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
+		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
+		       NETIUCV_HDRLEN);
+		skb_copy_from_linear_data(skb,
+					  skb_put(conn->tx_buff, skb->len),
+					  skb->len);
+		txbytes += skb->len;
+		txpackets++;
+		stat_maxcq++;
+		atomic_dec(&skb->users);
+		dev_kfree_skb_any(skb);
+	}
+	if (conn->collect_len > conn->prof.maxmulti)
+		conn->prof.maxmulti = conn->collect_len;
+	conn->collect_len = 0;
+	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
+	if (conn->tx_buff->len == 0) {
+		fsm_newstate(fi, CONN_STATE_IDLE);
+		return;
+	}
+
+	header.next = 0;
+	memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
+	conn->prof.send_stamp = current_kernel_time();
+	txmsg.class = 0;
+	txmsg.tag = 0;
+	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
+			       conn->tx_buff->data, conn->tx_buff->len);
+	conn->prof.doios_multi++;
+	conn->prof.txlen += conn->tx_buff->len;
+	conn->prof.tx_pending++;
+	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
+		conn->prof.tx_max_pending = conn->prof.tx_pending;
+	if (rc) {
+		conn->prof.tx_pending--;
+		fsm_newstate(fi, CONN_STATE_IDLE);
+		if (privptr)
+			privptr->stats.tx_errors += txpackets;
+		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
+	} else {
+		if (privptr) {
+			privptr->stats.tx_packets += txpackets;
+			privptr->stats.tx_bytes += txbytes;
+		}
+		if (stat_maxcq > conn->prof.maxcqueue)
+			conn->prof.maxcqueue = stat_maxcq;
+	}
+}
+
+static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_event *ev = arg;
+	struct iucv_connection *conn = ev->conn;
+	struct iucv_path *path = ev->data;
+	struct net_device *netdev = conn->netdev;
+	struct netiucv_priv *privptr = netdev_priv(netdev);
+	int rc;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	conn->path = path;
+	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
+	path->flags = 0;
+	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
+	if (rc) {
+		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
+		return;
+	}
+	fsm_newstate(fi, CONN_STATE_IDLE);
+	netdev->tx_queue_len = conn->path->msglim;
+	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
+}
+
+static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_event *ev = arg;
+	struct iucv_path *path = ev->data;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	iucv_path_sever(path, NULL);
+}
+
+static void conn_action_connack(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_connection *conn = arg;
+	struct net_device *netdev = conn->netdev;
+	struct netiucv_priv *privptr = netdev_priv(netdev);
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	fsm_deltimer(&conn->timer);
+	fsm_newstate(fi, CONN_STATE_IDLE);
+	netdev->tx_queue_len = conn->path->msglim;
+	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
+}
+
+static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_connection *conn = arg;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	fsm_deltimer(&conn->timer);
+	iucv_path_sever(conn->path, conn->userdata);
+	fsm_newstate(fi, CONN_STATE_STARTWAIT);
+}
+
+static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_connection *conn = arg;
+	struct net_device *netdev = conn->netdev;
+	struct netiucv_priv *privptr = netdev_priv(netdev);
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	fsm_deltimer(&conn->timer);
+	iucv_path_sever(conn->path, conn->userdata);
+	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
+			       "connection\n", netiucv_printuser(conn));
+	IUCV_DBF_TEXT(data, 2,
+		      "conn_action_connsever: Remote dropped connection\n");
+	fsm_newstate(fi, CONN_STATE_STARTWAIT);
+	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
+}
+
+static void conn_action_start(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_connection *conn = arg;
+	struct net_device *netdev = conn->netdev;
+	struct netiucv_priv *privptr = netdev_priv(netdev);
+	int rc;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	fsm_newstate(fi, CONN_STATE_STARTWAIT);
+
+	/*
+	 * We must set the state before calling iucv_connect because the
+	 * callback handler could be called at any point after the connection
+	 * request is sent
+	 */
+
+	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
+	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
+	IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
+		netdev->name, netiucv_printuser(conn));
+
+	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
+			       NULL, conn->userdata, conn);
+	switch (rc) {
+	case 0:
+		netdev->tx_queue_len = conn->path->msglim;
+		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
+			     CONN_EVENT_TIMER, conn);
+		return;
+	case 11:
+		dev_warn(privptr->dev,
+			"The IUCV device failed to connect to z/VM guest %s\n",
+			netiucv_printname(conn->userid, 8));
+		fsm_newstate(fi, CONN_STATE_STARTWAIT);
+		break;
+	case 12:
+		dev_warn(privptr->dev,
+			"The IUCV device failed to connect to the peer on z/VM"
+			" guest %s\n", netiucv_printname(conn->userid, 8));
+		fsm_newstate(fi, CONN_STATE_STARTWAIT);
+		break;
+	case 13:
+		dev_err(privptr->dev,
+			"Connecting the IUCV device would exceed the maximum"
+			" number of IUCV connections\n");
+		fsm_newstate(fi, CONN_STATE_CONNERR);
+		break;
+	case 14:
+		dev_err(privptr->dev,
+			"z/VM guest %s has too many IUCV connections"
+			" to connect with the IUCV device\n",
+			netiucv_printname(conn->userid, 8));
+		fsm_newstate(fi, CONN_STATE_CONNERR);
+		break;
+	case 15:
+		dev_err(privptr->dev,
+			"The IUCV device cannot connect to a z/VM guest with no"
+			" IUCV authorization\n");
+		fsm_newstate(fi, CONN_STATE_CONNERR);
+		break;
+	default:
+		dev_err(privptr->dev,
+			"Connecting the IUCV device failed with error %d\n",
+			rc);
+		fsm_newstate(fi, CONN_STATE_CONNERR);
+		break;
+	}
+	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
+	kfree(conn->path);
+	conn->path = NULL;
+}
+
+static void netiucv_purge_skb_queue(struct sk_buff_head *q)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(q))) {
+		atomic_dec(&skb->users);
+		dev_kfree_skb_any(skb);
+	}
+}
+
+static void conn_action_stop(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_event *ev = arg;
+	struct iucv_connection *conn = ev->conn;
+	struct net_device *netdev = conn->netdev;
+	struct netiucv_priv *privptr = netdev_priv(netdev);
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	fsm_deltimer(&conn->timer);
+	fsm_newstate(fi, CONN_STATE_STOPPED);
+	netiucv_purge_skb_queue(&conn->collect_queue);
+	if (conn->path) {
+		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
+		iucv_path_sever(conn->path, conn->userdata);
+		kfree(conn->path);
+		conn->path = NULL;
+	}
+	netiucv_purge_skb_queue(&conn->commit_queue);
+	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
+}
+
+static void conn_action_inval(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_connection *conn = arg;
+	struct net_device *netdev = conn->netdev;
+
+	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
+		netdev->name, conn->userid);
+}
+
+static const fsm_node conn_fsm[] = {
+	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
+	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
+
+	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
+	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
+	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
+	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
+	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
+	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
+	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
+
+	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
+        { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
+	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
+	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
+	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
+
+	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
+	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
+
+	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
+	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
+	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
+
+	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
+	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
+
+	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
+	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
+};
+
+static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
+
+
+/*
+ * Actions for interface - statemachine.
+ */
+
+/**
+ * dev_action_start
+ * @fi: An instance of an interface statemachine.
+ * @event: The event that just happened.
+ * @arg: Generic pointer, cast from struct net_device * upon call.
+ *
+ * Start up the connection by sending CONN_EVENT_START to it.
+ */
+static void dev_action_start(fsm_instance *fi, int event, void *arg)
+{
+	struct net_device   *dev = arg;
+	struct netiucv_priv *privptr = netdev_priv(dev);
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	fsm_newstate(fi, DEV_STATE_STARTWAIT);
+	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
+}
+
+/**
+ * Shut down the connection by sending CONN_EVENT_STOP to it.
+ *
+ * @param fi    An instance of an interface statemachine.
+ * @param event The event that just happened.
+ * @param arg   Generic pointer, cast from struct net_device * upon call.
+ */
+static void
+dev_action_stop(fsm_instance *fi, int event, void *arg)
+{
+	struct net_device   *dev = arg;
+	struct netiucv_priv *privptr = netdev_priv(dev);
+	struct iucv_event   ev;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	ev.conn = privptr->conn;
+
+	fsm_newstate(fi, DEV_STATE_STOPWAIT);
+	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
+}
+
+/**
+ * Called from connection statemachine
+ * when a connection is up and running.
+ *
+ * @param fi    An instance of an interface statemachine.
+ * @param event The event that just happened.
+ * @param arg   Generic pointer, cast from struct net_device * upon call.
+ */
+static void
+dev_action_connup(fsm_instance *fi, int event, void *arg)
+{
+	struct net_device   *dev = arg;
+	struct netiucv_priv *privptr = netdev_priv(dev);
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	switch (fsm_getstate(fi)) {
+		case DEV_STATE_STARTWAIT:
+			fsm_newstate(fi, DEV_STATE_RUNNING);
+			dev_info(privptr->dev,
+				"The IUCV device has been connected"
+				" successfully to %s\n",
+				netiucv_printuser(privptr->conn));
+			IUCV_DBF_TEXT(setup, 3,
+				"connection is up and running\n");
+			break;
+		case DEV_STATE_STOPWAIT:
+			IUCV_DBF_TEXT(data, 2,
+				"dev_action_connup: in DEV_STATE_STOPWAIT\n");
+			break;
+	}
+}
+
+/**
+ * Called from connection statemachine
+ * when a connection has been shutdown.
+ *
+ * @param fi    An instance of an interface statemachine.
+ * @param event The event that just happened.
+ * @param arg   Generic pointer, cast from struct net_device * upon call.
+ */
+static void
+dev_action_conndown(fsm_instance *fi, int event, void *arg)
+{
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	switch (fsm_getstate(fi)) {
+		case DEV_STATE_RUNNING:
+			fsm_newstate(fi, DEV_STATE_STARTWAIT);
+			break;
+		case DEV_STATE_STOPWAIT:
+			fsm_newstate(fi, DEV_STATE_STOPPED);
+			IUCV_DBF_TEXT(setup, 3, "connection is down\n");
+			break;
+	}
+}
+
+static const fsm_node dev_fsm[] = {
+	{ DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },
+
+	{ DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
+	{ DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },
+
+	{ DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
+	{ DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },
+
+	{ DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
+	{ DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
+	{ DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
+};
+
+static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
+
+/**
+ * Transmit a packet.
+ * This is a helper function for netiucv_tx().
+ *
+ * @param conn Connection to be used for sending.
+ * @param skb Pointer to struct sk_buff of packet to send.
+ *            The linklevel header has already been set up
+ *            by netiucv_tx().
+ *
+ * @return 0 on success, -ERRNO on failure.
+ */
+static int netiucv_transmit_skb(struct iucv_connection *conn,
+				struct sk_buff *skb)
+{
+	struct iucv_message msg;
+	unsigned long saveflags;
+	struct ll_header header;
+	int rc;
+
+	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
+		int l = skb->len + NETIUCV_HDRLEN;
+
+		spin_lock_irqsave(&conn->collect_lock, saveflags);
+		if (conn->collect_len + l >
+		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
+			rc = -EBUSY;
+			IUCV_DBF_TEXT(data, 2,
+				      "EBUSY from netiucv_transmit_skb\n");
+		} else {
+			atomic_inc(&skb->users);
+			skb_queue_tail(&conn->collect_queue, skb);
+			conn->collect_len += l;
+			rc = 0;
+		}
+		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
+	} else {
+		struct sk_buff *nskb = skb;
+		/**
+		 * Copy the skb to a newly allocated skb in lowmem only if the
+		 * data is located above 2G in memory or tailroom is < 2.
+		 */
+		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
+				    NETIUCV_HDRLEN)) >> 31;
+		int copied = 0;
+		if (hi || (skb_tailroom(skb) < 2)) {
+			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
+					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
+			if (!nskb) {
+				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
+				rc = -ENOMEM;
+				return rc;
+			} else {
+				skb_reserve(nskb, NETIUCV_HDRLEN);
+				memcpy(skb_put(nskb, skb->len),
+				       skb->data, skb->len);
+			}
+			copied = 1;
+		}
+		/**
+		 * The skb is now below 2G and has enough room. Add headers.
+		 */
+		header.next = nskb->len + NETIUCV_HDRLEN;
+		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
+		header.next = 0;
+		memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header,  NETIUCV_HDRLEN);
+
+		fsm_newstate(conn->fsm, CONN_STATE_TX);
+		conn->prof.send_stamp = current_kernel_time();
+
+		msg.tag = 1;
+		msg.class = 0;
+		rc = iucv_message_send(conn->path, &msg, 0, 0,
+				       nskb->data, nskb->len);
+		conn->prof.doios_single++;
+		conn->prof.txlen += skb->len;
+		conn->prof.tx_pending++;
+		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
+			conn->prof.tx_max_pending = conn->prof.tx_pending;
+		if (rc) {
+			struct netiucv_priv *privptr;
+			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
+			conn->prof.tx_pending--;
+			privptr = netdev_priv(conn->netdev);
+			if (privptr)
+				privptr->stats.tx_errors++;
+			if (copied)
+				dev_kfree_skb(nskb);
+			else {
+				/**
+				 * Remove our headers. They get added
+				 * again on retransmit.
+				 */
+				skb_pull(skb, NETIUCV_HDRLEN);
+				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
+			}
+			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
+		} else {
+			if (copied)
+				dev_kfree_skb(skb);
+			atomic_inc(&nskb->users);
+			skb_queue_tail(&conn->commit_queue, nskb);
+		}
+	}
+
+	return rc;
+}
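+
+/*
+ * Illustrative note: the IUCV message tag doubles as a "single skb" marker.
+ * netiucv_transmit_skb() sends a directly transmitted skb with msg.tag = 1
+ * and queues it on commit_queue; conn_action_txdone() reads the tag back as
+ * single_flag to know whether to dequeue and free exactly one skb, while
+ * multi-skb buffers built from collect_queue go out with tag = 0.
+ */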
+
+/*
+ * Interface API for upper network layers
+ */
+
+/**
+ * Open an interface.
+ * Called from generic network layer when ifconfig up is run.
+ *
+ * @param dev Pointer to interface struct.
+ *
+ * @return 0 on success, -ERRNO on failure. (Never fails.)
+ */
+static int netiucv_open(struct net_device *dev)
+{
+	struct netiucv_priv *priv = netdev_priv(dev);
+
+	fsm_event(priv->fsm, DEV_EVENT_START, dev);
+	return 0;
+}
+
+/**
+ * Close an interface.
+ * Called from generic network layer when ifconfig down is run.
+ *
+ * @param dev Pointer to interface struct.
+ *
+ * @return 0 on success, -ERRNO on failure. (Never fails.)
+ */
+static int netiucv_close(struct net_device *dev)
+{
+	struct netiucv_priv *priv = netdev_priv(dev);
+
+	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
+	return 0;
+}
+
+static int netiucv_pm_prepare(struct device *dev)
+{
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	return 0;
+}
+
+static void netiucv_pm_complete(struct device *dev)
+{
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	return;
+}
+
+/**
+ * netiucv_pm_freeze() - Freeze PM callback
+ * @dev:	netiucv device
+ *
+ * close open netiucv interfaces
+ */
+static int netiucv_pm_freeze(struct device *dev)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = NULL;
+	int rc = 0;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	if (priv && priv->conn)
+		ndev = priv->conn->netdev;
+	if (!ndev)
+		goto out;
+	netif_device_detach(ndev);
+	priv->pm_state = fsm_getstate(priv->fsm);
+	rc = netiucv_close(ndev);
+out:
+	return rc;
+}
+
+/**
+ * netiucv_pm_restore_thaw() - Thaw and restore PM callback
+ * @dev:	netiucv device
+ *
+ * re-open netiucv interfaces closed during freeze
+ */
+static int netiucv_pm_restore_thaw(struct device *dev)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = NULL;
+	int rc = 0;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	if (priv && priv->conn)
+		ndev = priv->conn->netdev;
+	if (!ndev)
+		goto out;
+	switch (priv->pm_state) {
+	case DEV_STATE_RUNNING:
+	case DEV_STATE_STARTWAIT:
+		rc = netiucv_open(ndev);
+		break;
+	default:
+		break;
+	}
+	netif_device_attach(ndev);
+out:
+	return rc;
+}
+
+/**
+ * Start transmission of a packet.
+ * Called from generic network device layer.
+ *
+ * @param skb Pointer to buffer containing the packet.
+ * @param dev Pointer to interface struct.
+ *
+ * @return 0 if packet consumed, !0 if packet rejected.
+ *         Note: If we return !0, then the packet is freed by
+ *               the generic network layer.
+ */
+static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct netiucv_priv *privptr = netdev_priv(dev);
+	int rc;
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	/**
+	 * Some sanity checks ...
+	 */
+	if (skb == NULL) {
+		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
+		privptr->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
+		IUCV_DBF_TEXT(data, 2,
+			"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
+		dev_kfree_skb(skb);
+		privptr->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	/**
+	 * If connection is not running, try to restart it
+	 * and throw away packet.
+	 */
+	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
+		dev_kfree_skb(skb);
+		privptr->stats.tx_dropped++;
+		privptr->stats.tx_errors++;
+		privptr->stats.tx_carrier_errors++;
+		return NETDEV_TX_OK;
+	}
+
+	if (netiucv_test_and_set_busy(dev)) {
+		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
+		return NETDEV_TX_BUSY;
+	}
+	dev->trans_start = jiffies;
+	rc = netiucv_transmit_skb(privptr->conn, skb);
+	netiucv_clear_busy(dev);
+	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
+}
+
+/**
+ * netiucv_stats
+ * @dev: Pointer to interface struct.
+ *
+ * Returns interface statistics of a device.
+ *
+ * Returns pointer to stats struct of this interface.
+ */
+static struct net_device_stats *netiucv_stats (struct net_device * dev)
+{
+	struct netiucv_priv *priv = netdev_priv(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return &priv->stats;
+}
+
+/**
+ * netiucv_change_mtu
+ * @dev: Pointer to interface struct.
+ * @new_mtu: The new MTU to use for this interface.
+ *
+ * Sets MTU of an interface.
+ *
+ * Returns 0 on success, -EINVAL if MTU is out of valid range.
+ *         (valid range is 576 .. NETIUCV_MTU_MAX).
+ */
+static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
+{
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
+		IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
+		return -EINVAL;
+	}
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/*
+ * attributes in sysfs
+ */
+
+static ssize_t user_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
+}
+
+static int netiucv_check_user(const char *buf, size_t count, char *username,
+			      char *userdata)
+{
+	const char *p;
+	int i;
+
+	p = strchr(buf, '.');
+	if ((p && ((count > 26) ||
+		   ((p - buf) > 8) ||
+		   (buf + count - p > 18))) ||
+	    (!p && (count > 9))) {
+		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
+		return -EINVAL;
+	}
+
+	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
+		if (isalnum(*p) || *p == '$') {
+			username[i] = toupper(*p);
+			continue;
+		}
+		if (*p == '\n')
+			/* trailing lf, grr */
+			break;
+		IUCV_DBF_TEXT_(setup, 2,
+			       "conn_write: invalid character %02x\n", *p);
+		return -EINVAL;
+	}
+	while (i < 8)
+		username[i++] = ' ';
+	username[8] = '\0';
+
+	if (*p == '.') {
+		p++;
+		for (i = 0; i < 16 && *p; i++, p++) {
+			if (*p == '\n')
+				break;
+			userdata[i] = toupper(*p);
+		}
+		while (i > 0 && i < 16)
+			userdata[i++] = ' ';
+	} else
+		memcpy(userdata, iucvMagic_ascii, 16);
+	userdata[16] = '\0';
+	ASCEBC(userdata, 16);
+
+	return 0;
+}
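+
+/*
+ * Illustrative note (the values below are made up): the 'user' attribute
+ * parsed above accepts either "VMUSERID" or "VMUSERID.userdata", e.g.
+ *
+ *	echo VMGUEST1 > .../user
+ *	echo VMGUEST1.SOMEDATA > .../user
+ *
+ * The userid part is limited to 8 characters ($ and alphanumerics,
+ * upper-cased), the optional userdata part to 16 characters; both are
+ * blank-padded and the userdata is converted to EBCDIC.
+ */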
+
+static ssize_t user_write(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = priv->conn->netdev;
+	char	username[9];
+	char	userdata[17];
+	int	rc;
+	struct iucv_connection *cp;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	rc = netiucv_check_user(buf, count, username, userdata);
+	if (rc)
+		return rc;
+
+	if (memcmp(username, priv->conn->userid, 9) &&
+	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
+		/* username changed while the interface is active. */
+		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
+		return -EPERM;
+	}
+	read_lock_bh(&iucv_connection_rwlock);
+	list_for_each_entry(cp, &iucv_connection_list, list) {
+		if (!strncmp(username, cp->userid, 9) &&
+		   !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
+			read_unlock_bh(&iucv_connection_rwlock);
+			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
+				"already exists\n", netiucv_printuser(cp));
+			return -EEXIST;
+		}
+	}
+	read_unlock_bh(&iucv_connection_rwlock);
+	memcpy(priv->conn->userid, username, 9);
+	memcpy(priv->conn->userdata, userdata, 17);
+	return count;
+}
+
+static DEVICE_ATTR(user, 0644, user_show, user_write);
+
+static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
+}
+
+static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = priv->conn->netdev;
+	char         *e;
+	int          bs1;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	if (count >= 39)
+		return -EINVAL;
+
+	bs1 = simple_strtoul(buf, &e, 0);
+
+	if (e && (!isspace(*e))) {
+		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
+			*e);
+		return -EINVAL;
+	}
+	if (bs1 > NETIUCV_BUFSIZE_MAX) {
+		IUCV_DBF_TEXT_(setup, 2,
+			"buffer_write: buffer size %d too large\n",
+			bs1);
+		return -EINVAL;
+	}
+	if ((ndev->flags & IFF_RUNNING) &&
+	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
+		IUCV_DBF_TEXT_(setup, 2,
+			"buffer_write: buffer size %d too small\n",
+			bs1);
+		return -EINVAL;
+	}
+	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
+		IUCV_DBF_TEXT_(setup, 2,
+			"buffer_write: buffer size %d too small\n",
+			bs1);
+		return -EINVAL;
+	}
+
+	priv->conn->max_buffsize = bs1;
+	if (!(ndev->flags & IFF_RUNNING))
+		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
+
+	return count;
+
+}
+
+static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
+
+static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
+}
+
+static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
+
+static ssize_t conn_fsm_show (struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
+}
+
+static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
+
+static ssize_t maxmulti_show (struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
+}
+
+static ssize_t maxmulti_write (struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	priv->conn->prof.maxmulti = 0;
+	return count;
+}
+
+static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
+
+static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
+}
+
+static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	priv->conn->prof.maxcqueue = 0;
+	return count;
+}
+
+static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
+
+static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
+}
+
+static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	priv->conn->prof.doios_single = 0;
+	return count;
+}
+
+static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
+
+static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
+}
+
+static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	priv->conn->prof.doios_multi = 0;
+	return count;
+}
+
+static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
+
+static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
+}
+
+static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	priv->conn->prof.txlen = 0;
+	return count;
+}
+
+static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
+
+static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
+}
+
+static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	priv->conn->prof.tx_time = 0;
+	return count;
+}
+
+static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
+
+static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
+}
+
+static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	priv->conn->prof.tx_pending = 0;
+	return count;
+}
+
+static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
+
+static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
+}
+
+static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	priv->conn->prof.tx_max_pending = 0;
+	return count;
+}
+
+static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
+
+static struct attribute *netiucv_attrs[] = {
+	&dev_attr_buffer.attr,
+	&dev_attr_user.attr,
+	NULL,
+};
+
+static struct attribute_group netiucv_attr_group = {
+	.attrs = netiucv_attrs,
+};
+
+static struct attribute *netiucv_stat_attrs[] = {
+	&dev_attr_device_fsm_state.attr,
+	&dev_attr_connection_fsm_state.attr,
+	&dev_attr_max_tx_buffer_used.attr,
+	&dev_attr_max_chained_skbs.attr,
+	&dev_attr_tx_single_write_ops.attr,
+	&dev_attr_tx_multi_write_ops.attr,
+	&dev_attr_netto_bytes.attr,
+	&dev_attr_max_tx_io_time.attr,
+	&dev_attr_tx_pending.attr,
+	&dev_attr_tx_max_pending.attr,
+	NULL,
+};
+
+static struct attribute_group netiucv_stat_attr_group = {
+	.name  = "stats",
+	.attrs = netiucv_stat_attrs,
+};
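+
+/*
+ * Illustrative sysfs layout for an interface named "iucv0" (device name
+ * "netiucv0"), assuming the usual iucv bus placement:
+ *
+ *   /sys/bus/iucv/devices/netiucv0/user
+ *   /sys/bus/iucv/devices/netiucv0/buffer
+ *   /sys/bus/iucv/devices/netiucv0/stats/device_fsm_state
+ *   /sys/bus/iucv/devices/netiucv0/stats/tx_pending
+ *   ...
+ */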
+
+static int netiucv_add_files(struct device *dev)
+{
+	int ret;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
+	if (ret)
+		return ret;
+	ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
+	if (ret)
+		sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
+	return ret;
+}
+
+static void netiucv_remove_files(struct device *dev)
+{
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
+	sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
+}
+
+static int netiucv_register_device(struct net_device *ndev)
+{
+	struct netiucv_priv *priv = netdev_priv(ndev);
+	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	int ret;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	if (dev) {
+		dev_set_name(dev, "net%s", ndev->name);
+		dev->bus = &iucv_bus;
+		dev->parent = iucv_root;
+		/*
+		 * The release function could be called after the
+		 * module has been unloaded. Its _only_ task is to
+		 * free the struct. Therefore, we specify kfree()
+		 * directly here. (Probably a little bit obfuscating
+		 * but legitimate ...).
+		 */
+		dev->release = (void (*)(struct device *))kfree;
+		dev->driver = &netiucv_driver;
+	} else
+		return -ENOMEM;
+
+	ret = device_register(dev);
+	if (ret) {
+		put_device(dev);
+		return ret;
+	}
+	ret = netiucv_add_files(dev);
+	if (ret)
+		goto out_unreg;
+	priv->dev = dev;
+	dev_set_drvdata(dev, priv);
+	return 0;
+
+out_unreg:
+	device_unregister(dev);
+	return ret;
+}
+
+static void netiucv_unregister_device(struct device *dev)
+{
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	netiucv_remove_files(dev);
+	device_unregister(dev);
+}
+
+/**
+ * Allocate and initialize a new connection structure.
+ * Add it to the list of netiucv connections.
+ */
+static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
+						      char *username,
+						      char *userdata)
+{
+	struct iucv_connection *conn;
+
+	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+	if (!conn)
+		goto out;
+	skb_queue_head_init(&conn->collect_queue);
+	skb_queue_head_init(&conn->commit_queue);
+	spin_lock_init(&conn->collect_lock);
+	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
+	conn->netdev = dev;
+
+	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
+	if (!conn->rx_buff)
+		goto out_conn;
+	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
+	if (!conn->tx_buff)
+		goto out_rx;
+	conn->fsm = init_fsm("netiucvconn", conn_state_names,
+			     conn_event_names, NR_CONN_STATES,
+			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
+			     GFP_KERNEL);
+	if (!conn->fsm)
+		goto out_tx;
+
+	fsm_settimer(conn->fsm, &conn->timer);
+	fsm_newstate(conn->fsm, CONN_STATE_INVALID);
+
+	if (userdata)
+		memcpy(conn->userdata, userdata, 17);
+	if (username) {
+		memcpy(conn->userid, username, 9);
+		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
+	}
+
+	write_lock_bh(&iucv_connection_rwlock);
+	list_add_tail(&conn->list, &iucv_connection_list);
+	write_unlock_bh(&iucv_connection_rwlock);
+	return conn;
+
+out_tx:
+	kfree_skb(conn->tx_buff);
+out_rx:
+	kfree_skb(conn->rx_buff);
+out_conn:
+	kfree(conn);
+out:
+	return NULL;
+}
+
+/**
+ * Release a connection structure and remove it from the
+ * list of netiucv connections.
+ */
+static void netiucv_remove_connection(struct iucv_connection *conn)
+{
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	write_lock_bh(&iucv_connection_rwlock);
+	list_del_init(&conn->list);
+	write_unlock_bh(&iucv_connection_rwlock);
+	fsm_deltimer(&conn->timer);
+	netiucv_purge_skb_queue(&conn->collect_queue);
+	if (conn->path) {
+		iucv_path_sever(conn->path, conn->userdata);
+		kfree(conn->path);
+		conn->path = NULL;
+	}
+	netiucv_purge_skb_queue(&conn->commit_queue);
+	kfree_fsm(conn->fsm);
+	kfree_skb(conn->rx_buff);
+	kfree_skb(conn->tx_buff);
+}
+
+/**
+ * Release all resources of a net device.
+ */
+static void netiucv_free_netdevice(struct net_device *dev)
+{
+	struct netiucv_priv *privptr;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	if (!dev)
+		return;
+
+	privptr = netdev_priv(dev);
+	if (privptr) {
+		if (privptr->conn)
+			netiucv_remove_connection(privptr->conn);
+		if (privptr->fsm)
+			kfree_fsm(privptr->fsm);
+		privptr->conn = NULL; privptr->fsm = NULL;
+		/* privptr gets freed by free_netdev() */
+	}
+	free_netdev(dev);
+}
+
+/**
+ * Initialize a net device. (Called from kernel in alloc_netdev())
+ */
+static const struct net_device_ops netiucv_netdev_ops = {
+	.ndo_open		= netiucv_open,
+	.ndo_stop		= netiucv_close,
+	.ndo_get_stats		= netiucv_stats,
+	.ndo_start_xmit		= netiucv_tx,
+	.ndo_change_mtu	   	= netiucv_change_mtu,
+};
+
+static void netiucv_setup_netdevice(struct net_device *dev)
+{
+	dev->mtu	         = NETIUCV_MTU_DEFAULT;
+	dev->destructor          = netiucv_free_netdevice;
+	dev->hard_header_len     = NETIUCV_HDRLEN;
+	dev->addr_len            = 0;
+	dev->type                = ARPHRD_SLIP;
+	dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
+	dev->flags	         = IFF_POINTOPOINT | IFF_NOARP;
+	dev->netdev_ops		 = &netiucv_netdev_ops;
+}
+
+/**
+ * Allocate and initialize everything of a net device.
+ */
+static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
+{
+	struct netiucv_priv *privptr;
+	struct net_device *dev;
+
+	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
+			   netiucv_setup_netdevice);
+	if (!dev)
+		return NULL;
+	if (dev_alloc_name(dev, dev->name) < 0)
+		goto out_netdev;
+
+	privptr = netdev_priv(dev);
+	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
+				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
+				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
+	if (!privptr->fsm)
+		goto out_netdev;
+
+	privptr->conn = netiucv_new_connection(dev, username, userdata);
+	if (!privptr->conn) {
+		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
+		goto out_fsm;
+	}
+	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
+	return dev;
+
+out_fsm:
+	kfree_fsm(privptr->fsm);
+out_netdev:
+	free_netdev(dev);
+	return NULL;
+}
+
+static ssize_t conn_write(struct device_driver *drv,
+			  const char *buf, size_t count)
+{
+	char username[9];
+	char userdata[17];
+	int rc;
+	struct net_device *dev;
+	struct netiucv_priv *priv;
+	struct iucv_connection *cp;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	rc = netiucv_check_user(buf, count, username, userdata);
+	if (rc)
+		return rc;
+
+	read_lock_bh(&iucv_connection_rwlock);
+	list_for_each_entry(cp, &iucv_connection_list, list) {
+		if (!strncmp(username, cp->userid, 9) &&
+		    !strncmp(userdata, cp->userdata, 17)) {
+			read_unlock_bh(&iucv_connection_rwlock);
+			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
+				"already exists\n", netiucv_printuser(cp));
+			return -EEXIST;
+		}
+	}
+	read_unlock_bh(&iucv_connection_rwlock);
+
+	dev = netiucv_init_netdevice(username, userdata);
+	if (!dev) {
+		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
+		return -ENODEV;
+	}
+
+	rc = netiucv_register_device(dev);
+	if (rc) {
+		IUCV_DBF_TEXT_(setup, 2,
+			"ret %d from netiucv_register_device\n", rc);
+		goto out_free_ndev;
+	}
+
+	/* sysfs magic */
+	priv = netdev_priv(dev);
+	SET_NETDEV_DEV(dev, priv->dev);
+
+	rc = register_netdev(dev);
+	if (rc)
+		goto out_unreg;
+
+	dev_info(priv->dev, "The IUCV interface to %s has been established "
+			    "successfully\n",
+		netiucv_printuser(priv->conn));
+
+	return count;
+
+out_unreg:
+	netiucv_unregister_device(priv->dev);
+out_free_ndev:
+	netiucv_free_netdevice(dev);
+	return rc;
+}
+
+static DRIVER_ATTR(connection, 0200, NULL, conn_write);
+
+static ssize_t remove_write (struct device_driver *drv,
+			     const char *buf, size_t count)
+{
+	struct iucv_connection *cp;
+	struct net_device *ndev;
+	struct netiucv_priv *priv;
+	struct device *dev;
+	char name[IFNAMSIZ];
+	const char *p;
+	int i;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	if (count >= IFNAMSIZ)
+		count = IFNAMSIZ - 1;
+
+	for (i = 0, p = buf; i < count && *p; i++, p++) {
+		if (*p == '\n' || *p == ' ')
+			/* trailing lf, grr */
+			break;
+		name[i] = *p;
+	}
+	name[i] = '\0';
+
+	read_lock_bh(&iucv_connection_rwlock);
+	list_for_each_entry(cp, &iucv_connection_list, list) {
+		ndev = cp->netdev;
+		priv = netdev_priv(ndev);
+		dev = priv->dev;
+		if (strncmp(name, ndev->name, count))
+			continue;
+		read_unlock_bh(&iucv_connection_rwlock);
+		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
+			dev_warn(dev, "The IUCV device is connected"
+				" to %s and cannot be removed\n",
+				priv->conn->userid);
+			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
+			return -EPERM;
+		}
+		unregister_netdev(ndev);
+		netiucv_unregister_device(dev);
+		return count;
+	}
+	read_unlock_bh(&iucv_connection_rwlock);
+	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
+	return -EINVAL;
+}
+
+static DRIVER_ATTR(remove, 0200, NULL, remove_write);
+
+static struct attribute * netiucv_drv_attrs[] = {
+	&driver_attr_connection.attr,
+	&driver_attr_remove.attr,
+	NULL,
+};
+
+static struct attribute_group netiucv_drv_attr_group = {
+	.attrs = netiucv_drv_attrs,
+};
+
+static const struct attribute_group *netiucv_drv_attr_groups[] = {
+	&netiucv_drv_attr_group,
+	NULL,
+};
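+
+/*
+ * Sketch of how the driver attributes defined above are typically used
+ * (paths and the peer user ID are illustrative):
+ *
+ *   echo LNXPEER1 > /sys/bus/iucv/drivers/netiucv/connection
+ *   ip link set iucv0 up
+ *   ...
+ *   ip link set iucv0 down
+ *   echo iucv0 > /sys/bus/iucv/drivers/netiucv/remove
+ */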
+
+static void netiucv_banner(void)
+{
+	pr_info("driver initialized\n");
+}
+
+static void __exit netiucv_exit(void)
+{
+	struct iucv_connection *cp;
+	struct net_device *ndev;
+	struct netiucv_priv *priv;
+	struct device *dev;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	while (!list_empty(&iucv_connection_list)) {
+		cp = list_entry(iucv_connection_list.next,
+				struct iucv_connection, list);
+		ndev = cp->netdev;
+		priv = netdev_priv(ndev);
+		dev = priv->dev;
+
+		unregister_netdev(ndev);
+		netiucv_unregister_device(dev);
+	}
+
+	device_unregister(netiucv_dev);
+	driver_unregister(&netiucv_driver);
+	iucv_unregister(&netiucv_handler, 1);
+	iucv_unregister_dbf_views();
+
+	pr_info("driver unloaded\n");
+	return;
+}
+
+static int __init netiucv_init(void)
+{
+	int rc;
+
+	rc = iucv_register_dbf_views();
+	if (rc)
+		goto out;
+	rc = iucv_register(&netiucv_handler, 1);
+	if (rc)
+		goto out_dbf;
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	netiucv_driver.groups = netiucv_drv_attr_groups;
+	rc = driver_register(&netiucv_driver);
+	if (rc) {
+		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
+		goto out_iucv;
+	}
+	/* establish dummy device */
+	netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!netiucv_dev) {
+		rc = -ENOMEM;
+		goto out_driver;
+	}
+	dev_set_name(netiucv_dev, "netiucv");
+	netiucv_dev->bus = &iucv_bus;
+	netiucv_dev->parent = iucv_root;
+	netiucv_dev->release = (void (*)(struct device *))kfree;
+	netiucv_dev->driver = &netiucv_driver;
+	rc = device_register(netiucv_dev);
+	if (rc) {
+		put_device(netiucv_dev);
+		goto out_driver;
+	}
+	netiucv_banner();
+	return rc;
+
+out_driver:
+	driver_unregister(&netiucv_driver);
+out_iucv:
+	iucv_unregister(&netiucv_handler, 1);
+out_dbf:
+	iucv_unregister_dbf_views();
+out:
+	return rc;
+}
+
+module_init(netiucv_init);
+module_exit(netiucv_exit);
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_core.h b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_core.h
new file mode 100644
index 0000000..ec7921b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_core.h
@@ -0,0 +1,942 @@
+/*
+ *  drivers/s390/net/qeth_core.h
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ *		 Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#ifndef __QETH_CORE_H__
+#define __QETH_CORE_H__
+
+#include <linux/if.h>
+#include <linux/if_arp.h>
+#include <linux/if_tr.h>
+#include <linux/trdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ctype.h>
+#include <linux/in6.h>
+#include <linux/bitops.h>
+#include <linux/seq_file.h>
+#include <linux/ethtool.h>
+
+#include <net/ipv6.h>
+#include <net/if_inet6.h>
+#include <net/addrconf.h>
+
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <asm/sysinfo.h>
+
+#include "qeth_core_mpc.h"
+
+/**
+ * Debug Facility stuff
+ */
+enum qeth_dbf_names {
+	QETH_DBF_SETUP,
+	QETH_DBF_MSG,
+	QETH_DBF_CTRL,
+	QETH_DBF_INFOS	/* must be last element */
+};
+
+struct qeth_dbf_info {
+	char name[DEBUG_MAX_NAME_LEN];
+	int pages;
+	int areas;
+	int len;
+	int level;
+	struct debug_view *view;
+	debug_info_t *id;
+};
+
+#define QETH_DBF_CTRL_LEN 256
+
+#define QETH_DBF_TEXT(name, level, text) \
+	debug_text_event(qeth_dbf[QETH_DBF_##name].id, level, text)
+
+#define QETH_DBF_HEX(name, level, addr, len) \
+	debug_event(qeth_dbf[QETH_DBF_##name].id, level, (void *)(addr), len)
+
+#define QETH_DBF_MESSAGE(level, text...) \
+	debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text)
+
+#define QETH_DBF_TEXT_(name, level, text...) \
+	qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text)
+
+#define QETH_CARD_TEXT(card, level, text) \
+	debug_text_event(card->debug, level, text)
+
+#define QETH_CARD_HEX(card, level, addr, len) \
+	debug_event(card->debug, level, (void *)(addr), len)
+
+#define QETH_CARD_MESSAGE(card, text...) \
+	debug_sprintf_event(card->debug, level, text)
+
+#define QETH_CARD_TEXT_(card, level, text...) \
+	qeth_dbf_longtext(card->debug, level, text)
+
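+/*
+ * Typical invocations of the debug macros above, as used further down in
+ * this file:
+ *
+ *   QETH_CARD_TEXT(card, 5, "clwrklst");
+ *   QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ *   QETH_DBF_HEX(SETUP, 2, &card->qdio.c_q, sizeof(void *));
+ */
+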
+#define SENSE_COMMAND_REJECT_BYTE 0
+#define SENSE_COMMAND_REJECT_FLAG 0x80
+#define SENSE_RESETTING_EVENT_BYTE 1
+#define SENSE_RESETTING_EVENT_FLAG 0x80
+
+/*
+ * Common IO related definitions
+ */
+#define CARD_RDEV(card) card->read.ccwdev
+#define CARD_WDEV(card) card->write.ccwdev
+#define CARD_DDEV(card) card->data.ccwdev
+#define CARD_BUS_ID(card) dev_name(&card->gdev->dev)
+#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
+#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
+#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
+#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev)
+
+/**
+ * card stuff
+ */
+struct qeth_perf_stats {
+	unsigned int bufs_rec;
+	unsigned int bufs_sent;
+
+	unsigned int skbs_sent_pack;
+	unsigned int bufs_sent_pack;
+
+	unsigned int sc_dp_p;
+	unsigned int sc_p_dp;
+	/* qdio_cq_handler: number of times called, time spent in */
+	__u64 cq_start_time;
+	unsigned int cq_cnt;
+	unsigned int cq_time;
+	/* qdio_input_handler: number of times called, time spent in */
+	__u64 inbound_start_time;
+	unsigned int inbound_cnt;
+	unsigned int inbound_time;
+	/* qeth_send_packet: number of times called, time spent in */
+	__u64 outbound_start_time;
+	unsigned int outbound_cnt;
+	unsigned int outbound_time;
+	/* qdio_output_handler: number of times called, time spent in */
+	__u64 outbound_handler_start_time;
+	unsigned int outbound_handler_cnt;
+	unsigned int outbound_handler_time;
+	/* number of calls to and time spent in do_QDIO for inbound queue */
+	__u64 inbound_do_qdio_start_time;
+	unsigned int inbound_do_qdio_cnt;
+	unsigned int inbound_do_qdio_time;
+	/* number of calls to and time spent in do_QDIO for outbound queues */
+	__u64 outbound_do_qdio_start_time;
+	unsigned int outbound_do_qdio_cnt;
+	unsigned int outbound_do_qdio_time;
+	unsigned int large_send_bytes;
+	unsigned int large_send_cnt;
+	unsigned int sg_skbs_sent;
+	unsigned int sg_frags_sent;
+	/* initial values when measuring starts */
+	unsigned long initial_rx_packets;
+	unsigned long initial_tx_packets;
+	/* inbound scatter gather data */
+	unsigned int sg_skbs_rx;
+	unsigned int sg_frags_rx;
+	unsigned int sg_alloc_page_rx;
+	unsigned int tx_csum;
+	unsigned int tx_lin;
+};
+
+/* Routing stuff */
+struct qeth_routing_info {
+	enum qeth_routing_types type;
+};
+
+/* IPA stuff */
+struct qeth_ipa_info {
+	__u32 supported_funcs;
+	__u32 enabled_funcs;
+};
+
+static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
+		enum qeth_ipa_funcs func)
+{
+	return (ipa->supported_funcs & func);
+}
+
+static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
+		enum qeth_ipa_funcs func)
+{
+	return (ipa->supported_funcs & ipa->enabled_funcs & func);
+}
+
+#define qeth_adp_supported(c, f) \
+	qeth_is_ipa_supported(&c->options.adp, f)
+#define qeth_adp_enabled(c, f) \
+	qeth_is_ipa_enabled(&c->options.adp, f)
+#define qeth_is_supported(c, f) \
+	qeth_is_ipa_supported(&c->options.ipa4, f)
+#define qeth_is_enabled(c, f) \
+	qeth_is_ipa_enabled(&c->options.ipa4, f)
+#define qeth_is_supported6(c, f) \
+	qeth_is_ipa_supported(&c->options.ipa6, f)
+#define qeth_is_enabled6(c, f) \
+	qeth_is_ipa_enabled(&c->options.ipa6, f)
+#define qeth_is_ipafunc_supported(c, prot, f) \
+	 ((prot == QETH_PROT_IPV6) ? \
+		qeth_is_supported6(c, f) : qeth_is_supported(c, f))
+#define qeth_is_ipafunc_enabled(c, prot, f) \
+	 ((prot == QETH_PROT_IPV6) ? \
+		qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
+
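+/*
+ * Usage sketch for the feature-check macros above; IPA_OUTBOUND_CHECKSUM
+ * stands for one of the enum qeth_ipa_funcs values defined in
+ * qeth_core_mpc.h:
+ *
+ *   if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
+ *           enable hardware TX checksumming
+ */
+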
+#define QETH_IDX_FUNC_LEVEL_OSD		 0x0101
+#define QETH_IDX_FUNC_LEVEL_IQD		 0x4108
+
+#define QETH_MODELLIST_ARRAY \
+	{{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \
+	 {0x1731, 0x05, 0x1732, QETH_CARD_TYPE_IQD, QETH_MAX_QUEUES, 0x103}, \
+	 {0x1731, 0x06, 0x1732, QETH_CARD_TYPE_OSN, QETH_MAX_QUEUES, 0}, \
+	 {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSM, QETH_MAX_QUEUES, 0}, \
+	 {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSX, QETH_MAX_QUEUES, 0}, \
+	 {0, 0, 0, 0, 0, 0} }
+#define QETH_CU_TYPE_IND	0
+#define QETH_CU_MODEL_IND	1
+#define QETH_DEV_TYPE_IND	2
+#define QETH_DEV_MODEL_IND	3
+#define QETH_QUEUE_NO_IND	4
+#define QETH_MULTICAST_IND	5
+
+#define QETH_REAL_CARD		1
+#define QETH_VLAN_CARD		2
+#define QETH_BUFSIZE		4096
+
+/**
+ * some more defs
+ */
+#define QETH_TX_TIMEOUT		(100 * HZ)
+#define QETH_RCD_TIMEOUT	(60 * HZ)
+#define QETH_RECLAIM_WORK_TIME	HZ
+#define QETH_HEADER_SIZE	32
+#define QETH_MAX_PORTNO		15
+
+/*IPv6 address autoconfiguration stuff*/
+#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
+#define UNIQUE_ID_NOT_BY_CARD		0x10000
+
+/*****************************************************************************/
+/* QDIO queue and buffer handling                                            */
+/*****************************************************************************/
+#define QETH_MAX_QUEUES 4
+#define QETH_IN_BUF_SIZE_DEFAULT 65536
+#define QETH_IN_BUF_COUNT_DEFAULT 64
+#define QETH_IN_BUF_COUNT_HSDEFAULT 128
+#define QETH_IN_BUF_COUNT_MIN 8
+#define QETH_IN_BUF_COUNT_MAX 128
+#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
+#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
+		 ((card)->qdio.in_buf_pool.buf_count / 2)
+
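+/*
+ * Example: with the default pool size QETH_IN_BUF_COUNT_DEFAULT (64), the
+ * requeue threshold above evaluates to 64 / 2 = 32 buffers.
+ */
+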
+/* buffers we have to be behind before we get a PCI */
+#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
+/*enqueued free buffers left before we get a PCI*/
+#define QETH_PCI_THRESHOLD_B(card) 0
+/*not used unless the microcode gets patched*/
+#define QETH_PCI_TIMER_VALUE(card) 3
+
+/* priority queueing */
+#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
+#define QETH_DEFAULT_QUEUE    2
+#define QETH_NO_PRIO_QUEUEING 0
+#define QETH_PRIO_Q_ING_PREC  1
+#define QETH_PRIO_Q_ING_TOS   2
+#define IP_TOS_LOWDELAY 0x10
+#define IP_TOS_HIGHTHROUGHPUT 0x08
+#define IP_TOS_HIGHRELIABILITY 0x04
+#define IP_TOS_NOTIMPORTANT 0x02
+
+/* Packing */
+#define QETH_LOW_WATERMARK_PACK  2
+#define QETH_HIGH_WATERMARK_PACK 5
+#define QETH_WATERMARK_PACK_FUZZ 1
+
+#define QETH_IP_HEADER_SIZE 40
+
+/* large receive scatter gather copy break */
+#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
+#define QETH_RX_PULL_LEN 256
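+/* e.g. with 4 KiB pages, QETH_RX_SG_CB evaluates to 2048 bytes */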
+
+struct qeth_hdr_layer3 {
+	__u8  id;
+	__u8  flags;
+	__u16 inbound_checksum; /*TSO:__u16 seqno */
+	__u32 token;		/*TSO: __u32 reserved */
+	__u16 length;
+	__u8  vlan_prio;
+	__u8  ext_flags;
+	__u16 vlan_id;
+	__u16 frame_offset;
+	__u8  dest_addr[16];
+} __attribute__ ((packed));
+
+struct qeth_hdr_layer2 {
+	__u8 id;
+	__u8 flags[3];
+	__u8 port_no;
+	__u8 hdr_length;
+	__u16 pkt_length;
+	__u16 seq_no;
+	__u16 vlan_id;
+	__u32 reserved;
+	__u8 reserved2[16];
+} __attribute__ ((packed));
+
+struct qeth_hdr_osn {
+	__u8 id;
+	__u8 reserved;
+	__u16 seq_no;
+	__u16 reserved2;
+	__u16 control_flags;
+	__u16 pdu_length;
+	__u8 reserved3[18];
+	__u32 ccid;
+} __attribute__ ((packed));
+
+struct qeth_hdr {
+	union {
+		struct qeth_hdr_layer2 l2;
+		struct qeth_hdr_layer3 l3;
+		struct qeth_hdr_osn    osn;
+	} hdr;
+} __attribute__ ((packed));
+
+/*TCP Segmentation Offload header*/
+struct qeth_hdr_ext_tso {
+	__u16 hdr_tot_len;
+	__u8  imb_hdr_no;
+	__u8  reserved;
+	__u8  hdr_type;
+	__u8  hdr_version;
+	__u16 hdr_len;
+	__u32 payload_len;
+	__u16 mss;
+	__u16 dg_hdr_len;
+	__u8  padding[16];
+} __attribute__ ((packed));
+
+struct qeth_hdr_tso {
+	struct qeth_hdr hdr;	/*hdr->hdr.l3.xxx*/
+	struct qeth_hdr_ext_tso ext;
+} __attribute__ ((packed));
+
+
+/* flags for qeth_hdr.flags */
+#define QETH_HDR_PASSTHRU 0x10
+#define QETH_HDR_IPV6     0x80
+#define QETH_HDR_CAST_MASK 0x07
+enum qeth_cast_flags {
+	QETH_CAST_UNICAST   = 0x06,
+	QETH_CAST_MULTICAST = 0x04,
+	QETH_CAST_BROADCAST = 0x05,
+	QETH_CAST_ANYCAST   = 0x07,
+	QETH_CAST_NOCAST    = 0x00,
+};
+
+enum qeth_layer2_frame_flags {
+	QETH_LAYER2_FLAG_MULTICAST = 0x01,
+	QETH_LAYER2_FLAG_BROADCAST = 0x02,
+	QETH_LAYER2_FLAG_UNICAST   = 0x04,
+	QETH_LAYER2_FLAG_VLAN      = 0x10,
+};
+
+enum qeth_header_ids {
+	QETH_HEADER_TYPE_LAYER3 = 0x01,
+	QETH_HEADER_TYPE_LAYER2 = 0x02,
+	QETH_HEADER_TYPE_TSO	= 0x03,
+	QETH_HEADER_TYPE_OSN    = 0x04,
+};
+/* flags for qeth_hdr.ext_flags */
+#define QETH_HDR_EXT_VLAN_FRAME       0x01
+#define QETH_HDR_EXT_TOKEN_ID         0x02
+#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04
+#define QETH_HDR_EXT_SRC_MAC_ADDR     0x08
+#define QETH_HDR_EXT_CSUM_HDR_REQ     0x10
+#define QETH_HDR_EXT_CSUM_TRANSP_REQ  0x20
+#define QETH_HDR_EXT_UDP	      0x40 /*bit off for TCP*/
+
+static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
+{
+	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
+}
+
+enum qeth_qdio_buffer_states {
+	/*
+	 * inbound: read out by driver; owned by hardware in order to be filled
+	 * outbound: owned by driver in order to be filled
+	 */
+	QETH_QDIO_BUF_EMPTY,
+	/*
+	 * inbound: filled by hardware; owned by driver in order to be read out
+	 * outbound: filled by driver; owned by hardware in order to be sent
+	 */
+	QETH_QDIO_BUF_PRIMED,
+	/*
+	 * inbound: not applicable
+	 * outbound: identified to be pending in TPQ
+	 */
+	QETH_QDIO_BUF_PENDING,
+	/*
+	 * inbound: not applicable
+	 * outbound: found in completion queue
+	 */
+	QETH_QDIO_BUF_IN_CQ,
+	/*
+	 * inbound: not applicable
+	 * outbound: handled via transfer pending / completion queue
+	 */
+	QETH_QDIO_BUF_HANDLED_DELAYED,
+};
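+
+/*
+ * Outbound buffer life cycle when the completion queue is enabled:
+ * EMPTY -> PRIMED -> (back to EMPTY on normal completion) or
+ * PENDING -> IN_CQ -> HANDLED_DELAYED once the hardware reports the
+ * buffer via the completion queue (see qeth_qdio_handle_aob() and
+ * qeth_cleanup_handled_pending() in qeth_core_main.c).
+ */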
+
+enum qeth_qdio_info_states {
+	QETH_QDIO_UNINITIALIZED,
+	QETH_QDIO_ALLOCATED,
+	QETH_QDIO_ESTABLISHED,
+	QETH_QDIO_CLEANING
+};
+
+struct qeth_buffer_pool_entry {
+	struct list_head list;
+	struct list_head init_list;
+	void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
+};
+
+struct qeth_qdio_buffer_pool {
+	struct list_head entry_list;
+	int buf_count;
+};
+
+struct qeth_qdio_buffer {
+	struct qdio_buffer *buffer;
+	/* the buffer pool entry currently associated to this buffer */
+	struct qeth_buffer_pool_entry *pool_entry;
+	struct sk_buff *rx_skb;
+};
+
+struct qeth_qdio_q {
+	struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
+	struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
+	int next_buf_to_init;
+} __attribute__ ((aligned(256)));
+
+struct qeth_qdio_out_buffer {
+	struct qdio_buffer *buffer;
+	atomic_t state;
+	int next_element_to_fill;
+	struct sk_buff_head skb_list;
+	int is_header[16];
+
+	struct qaob *aob;
+	struct qeth_qdio_out_q *q;
+	struct qeth_qdio_out_buffer *next_pending;
+};
+
+struct qeth_card;
+
+enum qeth_out_q_states {
+	QETH_OUT_Q_UNLOCKED,
+	QETH_OUT_Q_LOCKED,
+	QETH_OUT_Q_LOCKED_FLUSH,
+};
+
+struct qeth_qdio_out_q {
+	struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
+	struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
+	struct qdio_outbuf_state *bufstates; /* convenience pointer */
+	int queue_no;
+	struct qeth_card *card;
+	atomic_t state;
+	int do_pack;
+	/*
+	 * index of buffer to be filled by driver; state EMPTY or PACKING
+	 */
+	int next_buf_to_fill;
+	/*
+	 * number of buffers that are currently filled (PRIMED)
+	 * -> these buffers are hardware-owned
+	 */
+	atomic_t used_buffers;
+	/* indicates whether PCI flag must be set (or if one is outstanding) */
+	atomic_t set_pci_flags_count;
+} __attribute__ ((aligned(256)));
+
+struct qeth_qdio_info {
+	atomic_t state;
+	/* input */
+	int no_in_queues;
+	struct qeth_qdio_q *in_q;
+	struct qeth_qdio_q *c_q;
+	struct qeth_qdio_buffer_pool in_buf_pool;
+	struct qeth_qdio_buffer_pool init_pool;
+	int in_buf_size;
+
+	/* output */
+	int no_out_queues;
+	struct qeth_qdio_out_q **out_qs;
+	struct qdio_outbuf_state *out_bufstates;
+
+	/* priority queueing */
+	int do_prio_queueing;
+	int default_out_queue;
+};
+
+enum qeth_send_errors {
+	QETH_SEND_ERROR_NONE,
+	QETH_SEND_ERROR_LINK_FAILURE,
+	QETH_SEND_ERROR_RETRY,
+	QETH_SEND_ERROR_KICK_IT,
+};
+
+#define QETH_ETH_MAC_V4      0x0100 /* like v4 */
+#define QETH_ETH_MAC_V6      0x3333 /* like v6 */
+/* tr mc mac is longer, but that will be enough to detect mc frames */
+#define QETH_TR_MAC_NC       0xc000 /* non-canonical */
+#define QETH_TR_MAC_C        0x0300 /* canonical */
+
+#define DEFAULT_ADD_HHLEN 0
+#define MAX_ADD_HHLEN 1024
+
+/**
+ * buffer stuff for read channel
+ */
+#define QETH_CMD_BUFFER_NO	8
+
+/**
+ *  channel state machine
+ */
+enum qeth_channel_states {
+	CH_STATE_UP,
+	CH_STATE_DOWN,
+	CH_STATE_ACTIVATING,
+	CH_STATE_HALTED,
+	CH_STATE_STOPPED,
+	CH_STATE_RCD,
+	CH_STATE_RCD_DONE,
+};
+/**
+ * card state machine
+ */
+enum qeth_card_states {
+	CARD_STATE_DOWN,
+	CARD_STATE_HARDSETUP,
+	CARD_STATE_SOFTSETUP,
+	CARD_STATE_UP,
+	CARD_STATE_RECOVER,
+};
+
+/**
+ * Protocol versions
+ */
+enum qeth_prot_versions {
+	QETH_PROT_IPV4 = 0x0004,
+	QETH_PROT_IPV6 = 0x0006,
+};
+
+enum qeth_ip_types {
+	QETH_IP_TYPE_NORMAL,
+	QETH_IP_TYPE_VIPA,
+	QETH_IP_TYPE_RXIP,
+	QETH_IP_TYPE_DEL_ALL_MC,
+};
+
+enum qeth_cmd_buffer_state {
+	BUF_STATE_FREE,
+	BUF_STATE_LOCKED,
+	BUF_STATE_PROCESSED,
+};
+
+enum qeth_cq {
+	QETH_CQ_DISABLED = 0,
+	QETH_CQ_ENABLED = 1,
+	QETH_CQ_NOTAVAILABLE = 2,
+};
+
+struct qeth_ipato {
+	int enabled;
+	int invert4;
+	int invert6;
+	struct list_head entries;
+};
+
+struct qeth_channel;
+
+struct qeth_cmd_buffer {
+	enum qeth_cmd_buffer_state state;
+	struct qeth_channel *channel;
+	unsigned char *data;
+	int rc;
+	void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
+};
+
+/**
+ * definition of a qeth channel, used for read and write
+ */
+struct qeth_channel {
+	enum qeth_channel_states state;
+	struct ccw1 ccw;
+	spinlock_t iob_lock;
+	wait_queue_head_t wait_q;
+	struct tasklet_struct irq_tasklet;
+	struct ccw_device *ccwdev;
+/*command buffer for control data*/
+	struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
+	atomic_t irq_pending;
+	int io_buf_no;
+	int buf_no;
+};
+
+/**
+ *  OSA card related definitions
+ */
+struct qeth_token {
+	__u32 issuer_rm_w;
+	__u32 issuer_rm_r;
+	__u32 cm_filter_w;
+	__u32 cm_filter_r;
+	__u32 cm_connection_w;
+	__u32 cm_connection_r;
+	__u32 ulp_filter_w;
+	__u32 ulp_filter_r;
+	__u32 ulp_connection_w;
+	__u32 ulp_connection_r;
+};
+
+struct qeth_seqno {
+	__u32 trans_hdr;
+	__u32 pdu_hdr;
+	__u32 pdu_hdr_ack;
+	__u16 ipa;
+	__u32 pkt_seqno;
+};
+
+struct qeth_reply {
+	struct list_head list;
+	wait_queue_head_t wait_q;
+	int (*callback)(struct qeth_card *, struct qeth_reply *,
+		unsigned long);
+	u32 seqno;
+	unsigned long offset;
+	atomic_t received;
+	int rc;
+	void *param;
+	struct qeth_card *card;
+	atomic_t refcnt;
+};
+
+
+struct qeth_card_blkt {
+	int time_total;
+	int inter_packet;
+	int inter_packet_jumbo;
+};
+
+#define QETH_BROADCAST_WITH_ECHO    0x01
+#define QETH_BROADCAST_WITHOUT_ECHO 0x02
+#define QETH_LAYER2_MAC_READ	    0x01
+#define QETH_LAYER2_MAC_REGISTERED  0x02
+struct qeth_card_info {
+	unsigned short unit_addr2;
+	unsigned short cula;
+	unsigned short chpid;
+	__u16 func_level;
+	char mcl_level[QETH_MCL_LENGTH + 1];
+	int guestlan;
+	int mac_bits;
+	int portname_required;
+	int portno;
+	char portname[9];
+	enum qeth_card_types type;
+	enum qeth_link_types link_type;
+	int is_multicast_different;
+	int initial_mtu;
+	int max_mtu;
+	int broadcast_capable;
+	int unique_id;
+	struct qeth_card_blkt blkt;
+	__u32 csum_mask;
+	__u32 tx_csum_mask;
+	enum qeth_ipa_promisc_modes promisc_mode;
+	__u32 diagass_support;
+	__u32 hwtrap;
+};
+
+struct qeth_card_options {
+	struct qeth_routing_info route4;
+	struct qeth_ipa_info ipa4;
+	struct qeth_ipa_info adp; /*Adapter parameters*/
+	struct qeth_routing_info route6;
+	struct qeth_ipa_info ipa6;
+	int broadcast_mode;
+	int macaddr_mode;
+	int fake_broadcast;
+	int add_hhlen;
+	int layer2;
+	int performance_stats;
+	int rx_sg_cb;
+	enum qeth_ipa_isolation_modes isolation;
+	int sniffer;
+	enum qeth_cq cq;
+	char hsuid[9];
+};
+
+/*
+ * thread bits for qeth_card thread masks
+ */
+enum qeth_threads {
+	QETH_RECOVER_THREAD = 1,
+};
+
+struct qeth_osn_info {
+	int (*assist_cb)(struct net_device *dev, void *data);
+	int (*data_cb)(struct sk_buff *skb);
+};
+
+enum qeth_discipline_id {
+	QETH_DISCIPLINE_LAYER3 = 0,
+	QETH_DISCIPLINE_LAYER2 = 1,
+};
+
+struct qeth_discipline {
+	void (*start_poll)(struct ccw_device *, int, unsigned long);
+	qdio_handler_t *input_handler;
+	qdio_handler_t *output_handler;
+	int (*recover)(void *ptr);
+	struct ccwgroup_driver *ccwgdriver;
+};
+
+struct qeth_vlan_vid {
+	struct list_head list;
+	unsigned short vid;
+};
+
+struct qeth_mc_mac {
+	struct list_head list;
+	__u8 mc_addr[MAX_ADDR_LEN];
+	unsigned char mc_addrlen;
+	int is_vmac;
+};
+
+struct qeth_rx {
+	int b_count;
+	int b_index;
+	struct qdio_buffer_element *b_element;
+	int e_offset;
+	int qdio_err;
+};
+
+#define QETH_NAPI_WEIGHT 128
+
+struct qeth_card {
+	struct list_head list;
+	enum qeth_card_states state;
+	int lan_online;
+	spinlock_t lock;
+	struct ccwgroup_device *gdev;
+	struct qeth_channel read;
+	struct qeth_channel write;
+	struct qeth_channel data;
+
+	struct net_device *dev;
+	struct net_device_stats stats;
+
+	struct qeth_card_info info;
+	struct qeth_token token;
+	struct qeth_seqno seqno;
+	struct qeth_card_options options;
+
+	wait_queue_head_t wait_q;
+	spinlock_t vlanlock;
+	spinlock_t mclock;
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	struct list_head vid_list;
+	struct list_head mc_list;
+	struct work_struct kernel_thread_starter;
+	spinlock_t thread_mask_lock;
+	unsigned long thread_start_mask;
+	unsigned long thread_allowed_mask;
+	unsigned long thread_running_mask;
+	spinlock_t ip_lock;
+	struct list_head ip_list;
+	struct list_head *ip_tbd_list;
+	struct qeth_ipato ipato;
+	struct list_head cmd_waiter_list;
+	/* QDIO buffer handling */
+	struct qeth_qdio_info qdio;
+	struct qeth_perf_stats perf_stats;
+	int read_or_write_problem;
+	struct qeth_osn_info osn_info;
+	struct qeth_discipline discipline;
+	atomic_t force_alloc_skb;
+	struct service_level qeth_service_level;
+	struct qdio_ssqd_desc ssqd;
+	debug_info_t *debug;
+	struct mutex conf_mutex;
+	struct mutex discipline_mutex;
+	struct napi_struct napi;
+	struct qeth_rx rx;
+	struct delayed_work buffer_reclaim_work;
+	int reclaim_index;
+};
+
+struct qeth_card_list_struct {
+	struct list_head list;
+	rwlock_t rwlock;
+};
+
+struct qeth_trap_id {
+	__u16 lparnr;
+	char vmname[8];
+	__u8 chpid;
+	__u8 ssid;
+	__u16 devno;
+} __packed;
+
+/*some helper functions*/
+#define QETH_CARD_IFNAME(card) ((card)->dev ? (card)->dev->name : "")
+
+static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *)
+		dev_get_drvdata(&cdev->dev))->dev);
+	return card;
+}
+
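+/*
+ * The s390 TOD clock increments bit 51 once per microsecond, so shifting
+ * the clock value right by 12 bits yields microseconds; the result is
+ * only used for relative time stamps in the performance statistics.
+ */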
+static inline int qeth_get_micros(void)
+{
+	return (int) (get_clock() >> 12);
+}
+
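+/*
+ * Note: h_proto is in network byte order; since s390 is big-endian the
+ * direct comparison against ETH_P_IP / ETH_P_IPV6 below works without
+ * an explicit ntohs().
+ */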
+static inline int qeth_get_ip_version(struct sk_buff *skb)
+{
+	struct ethhdr *ehdr = (struct ethhdr *)skb->data;
+	switch (ehdr->h_proto) {
+	case ETH_P_IPV6:
+		return 6;
+	case ETH_P_IP:
+		return 4;
+	default:
+		return 0;
+	}
+}
+
+static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
+		struct qeth_buffer_pool_entry *entry)
+{
+	list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
+}
+
+static inline int qeth_is_diagass_supported(struct qeth_card *card,
+		enum qeth_diags_cmds cmd)
+{
+	return card->info.diagass_support & (__u32)cmd;
+}
+
+extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
+extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
+const char *qeth_get_cardname_short(struct qeth_card *);
+int qeth_realloc_buffer_pool(struct qeth_card *, int);
+int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
+void qeth_core_free_discipline(struct qeth_card *);
+int qeth_core_create_device_attributes(struct device *);
+void qeth_core_remove_device_attributes(struct device *);
+int qeth_core_create_osn_attributes(struct device *);
+void qeth_core_remove_osn_attributes(struct device *);
+void qeth_buffer_reclaim_work(struct work_struct *);
+
+/* exports for qeth discipline device drivers */
+extern struct qeth_card_list_struct qeth_core_card_list;
+extern struct kmem_cache *qeth_core_header_cache;
+extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
+
+void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
+int qeth_threads_running(struct qeth_card *, unsigned long);
+int qeth_wait_for_threads(struct qeth_card *, unsigned long);
+int qeth_do_run_thread(struct qeth_card *, unsigned long);
+void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
+void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
+int qeth_core_hardsetup_card(struct qeth_card *);
+void qeth_print_status_message(struct qeth_card *);
+int qeth_init_qdio_queues(struct qeth_card *);
+int qeth_send_startlan(struct qeth_card *);
+int qeth_send_stoplan(struct qeth_card *);
+int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
+		  int (*reply_cb)
+		  (struct qeth_card *, struct qeth_reply *, unsigned long),
+		  void *);
+struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
+			enum qeth_ipa_cmds, enum qeth_prot_versions);
+int qeth_query_setadapterparms(struct qeth_card *);
+int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *,
+		unsigned int, const char *);
+void qeth_queue_input_buffer(struct qeth_card *, int);
+struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
+		struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
+		struct qeth_hdr **);
+void qeth_schedule_recovery(struct qeth_card *);
+void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long);
+void qeth_qdio_input_handler(struct ccw_device *,
+		unsigned int, unsigned int, int,
+		int, unsigned long);
+void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
+			int, int, int, unsigned long);
+void qeth_clear_ipacmd_list(struct qeth_card *);
+int qeth_qdio_clear_card(struct qeth_card *, int);
+void qeth_clear_working_pool_list(struct qeth_card *);
+void qeth_clear_cmd_buffers(struct qeth_channel *);
+void qeth_clear_qdio_buffers(struct qeth_card *);
+void qeth_setadp_promisc_mode(struct qeth_card *);
+struct net_device_stats *qeth_get_stats(struct net_device *);
+int qeth_change_mtu(struct net_device *, int);
+int qeth_setadpparms_change_macaddr(struct qeth_card *);
+void qeth_tx_timeout(struct net_device *);
+void qeth_prepare_control_data(struct qeth_card *, int,
+				struct qeth_cmd_buffer *);
+void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
+void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
+struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
+int qeth_mdio_read(struct net_device *, int, int);
+int qeth_snmp_command(struct qeth_card *, char __user *);
+int qeth_query_oat_command(struct qeth_card *, char __user *);
+struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32);
+int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *,
+					unsigned long);
+int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
+	int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
+	void *reply_param);
+int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
+int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
+int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
+			struct sk_buff *, struct qeth_hdr *, int, int, int);
+int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
+		    struct sk_buff *, struct qeth_hdr *, int);
+int qeth_core_get_sset_count(struct net_device *, int);
+void qeth_core_get_ethtool_stats(struct net_device *,
+				struct ethtool_stats *, u64 *);
+void qeth_core_get_strings(struct net_device *, u32, u8 *);
+void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
+void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
+int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
+int qeth_set_access_ctrl_online(struct qeth_card *card);
+int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
+int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
+int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
+int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
+
+/* exports for OSN */
+int qeth_osn_assist(struct net_device *, void *, int);
+int qeth_osn_register(unsigned char *read_dev_no, struct net_device **,
+		int (*assist_cb)(struct net_device *, void *),
+		int (*data_cb)(struct sk_buff *));
+void qeth_osn_deregister(struct net_device *);
+
+#endif /* __QETH_CORE_H__ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_core_main.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_core_main.c
new file mode 100644
index 0000000..be56590
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_core_main.c
@@ -0,0 +1,5623 @@
+/*
+ *  drivers/s390/net/qeth_core_main.c
+ *
+ *    Copyright IBM Corp. 2007, 2009
+ *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ *		 Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "qeth"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/mii.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <net/iucv/af_iucv.h>
+
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/sysinfo.h>
+#include <asm/compat.h>
+
+#include "qeth_core.h"
+
+struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
+	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
+	/*                   N  P  A    M  L  V                      H  */
+	[QETH_DBF_SETUP] = {"qeth_setup",
+				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
+	[QETH_DBF_MSG]   = {"qeth_msg",
+				8, 1, 128, 3, &debug_sprintf_view,   NULL},
+	[QETH_DBF_CTRL]  = {"qeth_control",
+		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
+};
+EXPORT_SYMBOL_GPL(qeth_dbf);
+
+struct qeth_card_list_struct qeth_core_card_list;
+EXPORT_SYMBOL_GPL(qeth_core_card_list);
+struct kmem_cache *qeth_core_header_cache;
+EXPORT_SYMBOL_GPL(qeth_core_header_cache);
+static struct kmem_cache *qeth_qdio_outbuf_cache;
+
+static struct device *qeth_core_root_dev;
+static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY;
+static struct lock_class_key qdio_out_skb_queue_key;
+static struct mutex qeth_mod_mutex;
+
+static void qeth_send_control_data_cb(struct qeth_channel *,
+			struct qeth_cmd_buffer *);
+static int qeth_issue_next_read(struct qeth_card *);
+static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
+static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32);
+static void qeth_free_buffer_pool(struct qeth_card *);
+static int qeth_qdio_establish(struct qeth_card *);
+static void qeth_free_qdio_buffers(struct qeth_card *);
+static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
+		struct qeth_qdio_out_buffer *buf,
+		enum iucv_tx_notify notification);
+static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+		struct qeth_qdio_out_buffer *buf,
+		enum qeth_qdio_buffer_states newbufstate);
+static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
+
+static inline const char *qeth_get_cardname(struct qeth_card *card)
+{
+	if (card->info.guestlan) {
+		switch (card->info.type) {
+		case QETH_CARD_TYPE_OSD:
+			return " Guest LAN QDIO";
+		case QETH_CARD_TYPE_IQD:
+			return " Guest LAN Hiper";
+		case QETH_CARD_TYPE_OSM:
+			return " Guest LAN QDIO - OSM";
+		case QETH_CARD_TYPE_OSX:
+			return " Guest LAN QDIO - OSX";
+		default:
+			return " unknown";
+		}
+	} else {
+		switch (card->info.type) {
+		case QETH_CARD_TYPE_OSD:
+			return " OSD Express";
+		case QETH_CARD_TYPE_IQD:
+			return " HiperSockets";
+		case QETH_CARD_TYPE_OSN:
+			return " OSN QDIO";
+		case QETH_CARD_TYPE_OSM:
+			return " OSM QDIO";
+		case QETH_CARD_TYPE_OSX:
+			return " OSX QDIO";
+		default:
+			return " unknown";
+		}
+	}
+	return " n/a";
+}
+
+/* max length to be returned: 14 */
+const char *qeth_get_cardname_short(struct qeth_card *card)
+{
+	if (card->info.guestlan) {
+		switch (card->info.type) {
+		case QETH_CARD_TYPE_OSD:
+			return "GuestLAN QDIO";
+		case QETH_CARD_TYPE_IQD:
+			return "GuestLAN Hiper";
+		case QETH_CARD_TYPE_OSM:
+			return "GuestLAN OSM";
+		case QETH_CARD_TYPE_OSX:
+			return "GuestLAN OSX";
+		default:
+			return "unknown";
+		}
+	} else {
+		switch (card->info.type) {
+		case QETH_CARD_TYPE_OSD:
+			switch (card->info.link_type) {
+			case QETH_LINK_TYPE_FAST_ETH:
+				return "OSD_100";
+			case QETH_LINK_TYPE_HSTR:
+				return "HSTR";
+			case QETH_LINK_TYPE_GBIT_ETH:
+				return "OSD_1000";
+			case QETH_LINK_TYPE_10GBIT_ETH:
+				return "OSD_10GIG";
+			case QETH_LINK_TYPE_LANE_ETH100:
+				return "OSD_FE_LANE";
+			case QETH_LINK_TYPE_LANE_TR:
+				return "OSD_TR_LANE";
+			case QETH_LINK_TYPE_LANE_ETH1000:
+				return "OSD_GbE_LANE";
+			case QETH_LINK_TYPE_LANE:
+				return "OSD_ATM_LANE";
+			default:
+				return "OSD_Express";
+			}
+		case QETH_CARD_TYPE_IQD:
+			return "HiperSockets";
+		case QETH_CARD_TYPE_OSN:
+			return "OSN";
+		case QETH_CARD_TYPE_OSM:
+			return "OSM_1000";
+		case QETH_CARD_TYPE_OSX:
+			return "OSX_10GIG";
+		default:
+			return "unknown";
+		}
+	}
+	return "n/a";
+}
+
+void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
+			 int clear_start_mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	card->thread_allowed_mask = threads;
+	if (clear_start_mask)
+		card->thread_start_mask &= threads;
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	wake_up(&card->wait_q);
+}
+EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
+
+int qeth_threads_running(struct qeth_card *card, unsigned long threads)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	rc = (card->thread_running_mask & threads);
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_threads_running);
+
+int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
+{
+	return wait_event_interruptible(card->wait_q,
+			qeth_threads_running(card, threads) == 0);
+}
+EXPORT_SYMBOL_GPL(qeth_wait_for_threads);
+
+void qeth_clear_working_pool_list(struct qeth_card *card)
+{
+	struct qeth_buffer_pool_entry *pool_entry, *tmp;
+
+	QETH_CARD_TEXT(card, 5, "clwrklst");
+	list_for_each_entry_safe(pool_entry, tmp,
+				 &card->qdio.in_buf_pool.entry_list, list) {
+		list_del(&pool_entry->list);
+	}
+}
+EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
+
+static int qeth_alloc_buffer_pool(struct qeth_card *card)
+{
+	struct qeth_buffer_pool_entry *pool_entry;
+	void *ptr;
+	int i, j;
+
+	QETH_CARD_TEXT(card, 5, "alocpool");
+	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
+		pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
+		if (!pool_entry) {
+			qeth_free_buffer_pool(card);
+			return -ENOMEM;
+		}
+		for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
+			ptr = (void *) __get_free_page(GFP_KERNEL);
+			if (!ptr) {
+				while (j > 0)
+					free_page((unsigned long)
+						  pool_entry->elements[--j]);
+				kfree(pool_entry);
+				qeth_free_buffer_pool(card);
+				return -ENOMEM;
+			}
+			pool_entry->elements[j] = ptr;
+		}
+		list_add(&pool_entry->init_list,
+			 &card->qdio.init_pool.entry_list);
+	}
+	return 0;
+}
+
+int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
+{
+	QETH_CARD_TEXT(card, 2, "realcbp");
+
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER))
+		return -EPERM;
+
+	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
+	qeth_clear_working_pool_list(card);
+	qeth_free_buffer_pool(card);
+	card->qdio.in_buf_pool.buf_count = bufcnt;
+	card->qdio.init_pool.buf_count = bufcnt;
+	return qeth_alloc_buffer_pool(card);
+}
+EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
+
+static inline int qeth_cq_init(struct qeth_card *card)
+{
+	int rc;
+
+	if (card->options.cq == QETH_CQ_ENABLED) {
+		QETH_DBF_TEXT(SETUP, 2, "cqinit");
+		memset(card->qdio.c_q->qdio_bufs, 0,
+		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
+		card->qdio.c_q->next_buf_to_init = 127;
+		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
+			     card->qdio.no_in_queues - 1, 0,
+			     127);
+		if (rc) {
+			QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+			goto out;
+		}
+	}
+	rc = 0;
+out:
+	return rc;
+}
+
+static inline int qeth_alloc_cq(struct qeth_card *card)
+{
+	int rc;
+
+	if (card->options.cq == QETH_CQ_ENABLED) {
+		int i;
+		struct qdio_outbuf_state *outbuf_states;
+
+		QETH_DBF_TEXT(SETUP, 2, "cqon");
+		card->qdio.c_q = kzalloc(sizeof(struct qeth_qdio_q),
+					 GFP_KERNEL);
+		if (!card->qdio.c_q) {
+			rc = -1;
+			goto kmsg_out;
+		}
+		QETH_DBF_HEX(SETUP, 2, &card->qdio.c_q, sizeof(void *));
+
+		for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+			card->qdio.c_q->bufs[i].buffer =
+				&card->qdio.c_q->qdio_bufs[i];
+		}
+
+		card->qdio.no_in_queues = 2;
+
+		card->qdio.out_bufstates = (struct qdio_outbuf_state *)
+			kzalloc(card->qdio.no_out_queues *
+				QDIO_MAX_BUFFERS_PER_Q *
+				sizeof(struct qdio_outbuf_state), GFP_KERNEL);
+		outbuf_states = card->qdio.out_bufstates;
+		if (outbuf_states == NULL) {
+			rc = -1;
+			goto free_cq_out;
+		}
+		for (i = 0; i < card->qdio.no_out_queues; ++i) {
+			card->qdio.out_qs[i]->bufstates = outbuf_states;
+			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
+		}
+	} else {
+		QETH_DBF_TEXT(SETUP, 2, "nocq");
+		card->qdio.c_q = NULL;
+		card->qdio.no_in_queues = 1;
+	}
+	QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
+	rc = 0;
+out:
+	return rc;
+free_cq_out:
+	kfree(card->qdio.c_q);
+	card->qdio.c_q = NULL;
+kmsg_out:
+	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
+	goto out;
+}
+
+static inline void qeth_free_cq(struct qeth_card *card)
+{
+	if (card->qdio.c_q) {
+		--card->qdio.no_in_queues;
+		kfree(card->qdio.c_q);
+		card->qdio.c_q = NULL;
+	}
+	kfree(card->qdio.out_bufstates);
+	card->qdio.out_bufstates = NULL;
+}
+
+static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
+	int delayed)
+{
+	enum iucv_tx_notify n;
+
+	switch (sbalf15) {
+	case 0:
+		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
+		break;
+	case 4:
+	case 16:
+	case 17:
+	case 18:
+		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
+			TX_NOTIFY_UNREACHABLE;
+		break;
+	default:
+		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
+			TX_NOTIFY_GENERALERROR;
+		break;
+	}
+
+	return n;
+}
+
+static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
+	int bidx, int forced_cleanup)
+{
+	if (q->card->options.cq != QETH_CQ_ENABLED)
+		return;
+
+	if (q->bufs[bidx]->next_pending != NULL) {
+		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
+		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
+
+		while (c) {
+			if (forced_cleanup ||
+			    atomic_read(&c->state) ==
+			      QETH_QDIO_BUF_HANDLED_DELAYED) {
+				struct qeth_qdio_out_buffer *f = c;
+				QETH_CARD_TEXT(f->q->card, 5, "fp");
+				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
+				/* release here to avoid interleaving between
+				   outbound tasklet and inbound tasklet
+				   regarding notifications and lifecycle */
+				qeth_release_skbs(c);
+
+				c = f->next_pending;
+				BUG_ON(head->next_pending != f);
+				head->next_pending = c;
+				kmem_cache_free(qeth_qdio_outbuf_cache, f);
+			} else {
+				head = c;
+				c = c->next_pending;
+			}
+
+		}
+	}
+	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
+					QETH_QDIO_BUF_HANDLED_DELAYED)) {
+		/* for recovery situations */
+		q->bufs[bidx]->aob = q->bufstates[bidx].aob;
+		qeth_init_qdio_out_buf(q, bidx);
+		QETH_CARD_TEXT(q->card, 2, "clprecov");
+	}
+}
+
+
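+/*
+ * Handle an asynchronous operation block (AOB) delivered through the
+ * completion queue: map it back to its outbound buffer, notify the
+ * af_iucv sockets owning the queued skbs and recycle the buffer.
+ */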
+static inline void qeth_qdio_handle_aob(struct qeth_card *card,
+		unsigned long phys_aob_addr)
+{
+	struct qaob *aob;
+	struct qeth_qdio_out_buffer *buffer;
+	enum iucv_tx_notify notification;
+
+	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
+	QETH_CARD_TEXT(card, 5, "haob");
+	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
+	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
+	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
+
+	BUG_ON(buffer == NULL);
+
+	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
+			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
+		notification = TX_NOTIFY_OK;
+	} else {
+		BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING);
+		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
+		notification = TX_NOTIFY_DELAYED_OK;
+	}
+
+	if (aob->aorc != 0)  {
+		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
+		notification = qeth_compute_cq_notification(aob->aorc, 1);
+	}
+	qeth_notify_skbs(buffer->q, buffer, notification);
+
+	buffer->aob = NULL;
+	qeth_clear_output_buffer(buffer->q, buffer,
+				 QETH_QDIO_BUF_HANDLED_DELAYED);
+
+	/* from here on: do not touch buffer anymore */
+	qdio_release_aob(aob);
+}
+
+static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
+{
+	return card->options.cq == QETH_CQ_ENABLED &&
+	    card->qdio.c_q != NULL &&
+	    queue != 0 &&
+	    queue == card->qdio.no_in_queues - 1;
+}
+
+
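+/*
+ * Re-arm the read channel: fetch a free command buffer and start the
+ * next read CCW so the adapter can deliver further control data.
+ */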
+static int qeth_issue_next_read(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_CARD_TEXT(card, 5, "issnxrd");
+	if (card->read.state != CH_STATE_UP)
+		return -EIO;
+	iob = qeth_get_buffer(&card->read);
+	if (!iob) {
+		dev_warn(&card->gdev->dev, "The qeth device driver "
+			"failed to recover an error on the device\n");
+		QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob "
+			"available\n", dev_name(&card->gdev->dev));
+		return -ENOMEM;
+	}
+	qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
+	QETH_CARD_TEXT(card, 6, "noirqpnd");
+	rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
+			      (addr_t) iob, 0, 0);
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
+			"rc=%i\n", dev_name(&card->gdev->dev), rc);
+		atomic_set(&card->read.irq_pending, 0);
+		card->read_or_write_problem = 1;
+		qeth_schedule_recovery(card);
+		wake_up(&card->wait_q);
+	}
+	return rc;
+}
+
+static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
+{
+	struct qeth_reply *reply;
+
+	reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
+	if (reply) {
+		atomic_set(&reply->refcnt, 1);
+		atomic_set(&reply->received, 0);
+		reply->card = card;
+	}
+	return reply;
+}
+
+static void qeth_get_reply(struct qeth_reply *reply)
+{
+	WARN_ON(atomic_read(&reply->refcnt) <= 0);
+	atomic_inc(&reply->refcnt);
+}
+
+static void qeth_put_reply(struct qeth_reply *reply)
+{
+	WARN_ON(atomic_read(&reply->refcnt) <= 0);
+	if (atomic_dec_and_test(&reply->refcnt))
+		kfree(reply);
+}
+
+static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
+		struct qeth_card *card)
+{
+	char *ipa_name;
+	int com = cmd->hdr.command;
+	ipa_name = qeth_get_ipa_cmd_name(com);
+	if (rc)
+		QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
+				"x%X \"%s\"\n",
+				ipa_name, com, dev_name(&card->gdev->dev),
+				QETH_CARD_IFNAME(card), rc,
+				qeth_get_ipa_msg(rc));
+	else
+		QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
+				ipa_name, com, dev_name(&card->gdev->dev),
+				QETH_CARD_IFNAME(card));
+}
+
+static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
+		struct qeth_cmd_buffer *iob)
+{
+	struct qeth_ipa_cmd *cmd = NULL;
+
+	QETH_CARD_TEXT(card, 5, "chkipad");
+	if (IS_IPA(iob->data)) {
+		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
+		if (IS_IPA_REPLY(cmd)) {
+			if (cmd->hdr.command != IPA_CMD_SETCCID &&
+			    cmd->hdr.command != IPA_CMD_DELCCID &&
+			    cmd->hdr.command != IPA_CMD_MODCCID &&
+			    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
+				qeth_issue_ipa_msg(cmd,
+						cmd->hdr.return_code, card);
+			return cmd;
+		} else {
+			switch (cmd->hdr.command) {
+			case IPA_CMD_STOPLAN:
+				dev_warn(&card->gdev->dev,
+					   "The link for interface %s on CHPID"
+					   " 0x%X failed\n",
+					   QETH_CARD_IFNAME(card),
+					   card->info.chpid);
+				card->lan_online = 0;
+				if (card->dev && netif_carrier_ok(card->dev))
+					netif_carrier_off(card->dev);
+				return NULL;
+			case IPA_CMD_STARTLAN:
+				dev_info(&card->gdev->dev,
+					   "The link for %s on CHPID 0x%X has"
+					   " been restored\n",
+					   QETH_CARD_IFNAME(card),
+					   card->info.chpid);
+				netif_carrier_on(card->dev);
+				card->lan_online = 1;
+				if (card->info.hwtrap)
+					card->info.hwtrap = 2;
+				qeth_schedule_recovery(card);
+				return NULL;
+			case IPA_CMD_MODCCID:
+				return cmd;
+			case IPA_CMD_REGISTER_LOCAL_ADDR:
+				QETH_CARD_TEXT(card, 3, "irla");
+				break;
+			case IPA_CMD_UNREGISTER_LOCAL_ADDR:
+				QETH_CARD_TEXT(card, 3, "urla");
+				break;
+			default:
+				QETH_DBF_MESSAGE(2, "Received data is IPA "
+					   "but not a reply!\n");
+				break;
+			}
+		}
+	}
+	return cmd;
+}
+
+void qeth_clear_ipacmd_list(struct qeth_card *card)
+{
+	struct qeth_reply *reply, *r;
+	unsigned long flags;
+
+	QETH_CARD_TEXT(card, 4, "clipalst");
+
+	spin_lock_irqsave(&card->lock, flags);
+	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
+		qeth_get_reply(reply);
+		reply->rc = -EIO;
+		atomic_inc(&reply->received);
+		list_del_init(&reply->list);
+		wake_up(&reply->wait_q);
+		qeth_put_reply(reply);
+	}
+	spin_unlock_irqrestore(&card->lock, flags);
+	atomic_set(&card->write.irq_pending, 0);
+}
+EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
+
+static int qeth_check_idx_response(struct qeth_card *card,
+	unsigned char *buffer)
+{
+	if (!buffer)
+		return 0;
+
+	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
+	if ((buffer[2] & 0xc0) == 0xc0) {
+		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE "
+			   "with cause code 0x%02x%s\n",
+			   buffer[4],
+			   ((buffer[4] == 0x22) ?
+			    " -- try another portname" : ""));
+		QETH_CARD_TEXT(card, 2, "ckidxres");
+		QETH_CARD_TEXT(card, 2, " idxterm");
+		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
+		if (buffer[4] == 0xf6) {
+			dev_err(&card->gdev->dev,
+			"The qeth device is not configured "
+			"for the OSI layer required by z/VM\n");
+			return -EPERM;
+		}
+		return -EIO;
+	}
+	return 0;
+}
+
+static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
+		__u32 len)
+{
+	struct qeth_card *card;
+
+	card = CARD_FROM_CDEV(channel->ccwdev);
+	QETH_CARD_TEXT(card, 4, "setupccw");
+	if (channel == &card->read)
+		memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
+	else
+		memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
+	channel->ccw.count = len;
+	channel->ccw.cda = (__u32) __pa(iob);
+}
+
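+/*
+ * Scan the command-buffer ring for a free entry; called with the
+ * channel's iob_lock held (see qeth_get_buffer()).
+ */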
+static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
+{
+	__u8 index;
+
+	QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff");
+	index = channel->io_buf_no;
+	do {
+		if (channel->iob[index].state == BUF_STATE_FREE) {
+			channel->iob[index].state = BUF_STATE_LOCKED;
+			channel->io_buf_no = (channel->io_buf_no + 1) %
+				QETH_CMD_BUFFER_NO;
+			memset(channel->iob[index].data, 0, QETH_BUFSIZE);
+			return channel->iob + index;
+		}
+		index = (index + 1) % QETH_CMD_BUFFER_NO;
+	} while (index != channel->io_buf_no);
+
+	return NULL;
+}
+
+void qeth_release_buffer(struct qeth_channel *channel,
+		struct qeth_cmd_buffer *iob)
+{
+	unsigned long flags;
+
+	QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff");
+	spin_lock_irqsave(&channel->iob_lock, flags);
+	memset(iob->data, 0, QETH_BUFSIZE);
+	iob->state = BUF_STATE_FREE;
+	iob->callback = qeth_send_control_data_cb;
+	iob->rc = 0;
+	spin_unlock_irqrestore(&channel->iob_lock, flags);
+	wake_up(&channel->wait_q);
+}
+EXPORT_SYMBOL_GPL(qeth_release_buffer);
+
+static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
+{
+	struct qeth_cmd_buffer *buffer = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&channel->iob_lock, flags);
+	buffer = __qeth_get_buffer(channel);
+	spin_unlock_irqrestore(&channel->iob_lock, flags);
+	return buffer;
+}
+
+struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
+{
+	struct qeth_cmd_buffer *buffer;
+	wait_event(channel->wait_q,
+		   ((buffer = qeth_get_buffer(channel)) != NULL));
+	return buffer;
+}
+EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);
+
+void qeth_clear_cmd_buffers(struct qeth_channel *channel)
+{
+	int cnt;
+
+	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
+		qeth_release_buffer(channel, &channel->iob[cnt]);
+	channel->buf_no = 0;
+	channel->io_buf_no = 0;
+}
+EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
+
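+/*
+ * Callback for completed control-data reads: check the IDX response,
+ * extract a possible IPA command and match it by sequence number against
+ * the entries on cmd_waiter_list, invoking the per-reply callback.
+ */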
+static void qeth_send_control_data_cb(struct qeth_channel *channel,
+		  struct qeth_cmd_buffer *iob)
+{
+	struct qeth_card *card;
+	struct qeth_reply *reply, *r;
+	struct qeth_ipa_cmd *cmd;
+	unsigned long flags;
+	int keep_reply;
+	int rc = 0;
+
+	card = CARD_FROM_CDEV(channel->ccwdev);
+	QETH_CARD_TEXT(card, 4, "sndctlcb");
+	rc = qeth_check_idx_response(card, iob->data);
+	switch (rc) {
+	case 0:
+		break;
+	case -EIO:
+		qeth_clear_ipacmd_list(card);
+		qeth_schedule_recovery(card);
+		/* fall through */
+	default:
+		goto out;
+	}
+
+	cmd = qeth_check_ipa_data(card, iob);
+	if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
+		goto out;
+	/* in case of OSN: check if cmd is set */
+	if (card->info.type == QETH_CARD_TYPE_OSN &&
+	    cmd &&
+	    cmd->hdr.command != IPA_CMD_STARTLAN &&
+	    card->osn_info.assist_cb != NULL) {
+		card->osn_info.assist_cb(card->dev, cmd);
+		goto out;
+	}
+
+	spin_lock_irqsave(&card->lock, flags);
+	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
+		if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
+		    ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
+			qeth_get_reply(reply);
+			list_del_init(&reply->list);
+			spin_unlock_irqrestore(&card->lock, flags);
+			keep_reply = 0;
+			if (reply->callback != NULL) {
+				if (cmd) {
+					reply->offset = (__u16)((char *)cmd -
+							(char *)iob->data);
+					keep_reply = reply->callback(card,
+							reply,
+							(unsigned long)cmd);
+				} else
+					keep_reply = reply->callback(card,
+							reply,
+							(unsigned long)iob);
+			}
+			if (cmd)
+				reply->rc = (u16) cmd->hdr.return_code;
+			else if (iob->rc)
+				reply->rc = iob->rc;
+			if (keep_reply) {
+				spin_lock_irqsave(&card->lock, flags);
+				list_add_tail(&reply->list,
+					      &card->cmd_waiter_list);
+				spin_unlock_irqrestore(&card->lock, flags);
+			} else {
+				atomic_inc(&reply->received);
+				wake_up(&reply->wait_q);
+			}
+			qeth_put_reply(reply);
+			goto out;
+		}
+	}
+	spin_unlock_irqrestore(&card->lock, flags);
+out:
+	memcpy(&card->seqno.pdu_hdr_ack,
+		QETH_PDU_HEADER_SEQ_NO(iob->data),
+		QETH_SEQ_NO_LENGTH);
+	qeth_release_buffer(channel, iob);
+}
+
+static int qeth_setup_channel(struct qeth_channel *channel)
+{
+	int cnt;
+
+	QETH_DBF_TEXT(SETUP, 2, "setupch");
+	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
+		channel->iob[cnt].data =
+			kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
+		if (channel->iob[cnt].data == NULL)
+			break;
+		channel->iob[cnt].state = BUF_STATE_FREE;
+		channel->iob[cnt].channel = channel;
+		channel->iob[cnt].callback = qeth_send_control_data_cb;
+		channel->iob[cnt].rc = 0;
+	}
+	if (cnt < QETH_CMD_BUFFER_NO) {
+		while (cnt-- > 0)
+			kfree(channel->iob[cnt].data);
+		return -ENOMEM;
+	}
+	channel->buf_no = 0;
+	channel->io_buf_no = 0;
+	atomic_set(&channel->irq_pending, 0);
+	spin_lock_init(&channel->iob_lock);
+
+	init_waitqueue_head(&channel->wait_q);
+	return 0;
+}
+
+static int qeth_set_thread_start_bit(struct qeth_card *card,
+		unsigned long thread)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	if (!(card->thread_allowed_mask & thread) ||
+	      (card->thread_start_mask & thread)) {
+		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+		return -EPERM;
+	}
+	card->thread_start_mask |= thread;
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	return 0;
+}
+
+void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	card->thread_start_mask &= ~thread;
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	wake_up(&card->wait_q);
+}
+EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
+
+void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	card->thread_running_mask &= ~thread;
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	wake_up(&card->wait_q);
+}
+EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
+
+static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	if (card->thread_start_mask & thread) {
+		if ((card->thread_allowed_mask & thread) &&
+		    !(card->thread_running_mask & thread)) {
+			rc = 1;
+			card->thread_start_mask &= ~thread;
+			card->thread_running_mask |= thread;
+		} else
+			rc = -EPERM;
+	}
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	return rc;
+}
+
+int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
+{
+	int rc = 0;
+
+	wait_event(card->wait_q,
+		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_do_run_thread);
+
+void qeth_schedule_recovery(struct qeth_card *card)
+{
+	QETH_CARD_TEXT(card, 2, "startrec");
+	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
+		schedule_work(&card->kernel_thread_starter);
+}
+EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
+
+static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
+{
+	int dstat, cstat;
+	char *sense;
+	struct qeth_card *card;
+
+	sense = (char *) irb->ecw;
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+	card = CARD_FROM_CDEV(cdev);
+
+	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
+		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
+		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
+		QETH_CARD_TEXT(card, 2, "CGENCHK");
+		dev_warn(&cdev->dev, "The qeth device driver "
+			"failed to recover an error on the device\n");
+		QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
+			dev_name(&cdev->dev), dstat, cstat);
+		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
+				16, 1, irb, 64, 1);
+		return 1;
+	}
+
+	if (dstat & DEV_STAT_UNIT_CHECK) {
+		if (sense[SENSE_RESETTING_EVENT_BYTE] &
+		    SENSE_RESETTING_EVENT_FLAG) {
+			QETH_CARD_TEXT(card, 2, "REVIND");
+			return 1;
+		}
+		if (sense[SENSE_COMMAND_REJECT_BYTE] &
+		    SENSE_COMMAND_REJECT_FLAG) {
+			QETH_CARD_TEXT(card, 2, "CMDREJi");
+			return 1;
+		}
+		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
+			QETH_CARD_TEXT(card, 2, "AFFE");
+			return 1;
+		}
+		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
+			QETH_CARD_TEXT(card, 2, "ZEROSEN");
+			return 0;
+		}
+		QETH_CARD_TEXT(card, 2, "DGENCHK");
+		return 1;
+	}
+	return 0;
+}
+
+static long __qeth_check_irb_error(struct ccw_device *cdev,
+		unsigned long intparm, struct irb *irb)
+{
+	struct qeth_card *card;
+
+	card = CARD_FROM_CDEV(cdev);
+
+	if (!IS_ERR(irb))
+		return 0;
+
+	switch (PTR_ERR(irb)) {
+	case -EIO:
+		QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
+			dev_name(&cdev->dev));
+		QETH_CARD_TEXT(card, 2, "ckirberr");
+		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
+		break;
+	case -ETIMEDOUT:
+		dev_warn(&cdev->dev, "A hardware operation timed out"
+			" on the device\n");
+		QETH_CARD_TEXT(card, 2, "ckirberr");
+		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
+		if (intparm == QETH_RCD_PARM) {
+			if (card && (card->data.ccwdev == cdev)) {
+				card->data.state = CH_STATE_DOWN;
+				wake_up(&card->wait_q);
+			}
+		}
+		break;
+	default:
+		QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
+			dev_name(&cdev->dev), PTR_ERR(irb));
+		QETH_CARD_TEXT(card, 2, "ckirberr");
+		QETH_CARD_TEXT(card, 2, "  rc???");
+	}
+	return PTR_ERR(irb);
+}
+
+static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+		struct irb *irb)
+{
+	int rc;
+	int cstat, dstat;
+	struct qeth_cmd_buffer *buffer;
+	struct qeth_channel *channel;
+	struct qeth_card *card;
+	struct qeth_cmd_buffer *iob;
+	__u8 index;
+
+	if (__qeth_check_irb_error(cdev, intparm, irb))
+		return;
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+
+	card = CARD_FROM_CDEV(cdev);
+	if (!card)
+		return;
+
+	QETH_CARD_TEXT(card, 5, "irq");
+
+	if (card->read.ccwdev == cdev) {
+		channel = &card->read;
+		QETH_CARD_TEXT(card, 5, "read");
+	} else if (card->write.ccwdev == cdev) {
+		channel = &card->write;
+		QETH_CARD_TEXT(card, 5, "write");
+	} else {
+		channel = &card->data;
+		QETH_CARD_TEXT(card, 5, "data");
+	}
+	atomic_set(&channel->irq_pending, 0);
+
+	if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
+		channel->state = CH_STATE_STOPPED;
+
+	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
+		channel->state = CH_STATE_HALTED;
+
+	/* let's wake up immediately on the data channel */
+	if ((channel == &card->data) && (intparm != 0) &&
+	    (intparm != QETH_RCD_PARM))
+		goto out;
+
+	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
+		QETH_CARD_TEXT(card, 6, "clrchpar");
+		/* we don't have to handle this further */
+		intparm = 0;
+	}
+	if (intparm == QETH_HALT_CHANNEL_PARM) {
+		QETH_CARD_TEXT(card, 6, "hltchpar");
+		/* we don't have to handle this further */
+		intparm = 0;
+	}
+	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
+	    (dstat & DEV_STAT_UNIT_CHECK) ||
+	    (cstat)) {
+		if (irb->esw.esw0.erw.cons) {
+			dev_warn(&channel->ccwdev->dev,
+				"The qeth device driver failed to recover "
+				"an error on the device\n");
+			QETH_DBF_MESSAGE(2, "%s sense data available. cstat "
+				"0x%X dstat 0x%X\n",
+				dev_name(&channel->ccwdev->dev), cstat, dstat);
+			print_hex_dump(KERN_WARNING, "qeth: irb ",
+				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
+			print_hex_dump(KERN_WARNING, "qeth: sense data ",
+				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
+		}
+		if (intparm == QETH_RCD_PARM) {
+			channel->state = CH_STATE_DOWN;
+			goto out;
+		}
+		rc = qeth_get_problem(cdev, irb);
+		if (rc) {
+			qeth_clear_ipacmd_list(card);
+			qeth_schedule_recovery(card);
+			goto out;
+		}
+	}
+
+	if (intparm == QETH_RCD_PARM) {
+		channel->state = CH_STATE_RCD_DONE;
+		goto out;
+	}
+	if (intparm) {
+		buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
+		buffer->state = BUF_STATE_PROCESSED;
+	}
+	if (channel == &card->data)
+		return;
+	if (channel == &card->read &&
+	    channel->state == CH_STATE_UP)
+		qeth_issue_next_read(card);
+
+	iob = channel->iob;
+	index = channel->buf_no;
+	while (iob[index].state == BUF_STATE_PROCESSED) {
+		if (iob[index].callback != NULL)
+			iob[index].callback(channel, iob + index);
+
+		index = (index + 1) % QETH_CMD_BUFFER_NO;
+	}
+	channel->buf_no = index;
+out:
+	wake_up(&card->wait_q);
+	return;
+}
+
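+/*
+ * Propagate a transmit notification to every af_iucv socket that still
+ * has an skb queued on this outbound buffer.
+ */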
+static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
+		struct qeth_qdio_out_buffer *buf,
+		enum iucv_tx_notify notification)
+{
+	struct sk_buff *skb;
+
+	if (skb_queue_empty(&buf->skb_list))
+		goto out;
+	skb = skb_peek(&buf->skb_list);
+	while (skb) {
+		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
+		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
+		if (skb->protocol == ETH_P_AF_IUCV) {
+			if (skb->sk) {
+				struct iucv_sock *iucv = iucv_sk(skb->sk);
+				iucv->sk_txnotify(skb, notification);
+			}
+		}
+		if (skb_queue_is_last(&buf->skb_list, skb))
+			skb = NULL;
+		else
+			skb = skb_queue_next(&buf->skb_list, skb);
+	}
+out:
+	return;
+}
+
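+/*
+ * Drop all skbs queued on an outbound buffer; if the buffer is still
+ * pending, signal a general error to the owning af_iucv sockets first.
+ */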
+static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
+{
+	struct sk_buff *skb;
+	struct iucv_sock *iucv;
+	int notify_general_error = 0;
+
+	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
+		notify_general_error = 1;
+
+	/* release may never happen from within CQ tasklet scope */
+	BUG_ON(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
+
+	skb = skb_dequeue(&buf->skb_list);
+	while (skb) {
+		QETH_CARD_TEXT(buf->q->card, 5, "skbr");
+		QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
+		if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) {
+			if (skb->sk) {
+				iucv = iucv_sk(skb->sk);
+				iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
+			}
+		}
+		atomic_dec(&skb->users);
+		dev_kfree_skb_any(skb);
+		skb = skb_dequeue(&buf->skb_list);
+	}
+}
+
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+		struct qeth_qdio_out_buffer *buf,
+		enum qeth_qdio_buffer_states newbufstate)
+{
+	int i;
+
+	/* is PCI flag set on buffer? */
+	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
+		atomic_dec(&queue->set_pci_flags_count);
+
+	if (newbufstate == QETH_QDIO_BUF_EMPTY) {
+		qeth_release_skbs(buf);
+	}
+	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
+		if (buf->buffer->element[i].addr && buf->is_header[i])
+			kmem_cache_free(qeth_core_header_cache,
+				buf->buffer->element[i].addr);
+		buf->is_header[i] = 0;
+		buf->buffer->element[i].length = 0;
+		buf->buffer->element[i].addr = NULL;
+		buf->buffer->element[i].eflags = 0;
+		buf->buffer->element[i].sflags = 0;
+	}
+	buf->buffer->element[15].eflags = 0;
+	buf->buffer->element[15].sflags = 0;
+	buf->next_element_to_fill = 0;
+	atomic_set(&buf->state, newbufstate);
+}
+
+static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
+{
+	int j;
+
+	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+		if (!q->bufs[j])
+			continue;
+		qeth_cleanup_handled_pending(q, j, 1);
+		qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
+		if (free) {
+			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
+			q->bufs[j] = NULL;
+		}
+	}
+}
+
+void qeth_clear_qdio_buffers(struct qeth_card *card)
+{
+	int i;
+
+	QETH_CARD_TEXT(card, 2, "clearqdbf");
+	/* clear outbound buffers to free skbs */
+	for (i = 0; i < card->qdio.no_out_queues; ++i) {
+		if (card->qdio.out_qs[i]) {
+			qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
+
+static void qeth_free_buffer_pool(struct qeth_card *card)
+{
+	struct qeth_buffer_pool_entry *pool_entry, *tmp;
+	int i = 0;
+	list_for_each_entry_safe(pool_entry, tmp,
+				 &card->qdio.init_pool.entry_list, init_list){
+		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
+			free_page((unsigned long)pool_entry->elements[i]);
+		list_del(&pool_entry->init_list);
+		kfree(pool_entry);
+	}
+}
+
+static void qeth_free_qdio_buffers(struct qeth_card *card)
+{
+	int i, j;
+
+	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
+		QETH_QDIO_UNINITIALIZED)
+		return;
+
+	qeth_free_cq(card);
+	cancel_delayed_work_sync(&card->buffer_reclaim_work);
+	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
+		dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
+	kfree(card->qdio.in_q);
+	card->qdio.in_q = NULL;
+	/* inbound buffer pool */
+	qeth_free_buffer_pool(card);
+	/* free outbound qdio_qs */
+	if (card->qdio.out_qs) {
+		for (i = 0; i < card->qdio.no_out_queues; ++i) {
+			qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
+			kfree(card->qdio.out_qs[i]);
+		}
+		kfree(card->qdio.out_qs);
+		card->qdio.out_qs = NULL;
+	}
+}
+
+static void qeth_clean_channel(struct qeth_channel *channel)
+{
+	int cnt;
+
+	QETH_DBF_TEXT(SETUP, 2, "freech");
+	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
+		kfree(channel->iob[cnt].data);
+}
+
+static void qeth_get_channel_path_desc(struct qeth_card *card)
+{
+	struct ccw_device *ccwdev;
+	struct channelPath_dsc {
+		u8 flags;
+		u8 lsn;
+		u8 desc;
+		u8 chpid;
+		u8 swla;
+		u8 zeroes;
+		u8 chla;
+		u8 chpp;
+	} *chp_dsc;
+
+	QETH_DBF_TEXT(SETUP, 2, "chp_desc");
+
+	ccwdev = card->data.ccwdev;
+	chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
+	if (chp_dsc != NULL) {
+		if (card->info.type != QETH_CARD_TYPE_IQD) {
+			/* CHPP field bit 6 == 1 -> single queue */
+			if ((chp_dsc->chpp & 0x02) == 0x02) {
+				if ((atomic_read(&card->qdio.state) !=
+					QETH_QDIO_UNINITIALIZED) &&
+				    (card->qdio.no_out_queues == 4))
+					/* change from 4 to 1 outbound queues */
+					qeth_free_qdio_buffers(card);
+				card->qdio.no_out_queues = 1;
+				if (card->qdio.default_out_queue != 0)
+					dev_info(&card->gdev->dev,
+					"Priority Queueing not supported\n");
+				card->qdio.default_out_queue = 0;
+			} else {
+				if ((atomic_read(&card->qdio.state) !=
+					QETH_QDIO_UNINITIALIZED) &&
+				    (card->qdio.no_out_queues == 1)) {
+					/* change from 1 to 4 outbound queues */
+					qeth_free_qdio_buffers(card);
+					card->qdio.default_out_queue = 2;
+				}
+				card->qdio.no_out_queues = 4;
+			}
+		}
+		card->info.func_level = 0x4100 + chp_dsc->desc;
+		kfree(chp_dsc);
+	}
+	QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
+	QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
+	return;
+}
+
+static void qeth_init_qdio_info(struct qeth_card *card)
+{
+	QETH_DBF_TEXT(SETUP, 4, "intqdinf");
+	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
+	/* inbound */
+	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
+	if (card->info.type == QETH_CARD_TYPE_IQD)
+		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
+	else
+		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
+	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
+	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
+	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
+}
+
+static void qeth_set_initial_options(struct qeth_card *card)
+{
+	card->options.route4.type = NO_ROUTER;
+	card->options.route6.type = NO_ROUTER;
+	card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
+	card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
+	card->options.fake_broadcast = 0;
+	card->options.add_hhlen = DEFAULT_ADD_HHLEN;
+	card->options.performance_stats = 0;
+	card->options.rx_sg_cb = QETH_RX_SG_CB;
+	card->options.isolation = ISOLATION_MODE_NONE;
+	card->options.cq = QETH_CQ_DISABLED;
+}
+
+static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
+			(u8) card->thread_start_mask,
+			(u8) card->thread_allowed_mask,
+			(u8) card->thread_running_mask);
+	rc = (card->thread_start_mask & thread);
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	return rc;
+}
+
+static void qeth_start_kernel_thread(struct work_struct *work)
+{
+	struct task_struct *ts;
+	struct qeth_card *card = container_of(work, struct qeth_card,
+					kernel_thread_starter);
+	QETH_CARD_TEXT(card, 2, "strthrd");
+
+	if (card->read.state != CH_STATE_UP &&
+	    card->write.state != CH_STATE_UP)
+		return;
+	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
+		ts = kthread_run(card->discipline.recover, (void *)card,
+				"qeth_recover");
+		if (IS_ERR(ts)) {
+			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+			qeth_clear_thread_running_bit(card,
+				QETH_RECOVER_THREAD);
+		}
+	}
+}
+
+static int qeth_setup_card(struct qeth_card *card)
+{
+	QETH_DBF_TEXT(SETUP, 2, "setupcrd");
+	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+
+	card->read.state  = CH_STATE_DOWN;
+	card->write.state = CH_STATE_DOWN;
+	card->data.state  = CH_STATE_DOWN;
+	card->state = CARD_STATE_DOWN;
+	card->lan_online = 0;
+	card->read_or_write_problem = 0;
+	card->dev = NULL;
+	spin_lock_init(&card->vlanlock);
+	spin_lock_init(&card->mclock);
+	spin_lock_init(&card->lock);
+	spin_lock_init(&card->ip_lock);
+	spin_lock_init(&card->thread_mask_lock);
+	mutex_init(&card->conf_mutex);
+	mutex_init(&card->discipline_mutex);
+	card->thread_start_mask = 0;
+	card->thread_allowed_mask = 0;
+	card->thread_running_mask = 0;
+	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
+	INIT_LIST_HEAD(&card->ip_list);
+	INIT_LIST_HEAD(card->ip_tbd_list);
+	INIT_LIST_HEAD(&card->cmd_waiter_list);
+	init_waitqueue_head(&card->wait_q);
+	/* initial options */
+	qeth_set_initial_options(card);
+	/* IP address takeover */
+	INIT_LIST_HEAD(&card->ipato.entries);
+	card->ipato.enabled = 0;
+	card->ipato.invert4 = 0;
+	card->ipato.invert6 = 0;
+	/* init QDIO stuff */
+	qeth_init_qdio_info(card);
+	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
+	return 0;
+}
+
+static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
+{
+	struct qeth_card *card = container_of(slr, struct qeth_card,
+					qeth_service_level);
+	if (card->info.mcl_level[0])
+		seq_printf(m, "qeth: %s firmware level %s\n",
+			CARD_BUS_ID(card), card->info.mcl_level);
+}
+
+static struct qeth_card *qeth_alloc_card(void)
+{
+	struct qeth_card *card;
+
+	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
+	card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
+	if (!card)
+		goto out;
+	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+	card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+	if (!card->ip_tbd_list) {
+		QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
+		goto out_card;
+	}
+	if (qeth_setup_channel(&card->read))
+		goto out_ip;
+	if (qeth_setup_channel(&card->write))
+		goto out_channel;
+	card->options.layer2 = -1;
+	card->qeth_service_level.seq_print = qeth_core_sl_print;
+	register_service_level(&card->qeth_service_level);
+	return card;
+
+out_channel:
+	qeth_clean_channel(&card->read);
+out_ip:
+	kfree(card->ip_tbd_list);
+out_card:
+	kfree(card);
+out:
+	return NULL;
+}
+
+static int qeth_determine_card_type(struct qeth_card *card)
+{
+	int i = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "detcdtyp");
+
+	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
+	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+	while (known_devices[i][QETH_DEV_MODEL_IND]) {
+		if ((CARD_RDEV(card)->id.dev_type ==
+				known_devices[i][QETH_DEV_TYPE_IND]) &&
+		    (CARD_RDEV(card)->id.dev_model ==
+				known_devices[i][QETH_DEV_MODEL_IND])) {
+			card->info.type = known_devices[i][QETH_DEV_MODEL_IND];
+			card->qdio.no_out_queues =
+				known_devices[i][QETH_QUEUE_NO_IND];
+			card->qdio.no_in_queues = 1;
+			card->info.is_multicast_different =
+				known_devices[i][QETH_MULTICAST_IND];
+			qeth_get_channel_path_desc(card);
+			return 0;
+		}
+		i++;
+	}
+	card->info.type = QETH_CARD_TYPE_UNKNOWN;
+	dev_err(&card->gdev->dev, "The adapter hardware is of an "
+		"unknown type\n");
+	return -ENOENT;
+}
+
+static int qeth_clear_channel(struct qeth_channel *channel)
+{
+	unsigned long flags;
+	struct qeth_card *card;
+	int rc;
+
+	card = CARD_FROM_CDEV(channel->ccwdev);
+	QETH_CARD_TEXT(card, 3, "clearch");
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+	if (rc)
+		return rc;
+	rc = wait_event_interruptible_timeout(card->wait_q,
+			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
+	if (rc == -ERESTARTSYS)
+		return rc;
+	if (channel->state != CH_STATE_STOPPED)
+		return -ETIME;
+	channel->state = CH_STATE_DOWN;
+	return 0;
+}
+
+static int qeth_halt_channel(struct qeth_channel *channel)
+{
+	unsigned long flags;
+	struct qeth_card *card;
+	int rc;
+
+	card = CARD_FROM_CDEV(channel->ccwdev);
+	QETH_CARD_TEXT(card, 3, "haltch");
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+	if (rc)
+		return rc;
+	rc = wait_event_interruptible_timeout(card->wait_q,
+			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
+	if (rc == -ERESTARTSYS)
+		return rc;
+	if (channel->state != CH_STATE_HALTED)
+		return -ETIME;
+	return 0;
+}
+
+static int qeth_halt_channels(struct qeth_card *card)
+{
+	int rc1 = 0, rc2 = 0, rc3 = 0;
+
+	QETH_CARD_TEXT(card, 3, "haltchs");
+	rc1 = qeth_halt_channel(&card->read);
+	rc2 = qeth_halt_channel(&card->write);
+	rc3 = qeth_halt_channel(&card->data);
+	if (rc1)
+		return rc1;
+	if (rc2)
+		return rc2;
+	return rc3;
+}
+
+static int qeth_clear_channels(struct qeth_card *card)
+{
+	int rc1 = 0, rc2 = 0, rc3 = 0;
+
+	QETH_CARD_TEXT(card, 3, "clearchs");
+	rc1 = qeth_clear_channel(&card->read);
+	rc2 = qeth_clear_channel(&card->write);
+	rc3 = qeth_clear_channel(&card->data);
+	if (rc1)
+		return rc1;
+	if (rc2)
+		return rc2;
+	return rc3;
+}
+
+static int qeth_clear_halt_card(struct qeth_card *card, int halt)
+{
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 3, "clhacrd");
+
+	if (halt)
+		rc = qeth_halt_channels(card);
+	if (rc)
+		return rc;
+	return qeth_clear_channels(card);
+}
+
+int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
+{
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 3, "qdioclr");
+	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
+		QETH_QDIO_CLEANING)) {
+	case QETH_QDIO_ESTABLISHED:
+		if (card->info.type == QETH_CARD_TYPE_IQD)
+			rc = qdio_shutdown(CARD_DDEV(card),
+				QDIO_FLAG_CLEANUP_USING_HALT);
+		else
+			rc = qdio_shutdown(CARD_DDEV(card),
+				QDIO_FLAG_CLEANUP_USING_CLEAR);
+		if (rc)
+			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
+		qdio_free(CARD_DDEV(card));
+		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
+		break;
+	case QETH_QDIO_CLEANING:
+		return rc;
+	default:
+		break;
+	}
+	rc = qeth_clear_halt_card(card, use_halt);
+	if (rc)
+		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
+	card->state = CARD_STATE_DOWN;
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
+
+static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
+			       int *length)
+{
+	struct ciw *ciw;
+	char *rcd_buf;
+	int ret;
+	struct qeth_channel *channel = &card->data;
+	unsigned long flags;
+
+	/*
+	 * scan for RCD command in extended SenseID data
+	 */
+	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
+	if (!ciw || ciw->cmd == 0)
+		return -EOPNOTSUPP;
+	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
+	if (!rcd_buf)
+		return -ENOMEM;
+
+	channel->ccw.cmd_code = ciw->cmd;
+	channel->ccw.cda = (__u32) __pa(rcd_buf);
+	channel->ccw.count = ciw->count;
+	channel->ccw.flags = CCW_FLAG_SLI;
+	channel->state = CH_STATE_RCD;
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+				       QETH_RCD_PARM, LPM_ANYPATH, 0,
+				       QETH_RCD_TIMEOUT);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (!ret)
+		wait_event(card->wait_q,
+			   (channel->state == CH_STATE_RCD_DONE ||
+			    channel->state == CH_STATE_DOWN));
+	if (channel->state == CH_STATE_DOWN)
+		ret = -EIO;
+	else
+		channel->state = CH_STATE_DOWN;
+	if (ret) {
+		kfree(rcd_buf);
+		*buffer = NULL;
+		*length = 0;
+	} else {
+		*length = ciw->count;
+		*buffer = rcd_buf;
+	}
+	return ret;
+}
+
+static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
+{
+	QETH_DBF_TEXT(SETUP, 2, "cfgunit");
+	card->info.chpid = prcd[30];
+	card->info.unit_addr2 = prcd[31];
+	card->info.cula = prcd[63];
+	card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
+			       (prcd[0x11] == _ascebc['M']));
+}
+
+static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
+{
+	QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
+
+	if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
+	    (prcd[76] == 0xF5 || prcd[76] == 0xF6)) {
+		card->info.blkt.time_total = 250;
+		card->info.blkt.inter_packet = 5;
+		card->info.blkt.inter_packet_jumbo = 15;
+	} else {
+		card->info.blkt.time_total = 0;
+		card->info.blkt.inter_packet = 0;
+		card->info.blkt.inter_packet_jumbo = 0;
+	}
+}
+
+static void qeth_init_tokens(struct qeth_card *card)
+{
+	card->token.issuer_rm_w = 0x00010103UL;
+	card->token.cm_filter_w = 0x00010108UL;
+	card->token.cm_connection_w = 0x0001010aUL;
+	card->token.ulp_filter_w = 0x0001010bUL;
+	card->token.ulp_connection_w = 0x0001010dUL;
+}
+
+static void qeth_init_func_level(struct qeth_card *card)
+{
+	switch (card->info.type) {
+	case QETH_CARD_TYPE_IQD:
+		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
+		break;
+	case QETH_CARD_TYPE_OSD:
+	case QETH_CARD_TYPE_OSN:
+		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
+		break;
+	default:
+		break;
+	}
+}
+
+static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
+		void (*idx_reply_cb)(struct qeth_channel *,
+			struct qeth_cmd_buffer *))
+{
+	struct qeth_cmd_buffer *iob;
+	unsigned long flags;
+	int rc;
+	struct qeth_card *card;
+
+	QETH_DBF_TEXT(SETUP, 2, "idxanswr");
+	card = CARD_FROM_CDEV(channel->ccwdev);
+	iob = qeth_get_buffer(channel);
+	iob->callback = idx_reply_cb;
+	memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
+	channel->ccw.count = QETH_BUFSIZE;
+	channel->ccw.cda = (__u32) __pa(iob->data);
+
+	wait_event(card->wait_q,
+		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
+	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_start(channel->ccwdev,
+			      &channel->ccw, (addr_t) iob, 0, 0);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
+		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+		atomic_set(&channel->irq_pending, 0);
+		wake_up(&card->wait_q);
+		return rc;
+	}
+	rc = wait_event_interruptible_timeout(card->wait_q,
+			 channel->state == CH_STATE_UP, QETH_TIMEOUT);
+	if (rc == -ERESTARTSYS)
+		return rc;
+	if (channel->state != CH_STATE_UP) {
+		rc = -ETIME;
+		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+		qeth_clear_cmd_buffers(channel);
+	} else
+		rc = 0;
+	return rc;
+}
+
+static int qeth_idx_activate_channel(struct qeth_channel *channel,
+		void (*idx_reply_cb)(struct qeth_channel *,
+			struct qeth_cmd_buffer *))
+{
+	struct qeth_card *card;
+	struct qeth_cmd_buffer *iob;
+	unsigned long flags;
+	__u16 temp;
+	__u8 tmp;
+	int rc;
+	struct ccw_dev_id temp_devid;
+
+	card = CARD_FROM_CDEV(channel->ccwdev);
+
+	QETH_DBF_TEXT(SETUP, 2, "idxactch");
+
+	iob = qeth_get_buffer(channel);
+	iob->callback = idx_reply_cb;
+	memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
+	channel->ccw.count = IDX_ACTIVATE_SIZE;
+	channel->ccw.cda = (__u32) __pa(iob->data);
+	if (channel == &card->write) {
+		memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
+		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
+		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
+		card->seqno.trans_hdr++;
+	} else {
+		memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
+		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
+		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
+	}
+	tmp = ((__u8)card->info.portno) | 0x80;
+	memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
+	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
+	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
+	       &card->info.func_level, sizeof(__u16));
+	ccw_device_get_id(CARD_DDEV(card), &temp_devid);
+	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
+	temp = (card->info.cula << 8) + card->info.unit_addr2;
+	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
+
+	wait_event(card->wait_q,
+		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
+	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_start(channel->ccwdev,
+			      &channel->ccw, (addr_t) iob, 0, 0);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
+			rc);
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+		atomic_set(&channel->irq_pending, 0);
+		wake_up(&card->wait_q);
+		return rc;
+	}
+	rc = wait_event_interruptible_timeout(card->wait_q,
+			channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
+	if (rc == -ERESTARTSYS)
+		return rc;
+	if (channel->state != CH_STATE_ACTIVATING) {
+		dev_warn(&channel->ccwdev->dev, "The qeth device driver"
+			" failed to recover an error on the device\n");
+		QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
+			dev_name(&channel->ccwdev->dev));
+		QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
+		qeth_clear_cmd_buffers(channel);
+		return -ETIME;
+	}
+	return qeth_idx_activate_get_answer(channel, idx_reply_cb);
+}
+
+static int qeth_peer_func_level(int level)
+{
+	if ((level & 0xff) == 8)
+		return (level & 0xff) + 0x400;
+	if (((level >> 8) & 3) == 1)
+		return (level & 0xff) + 0x200;
+	return level;
+}
+
+static void qeth_idx_write_cb(struct qeth_channel *channel,
+		struct qeth_cmd_buffer *iob)
+{
+	struct qeth_card *card;
+	__u16 temp;
+
+	QETH_DBF_TEXT(SETUP, 2, "idxwrcb");
+
+	if (channel->state == CH_STATE_DOWN) {
+		channel->state = CH_STATE_ACTIVATING;
+		goto out;
+	}
+	card = CARD_FROM_CDEV(channel->ccwdev);
+
+	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
+		if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
+			dev_err(&card->write.ccwdev->dev,
+				"The adapter is used exclusively by another "
+				"host\n");
+		else
+			QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
+				" negative reply\n",
+				dev_name(&card->write.ccwdev->dev));
+		goto out;
+	}
+	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
+	if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
+		QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
+			"function level mismatch (sent: 0x%x, received: "
+			"0x%x)\n", dev_name(&card->write.ccwdev->dev),
+			card->info.func_level, temp);
+		goto out;
+	}
+	channel->state = CH_STATE_UP;
+out:
+	qeth_release_buffer(channel, iob);
+}
+
+static void qeth_idx_read_cb(struct qeth_channel *channel,
+		struct qeth_cmd_buffer *iob)
+{
+	struct qeth_card *card;
+	__u16 temp;
+
+	QETH_DBF_TEXT(SETUP, 2, "idxrdcb");
+	if (channel->state == CH_STATE_DOWN) {
+		channel->state = CH_STATE_ACTIVATING;
+		goto out;
+	}
+
+	card = CARD_FROM_CDEV(channel->ccwdev);
+	if (qeth_check_idx_response(card, iob->data))
+		goto out;
+
+	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
+		switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
+		case QETH_IDX_ACT_ERR_EXCL:
+			dev_err(&card->write.ccwdev->dev,
+				"The adapter is used exclusively by another "
+				"host\n");
+			break;
+		case QETH_IDX_ACT_ERR_AUTH:
+		case QETH_IDX_ACT_ERR_AUTH_USER:
+			dev_err(&card->read.ccwdev->dev,
+				"Setting the device online failed because of "
+				"insufficient authorization\n");
+			break;
+		default:
+			QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
+				" negative reply\n",
+				dev_name(&card->read.ccwdev->dev));
+		}
+		QETH_CARD_TEXT_(card, 2, "idxread%c",
+			QETH_IDX_ACT_CAUSE_CODE(iob->data));
+		goto out;
+	}
+
+	/*
+	 * temporary fix for microcode bug
+	 * to revert it, replace OR by AND
+	 */
+	if ((!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
+	     (card->info.type == QETH_CARD_TYPE_OSD))
+		card->info.portname_required = 1;
+
+	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
+	if (temp != qeth_peer_func_level(card->info.func_level)) {
+		QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
+			"level mismatch (sent: 0x%x, received: 0x%x)\n",
+			dev_name(&card->read.ccwdev->dev),
+			card->info.func_level, temp);
+		goto out;
+	}
+	memcpy(&card->token.issuer_rm_r,
+	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
+	       QETH_MPC_TOKEN_LENGTH);
+	memcpy(&card->info.mcl_level[0],
+	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
+	channel->state = CH_STATE_UP;
+out:
+	qeth_release_buffer(channel, iob);
+}
+
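+/*
+ * Stamp the transport-header and PDU-header sequence numbers into the
+ * control-data buffer and set up the write CCW for it.
+ */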
+void qeth_prepare_control_data(struct qeth_card *card, int len,
+		struct qeth_cmd_buffer *iob)
+{
+	qeth_setup_ccw(&card->write, iob->data, len);
+	iob->callback = qeth_release_buffer;
+
+	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
+	       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
+	card->seqno.trans_hdr++;
+	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
+	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
+	card->seqno.pdu_hdr++;
+	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
+	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
+	QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
+}
+EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
+
+int qeth_send_control_data(struct qeth_card *card, int len,
+		struct qeth_cmd_buffer *iob,
+		int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
+			unsigned long),
+		void *reply_param)
+{
+	int rc;
+	unsigned long flags;
+	struct qeth_reply *reply = NULL;
+	unsigned long timeout, event_timeout;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 2, "sendctl");
+
+	if (card->read_or_write_problem) {
+		qeth_release_buffer(iob->channel, iob);
+		return -EIO;
+	}
+	reply = qeth_alloc_reply(card);
+	if (!reply) {
+		return -ENOMEM;
+	}
+	reply->callback = reply_cb;
+	reply->param = reply_param;
+	if (card->state == CARD_STATE_DOWN)
+		reply->seqno = QETH_IDX_COMMAND_SEQNO;
+	else
+		reply->seqno = card->seqno.ipa++;
+	init_waitqueue_head(&reply->wait_q);
+	spin_lock_irqsave(&card->lock, flags);
+	list_add_tail(&reply->list, &card->cmd_waiter_list);
+	spin_unlock_irqrestore(&card->lock, flags);
+	QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
+
+	while (atomic_cmpxchg(&card->write.irq_pending, 0, 1))
+		;
+	qeth_prepare_control_data(card, len, iob);
+
+	if (IS_IPA(iob->data))
+		event_timeout = QETH_IPA_TIMEOUT;
+	else
+		event_timeout = QETH_TIMEOUT;
+	timeout = jiffies + event_timeout;
+
+	QETH_CARD_TEXT(card, 6, "noirqpnd");
+	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
+	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
+			      (addr_t) iob, 0, 0);
+	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
+			"ccw_device_start rc = %i\n",
+			dev_name(&card->write.ccwdev->dev), rc);
+		QETH_CARD_TEXT_(card, 2, " err%d", rc);
+		spin_lock_irqsave(&card->lock, flags);
+		list_del_init(&reply->list);
+		qeth_put_reply(reply);
+		spin_unlock_irqrestore(&card->lock, flags);
+		qeth_release_buffer(iob->channel, iob);
+		atomic_set(&card->write.irq_pending, 0);
+		wake_up(&card->wait_q);
+		return rc;
+	}
+
+	/* SETIP for IPv4 is the only long-running IP assist; it is issued in
+	 * process context, so we may sleep while waiting for the reply */
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	if ((cmd->hdr.command == IPA_CMD_SETIP) &&
+	    (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
+		if (!wait_event_timeout(reply->wait_q,
+		    atomic_read(&reply->received), event_timeout))
+			goto time_err;
+	} else {
+		while (!atomic_read(&reply->received)) {
+			if (time_after(jiffies, timeout))
+				goto time_err;
+			cpu_relax();
+		}
+	}
+
+	if (reply->rc == -EIO)
+		goto error;
+	rc = reply->rc;
+	qeth_put_reply(reply);
+	return rc;
+
+time_err:
+	reply->rc = -ETIME;
+	spin_lock_irqsave(&reply->card->lock, flags);
+	list_del_init(&reply->list);
+	spin_unlock_irqrestore(&reply->card->lock, flags);
+	atomic_inc(&reply->received);
+error:
+	atomic_set(&card->write.irq_pending, 0);
+	qeth_release_buffer(iob->channel, iob);
+	card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
+	rc = reply->rc;
+	qeth_put_reply(reply);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_send_control_data);
+
+static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
+		unsigned long data)
+{
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(SETUP, 2, "cmenblcb");
+
+	iob = (struct qeth_cmd_buffer *) data;
+	memcpy(&card->token.cm_filter_r,
+	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
+	       QETH_MPC_TOKEN_LENGTH);
+	QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
+	return 0;
+}
+
+static int qeth_cm_enable(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(SETUP, 2, "cmenable");
+
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
+	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
+	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
+	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
+
+	rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
+				    qeth_cm_enable_cb, NULL);
+	return rc;
+}
+
+static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
+		unsigned long data)
+{
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(SETUP, 2, "cmsetpcb");
+
+	iob = (struct qeth_cmd_buffer *) data;
+	memcpy(&card->token.cm_connection_r,
+	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
+	       QETH_MPC_TOKEN_LENGTH);
+	QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
+	return 0;
+}
+
+static int qeth_cm_setup(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(SETUP, 2, "cmsetup");
+
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
+	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
+	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
+	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
+	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
+	rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
+				    qeth_cm_setup_cb, NULL);
+	return rc;
+
+}
+
+static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
+{
+	switch (card->info.type) {
+	case QETH_CARD_TYPE_UNKNOWN:
+		return 1500;
+	case QETH_CARD_TYPE_IQD:
+		return card->info.max_mtu;
+	case QETH_CARD_TYPE_OSD:
+		switch (card->info.link_type) {
+		case QETH_LINK_TYPE_HSTR:
+		case QETH_LINK_TYPE_LANE_TR:
+			return 2000;
+		default:
+			return 1492;
+		}
+	case QETH_CARD_TYPE_OSM:
+	case QETH_CARD_TYPE_OSX:
+		return 1492;
+	default:
+		return 1500;
+	}
+}
+
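+/* Map the frame-size code reported by ULP ENABLE to an MTU in bytes. */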
+static inline int qeth_get_mtu_outof_framesize(int framesize)
+{
+	switch (framesize) {
+	case 0x4000:
+		return 8192;
+	case 0x6000:
+		return 16384;
+	case 0xa000:
+		return 32768;
+	case 0xffff:
+		return 57344;
+	default:
+		return 0;
+	}
+}
+
+static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
+{
+	switch (card->info.type) {
+	case QETH_CARD_TYPE_OSD:
+	case QETH_CARD_TYPE_OSM:
+	case QETH_CARD_TYPE_OSX:
+	case QETH_CARD_TYPE_IQD:
+		return ((mtu >= 576) &&
+			(mtu <= card->info.max_mtu));
+	case QETH_CARD_TYPE_OSN:
+	case QETH_CARD_TYPE_UNKNOWN:
+	default:
+		return 1;
+	}
+}
+
+static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
+		unsigned long data)
+{
+	__u16 mtu, framesize;
+	__u16 len;
+	__u8 link_type;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(SETUP, 2, "ulpenacb");
+
+	iob = (struct qeth_cmd_buffer *) data;
+	memcpy(&card->token.ulp_filter_r,
+	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
+	       QETH_MPC_TOKEN_LENGTH);
+	if (card->info.type == QETH_CARD_TYPE_IQD) {
+		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
+		mtu = qeth_get_mtu_outof_framesize(framesize);
+		if (!mtu) {
+			iob->rc = -EINVAL;
+			QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
+			return 0;
+		}
+		if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
+			/* frame size has changed */
+			if (card->dev &&
+			    ((card->dev->mtu == card->info.initial_mtu) ||
+			     (card->dev->mtu > mtu)))
+				card->dev->mtu = mtu;
+			qeth_free_qdio_buffers(card);
+		}
+		card->info.initial_mtu = mtu;
+		card->info.max_mtu = mtu;
+		card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
+	} else {
+		card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
+		card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
+			iob->data);
+		card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
+	}
+
+	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
+	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
+		memcpy(&link_type,
+		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
+		card->info.link_type = link_type;
+	} else
+		card->info.link_type = 0;
+	QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
+	QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
+	return 0;
+}
+
+static int qeth_ulp_enable(struct qeth_card *card)
+{
+	int rc;
+	char prot_type;
+	struct qeth_cmd_buffer *iob;
+
+	/*FIXME: trace view callbacks*/
+	QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
+
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
+
+	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
+		(__u8) card->info.portno;
+	if (card->options.layer2)
+		if (card->info.type == QETH_CARD_TYPE_OSN)
+			prot_type = QETH_PROT_OSN2;
+		else
+			prot_type = QETH_PROT_LAYER2;
+	else
+		prot_type = QETH_PROT_TCPIP;
+
+	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
+	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
+	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
+	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
+	       card->info.portname, 9);
+	rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
+				    qeth_ulp_enable_cb, NULL);
+	return rc;
+
+}
+
+static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
+		unsigned long data)
+{
+	struct qeth_cmd_buffer *iob;
+	int rc = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
+
+	iob = (struct qeth_cmd_buffer *) data;
+	memcpy(&card->token.ulp_connection_r,
+	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
+	       QETH_MPC_TOKEN_LENGTH);
+	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
+		     3)) {
+		QETH_DBF_TEXT(SETUP, 2, "olmlimit");
+		dev_err(&card->gdev->dev, "A connection could not be "
+			"established because of an OLM limit\n");
+		iob->rc = -EMLINK;
+	}
+	QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
+	return rc;
+}
+
+static int qeth_ulp_setup(struct qeth_card *card)
+{
+	int rc;
+	__u16 temp;
+	struct qeth_cmd_buffer *iob;
+	struct ccw_dev_id dev_id;
+
+	QETH_DBF_TEXT(SETUP, 2, "ulpsetup");
+
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
+
+	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
+	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
+	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
+	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
+
+	ccw_device_get_id(CARD_DDEV(card), &dev_id);
+	memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
+	temp = (card->info.cula << 8) + card->info.unit_addr2;
+	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
+	rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
+				    qeth_ulp_setup_cb, NULL);
+	return rc;
+}
+
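+/*
+ * Allocate a fresh outbound buffer descriptor for slot bidx and chain
+ * the previous occupant onto its next_pending list so that delayed
+ * completions can still be processed.
+ */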
+static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
+{
+	int rc;
+	struct qeth_qdio_out_buffer *newbuf;
+
+	rc = 0;
+	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
+	if (!newbuf) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	newbuf->buffer = &q->qdio_bufs[bidx];
+	skb_queue_head_init(&newbuf->skb_list);
+	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
+	newbuf->q = q;
+	newbuf->aob = NULL;
+	newbuf->next_pending = q->bufs[bidx];
+	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
+	q->bufs[bidx] = newbuf;
+	if (q->bufstates) {
+		q->bufstates[bidx].user = newbuf;
+		QETH_CARD_TEXT_(q->card, 2, "nbs%d", bidx);
+		QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf);
+		QETH_CARD_TEXT_(q->card, 2, "%lx",
+				(long) newbuf->next_pending);
+	}
+out:
+	return rc;
+}
+
+
+static int qeth_alloc_qdio_buffers(struct qeth_card *card)
+{
+	int i, j;
+
+	QETH_DBF_TEXT(SETUP, 2, "allcqdbf");
+
+	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
+		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
+		return 0;
+
+	card->qdio.in_q = kzalloc(sizeof(struct qeth_qdio_q),
+				   GFP_KERNEL);
+	if (!card->qdio.in_q)
+		goto out_nomem;
+	QETH_DBF_TEXT(SETUP, 2, "inq");
+	QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *));
+	memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
+	/* give inbound qeth_qdio_buffers their qdio_buffers */
+	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+		card->qdio.in_q->bufs[i].buffer =
+			&card->qdio.in_q->qdio_bufs[i];
+		card->qdio.in_q->bufs[i].rx_skb = NULL;
+	}
+	/* inbound buffer pool */
+	if (qeth_alloc_buffer_pool(card))
+		goto out_freeinq;
+
+	/* outbound */
+	card->qdio.out_qs =
+		kzalloc(card->qdio.no_out_queues *
+			sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
+	if (!card->qdio.out_qs)
+		goto out_freepool;
+	for (i = 0; i < card->qdio.no_out_queues; ++i) {
+		card->qdio.out_qs[i] = kzalloc(sizeof(struct qeth_qdio_out_q),
+					       GFP_KERNEL);
+		if (!card->qdio.out_qs[i])
+			goto out_freeoutq;
+		QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
+		QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
+		card->qdio.out_qs[i]->queue_no = i;
+		/* give outbound qeth_qdio_buffers their qdio_buffers */
+		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+			BUG_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
+			if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
+				goto out_freeoutqbufs;
+		}
+	}
+
+	/* completion */
+	if (qeth_alloc_cq(card))
+		goto out_freeoutq;
+
+	return 0;
+
+out_freeoutqbufs:
+	while (j > 0) {
+		--j;
+		kmem_cache_free(qeth_qdio_outbuf_cache,
+				card->qdio.out_qs[i]->bufs[j]);
+		card->qdio.out_qs[i]->bufs[j] = NULL;
+	}
+out_freeoutq:
+	while (i > 0) {
+		kfree(card->qdio.out_qs[--i]);
+		qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
+	}
+	kfree(card->qdio.out_qs);
+	card->qdio.out_qs = NULL;
+out_freepool:
+	qeth_free_buffer_pool(card);
+out_freeinq:
+	kfree(card->qdio.in_q);
+	card->qdio.in_q = NULL;
+out_nomem:
+	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
+	return -ENOMEM;
+}
+
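+/*
+ * Write the EBCDIC "PCIT" tag plus the PCI thresholds and timer value
+ * into the QIB parameter field.
+ */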
+static void qeth_create_qib_param_field(struct qeth_card *card,
+		char *param_field)
+{
+
+	param_field[0] = _ascebc['P'];
+	param_field[1] = _ascebc['C'];
+	param_field[2] = _ascebc['I'];
+	param_field[3] = _ascebc['T'];
+	*((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
+	*((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
+	*((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
+}
+
+static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
+		char *param_field)
+{
+	param_field[16] = _ascebc['B'];
+	param_field[17] = _ascebc['L'];
+	param_field[18] = _ascebc['K'];
+	param_field[19] = _ascebc['T'];
+	*((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
+	*((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
+	*((unsigned int *) (&param_field[28])) =
+		card->info.blkt.inter_packet_jumbo;
+}
+
+static int qeth_qdio_activate(struct qeth_card *card)
+{
+	QETH_DBF_TEXT(SETUP, 3, "qdioact");
+	return qdio_activate(CARD_DDEV(card));
+}
+
+static int qeth_dm_act(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(SETUP, 2, "dmact");
+
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
+
+	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
+	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
+	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+	rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
+	return rc;
+}
+
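+/*
+ * Run the MPC initialization sequence: issue the first read, do CM
+ * enable/setup and ULP enable/setup, allocate and establish the QDIO
+ * queues, activate QDIO and finally send DM_ACT. On error the card's
+ * QDIO queues are cleared again.
+ */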
+static int qeth_mpc_initialize(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_DBF_TEXT(SETUP, 2, "mpcinit");
+
+	rc = qeth_issue_next_read(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+		return rc;
+	}
+	rc = qeth_cm_enable(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+		goto out_qdio;
+	}
+	rc = qeth_cm_setup(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+		goto out_qdio;
+	}
+	rc = qeth_ulp_enable(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+		goto out_qdio;
+	}
+	rc = qeth_ulp_setup(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+		goto out_qdio;
+	}
+	rc = qeth_alloc_qdio_buffers(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+		goto out_qdio;
+	}
+	rc = qeth_qdio_establish(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+		qeth_free_qdio_buffers(card);
+		goto out_qdio;
+	}
+	rc = qeth_qdio_activate(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+		goto out_qdio;
+	}
+	rc = qeth_dm_act(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
+		goto out_qdio;
+	}
+
+	return 0;
+out_qdio:
+	qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+	return rc;
+}
+
+static void qeth_print_status_with_portname(struct qeth_card *card)
+{
+	char dbf_text[15];
+	int i;
+
+	sprintf(dbf_text, "%s", card->info.portname + 1);
+	for (i = 0; i < 8; i++)
+		dbf_text[i] =
+			(char) _ebcasc[(__u8) dbf_text[i]];
+	dbf_text[8] = 0;
+	dev_info(&card->gdev->dev, "Device is a%s card%s%s%s\n"
+	       "with link type %s (portname: %s)\n",
+	       qeth_get_cardname(card),
+	       (card->info.mcl_level[0]) ? " (level: " : "",
+	       (card->info.mcl_level[0]) ? card->info.mcl_level : "",
+	       (card->info.mcl_level[0]) ? ")" : "",
+	       qeth_get_cardname_short(card),
+	       dbf_text);
+
+}
+
+static void qeth_print_status_no_portname(struct qeth_card *card)
+{
+	if (card->info.portname[0])
+		dev_info(&card->gdev->dev, "Device is a%s "
+		       "card%s%s%s\nwith link type %s "
+		       "(no portname needed by interface).\n",
+		       qeth_get_cardname(card),
+		       (card->info.mcl_level[0]) ? " (level: " : "",
+		       (card->info.mcl_level[0]) ? card->info.mcl_level : "",
+		       (card->info.mcl_level[0]) ? ")" : "",
+		       qeth_get_cardname_short(card));
+	else
+		dev_info(&card->gdev->dev, "Device is a%s "
+		       "card%s%s%s\nwith link type %s.\n",
+		       qeth_get_cardname(card),
+		       (card->info.mcl_level[0]) ? " (level: " : "",
+		       (card->info.mcl_level[0]) ? card->info.mcl_level : "",
+		       (card->info.mcl_level[0]) ? ")" : "",
+		       qeth_get_cardname_short(card));
+}
+
+void qeth_print_status_message(struct qeth_card *card)
+{
+	switch (card->info.type) {
+	case QETH_CARD_TYPE_OSD:
+	case QETH_CARD_TYPE_OSM:
+	case QETH_CARD_TYPE_OSX:
+		/* VM uses a non-zero first character to indicate a
+		 * HiperSockets-like reporting of the level;
+		 * OSA sets the first character to zero.
+		 */
+		if (!card->info.mcl_level[0]) {
+			sprintf(card->info.mcl_level, "%02x%02x",
+				card->info.mcl_level[2],
+				card->info.mcl_level[3]);
+
+			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
+			break;
+		}
+		/* fallthrough */
+	case QETH_CARD_TYPE_IQD:
+		if ((card->info.guestlan) ||
+		    (card->info.mcl_level[0] & 0x80)) {
+			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
+				card->info.mcl_level[0]];
+			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
+				card->info.mcl_level[1]];
+			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
+				card->info.mcl_level[2]];
+			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
+				card->info.mcl_level[3]];
+			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
+		}
+		break;
+	default:
+		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
+	}
+	if (card->info.portname_required)
+		qeth_print_status_with_portname(card);
+	else
+		qeth_print_status_no_portname(card);
+}
+EXPORT_SYMBOL_GPL(qeth_print_status_message);
+
+static void qeth_initialize_working_pool_list(struct qeth_card *card)
+{
+	struct qeth_buffer_pool_entry *entry;
+
+	QETH_CARD_TEXT(card, 5, "inwrklst");
+
+	list_for_each_entry(entry,
+			    &card->qdio.init_pool.entry_list, init_list) {
+		qeth_put_buffer_pool_entry(card, entry);
+	}
+}
+
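+/*
+ * Pick a pool entry whose pages are no longer referenced elsewhere.
+ * If none is free, reuse the first entry and replace any still
+ * referenced pages with freshly allocated ones.
+ */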
+static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
+		struct qeth_card *card)
+{
+	struct list_head *plh;
+	struct qeth_buffer_pool_entry *entry;
+	int i, free;
+	struct page *page;
+
+	if (list_empty(&card->qdio.in_buf_pool.entry_list))
+		return NULL;
+
+	list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
+		entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
+		free = 1;
+		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
+			if (page_count(virt_to_page(entry->elements[i])) > 1) {
+				free = 0;
+				break;
+			}
+		}
+		if (free) {
+			list_del_init(&entry->list);
+			return entry;
+		}
+	}
+
+	/* no free buffer in pool so take first one and swap pages */
+	entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
+			struct qeth_buffer_pool_entry, list);
+	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
+		if (page_count(virt_to_page(entry->elements[i])) > 1) {
+			page = alloc_page(GFP_ATOMIC);
+			if (!page) {
+				return NULL;
+			} else {
+				free_page((unsigned long)entry->elements[i]);
+				entry->elements[i] = page_address(page);
+				if (card->options.performance_stats)
+					card->perf_stats.sg_alloc_page_rx++;
+			}
+		}
+	}
+	list_del_init(&entry->list);
+	return entry;
+}
+
+static int qeth_init_input_buffer(struct qeth_card *card,
+		struct qeth_qdio_buffer *buf)
+{
+	struct qeth_buffer_pool_entry *pool_entry;
+	int i;
+
+	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
+		buf->rx_skb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN);
+		if (!buf->rx_skb)
+			return 1;
+	}
+
+	pool_entry = qeth_find_free_buffer_pool_entry(card);
+	if (!pool_entry)
+		return 1;
+
+	/*
+	 * Since the buffer is accessed only from the input_tasklet,
+	 * there shouldn't be a need to synchronize; also, since we use
+	 * QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
+	 * buffers.
+	 */
+
+	buf->pool_entry = pool_entry;
+	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
+		buf->buffer->element[i].length = PAGE_SIZE;
+		buf->buffer->element[i].addr =  pool_entry->elements[i];
+		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
+			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
+		else
+			buf->buffer->element[i].eflags = 0;
+		buf->buffer->element[i].sflags = 0;
+	}
+	return 0;
+}
+
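+/*
+ * Prime the inbound queue with buf_count - 1 buffers from the pool,
+ * initialize the completion queue and reset all outbound queues to
+ * the empty, non-packing state.
+ */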
+int qeth_init_qdio_queues(struct qeth_card *card)
+{
+	int i, j;
+	int rc;
+
+	QETH_DBF_TEXT(SETUP, 2, "initqdqs");
+
+	/* inbound queue */
+	memset(card->qdio.in_q->qdio_bufs, 0,
+	       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
+	qeth_initialize_working_pool_list(card);
+	/* give only as many buffers to hardware as we have buffer pool entries */
+	for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
+		qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
+	card->qdio.in_q->next_buf_to_init =
+		card->qdio.in_buf_pool.buf_count - 1;
+	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
+		     card->qdio.in_buf_pool.buf_count - 1);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+		return rc;
+	}
+
+	/* completion */
+	rc = qeth_cq_init(card);
+	if (rc) {
+		return rc;
+	}
+
+	/* outbound queue */
+	for (i = 0; i < card->qdio.no_out_queues; ++i) {
+		memset(card->qdio.out_qs[i]->qdio_bufs, 0,
+		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
+		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+			qeth_clear_output_buffer(card->qdio.out_qs[i],
+					card->qdio.out_qs[i]->bufs[j],
+					QETH_QDIO_BUF_EMPTY);
+		}
+		card->qdio.out_qs[i]->card = card;
+		card->qdio.out_qs[i]->next_buf_to_fill = 0;
+		card->qdio.out_qs[i]->do_pack = 0;
+		atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
+		atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
+		atomic_set(&card->qdio.out_qs[i]->state,
+			   QETH_OUT_Q_UNLOCKED);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
+
+static inline __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
+{
+	switch (link_type) {
+	case QETH_LINK_TYPE_HSTR:
+		return 2;
+	default:
+		return 1;
+	}
+}
+
+static void qeth_fill_ipacmd_header(struct qeth_card *card,
+		struct qeth_ipa_cmd *cmd, __u8 command,
+		enum qeth_prot_versions prot)
+{
+	memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
+	cmd->hdr.command = command;
+	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
+	cmd->hdr.seqno = card->seqno.ipa;
+	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
+	cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
+	if (card->options.layer2)
+		cmd->hdr.prim_version_no = 2;
+	else
+		cmd->hdr.prim_version_no = 1;
+	cmd->hdr.param_count = 1;
+	cmd->hdr.prot_version = prot;
+	cmd->hdr.ipa_supported = 0;
+	cmd->hdr.ipa_enabled = 0;
+}
+
+struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
+		enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	iob = qeth_wait_for_buffer(&card->write);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+
+	return iob;
+}
+EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
+
+void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+		char prot_type)
+{
+	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
+	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
+	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
+	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+}
+EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
+
+int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
+			unsigned long),
+		void *reply_param)
+{
+	int rc;
+	char prot_type;
+
+	QETH_CARD_TEXT(card, 4, "sendipa");
+
+	if (card->options.layer2)
+		if (card->info.type == QETH_CARD_TYPE_OSN)
+			prot_type = QETH_PROT_OSN2;
+		else
+			prot_type = QETH_PROT_LAYER2;
+	else
+		prot_type = QETH_PROT_TCPIP;
+	qeth_prepare_ipa_cmd(card, iob, prot_type);
+	rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
+						iob, reply_cb, reply_param);
+	if (rc == -ETIME) {
+		qeth_clear_ipacmd_list(card);
+		qeth_schedule_recovery(card);
+	}
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
+
+int qeth_send_startlan(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(SETUP, 2, "strtlan");
+
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
+	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_send_startlan);
+
+int qeth_default_setadapterparms_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "defadpcb");
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->hdr.return_code == 0)
+		cmd->hdr.return_code =
+			cmd->data.setadapterparms.hdr.return_code;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_default_setadapterparms_cb);
+
+static int qeth_query_setadapterparms_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 3, "quyadpcb");
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
+		card->info.link_type =
+		      cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
+		QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
+	}
+	card->options.adp.supported_funcs =
+		cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
+	return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
+}
+
+struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
+		__u32 command, __u32 cmdlen)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
+				     QETH_PROT_IPV4);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
+	cmd->data.setadapterparms.hdr.command_code = command;
+	cmd->data.setadapterparms.hdr.used_total = 1;
+	cmd->data.setadapterparms.hdr.seq_no = 1;
+
+	return iob;
+}
+EXPORT_SYMBOL_GPL(qeth_get_adapter_cmd);
+
+int qeth_query_setadapterparms(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_CARD_TEXT(card, 3, "queryadp");
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
+				   sizeof(struct qeth_ipacmd_setadpparms));
+	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
+
+static int qeth_query_ipassists_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(SETUP, 2, "qipasscb");
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
+		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
+		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+	} else {
+		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
+		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
+	}
+	QETH_DBF_TEXT(SETUP, 2, "suppenbl");
+	QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_supported);
+	QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_enabled);
+	return 0;
+}
+
+int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
+	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_query_ipassists);
+
+static int qeth_query_setdiagass_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+	__u16 rc;
+
+	cmd = (struct qeth_ipa_cmd *)data;
+	rc = cmd->hdr.return_code;
+	if (rc)
+		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
+	else
+		card->info.diagass_support = cmd->data.diagass.ext;
+	return 0;
+}
+
+static int qeth_query_setdiagass(struct qeth_card *card)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd    *cmd;
+
+	QETH_DBF_TEXT(SETUP, 2, "qdiagass");
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.diagass.subcmd_len = 16;
+	cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
+	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
+}
+
+static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
+{
+	unsigned long info = get_zeroed_page(GFP_KERNEL);
+	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
+	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
+	struct ccw_dev_id ccwid;
+	int level, rc;
+
+	tid->chpid = card->info.chpid;
+	ccw_device_get_id(CARD_RDEV(card), &ccwid);
+	tid->ssid = ccwid.ssid;
+	tid->devno = ccwid.devno;
+	if (!info)
+		return;
+
+	rc = stsi(NULL, 0, 0, 0);
+	if (rc == -ENOSYS)
+		level = rc;
+	else
+		level = (((unsigned int) rc) >> 28);
+
+	if ((level >= 2) && (stsi(info222, 2, 2, 2) != -ENOSYS))
+		tid->lparnr = info222->lpar_number;
+
+	if ((level >= 3) && (stsi(info322, 3, 2, 2) != -ENOSYS)) {
+		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
+		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
+	}
+	free_page(info);
+	return;
+}
+
+static int qeth_hw_trap_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+	__u16 rc;
+
+	cmd = (struct qeth_ipa_cmd *)data;
+	rc = cmd->hdr.return_code;
+	if (rc)
+		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
+	return 0;
+}
+
+int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(SETUP, 2, "diagtrap");
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.diagass.subcmd_len = 80;
+	cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
+	cmd->data.diagass.type = 1;
+	cmd->data.diagass.action = action;
+	switch (action) {
+	case QETH_DIAGS_TRAP_ARM:
+		cmd->data.diagass.options = 0x0003;
+		cmd->data.diagass.ext = 0x00010000 +
+			sizeof(struct qeth_trap_id);
+		qeth_get_trap_id(card,
+			(struct qeth_trap_id *)cmd->data.diagass.cdata);
+		break;
+	case QETH_DIAGS_TRAP_DISARM:
+		cmd->data.diagass.options = 0x0001;
+		break;
+	case QETH_DIAGS_TRAP_CAPTURE:
+		break;
+	}
+	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
+}
+EXPORT_SYMBOL_GPL(qeth_hw_trap);
+
+int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
+		unsigned int qdio_error, const char *dbftext)
+{
+	if (qdio_error) {
+		QETH_CARD_TEXT(card, 2, dbftext);
+		QETH_CARD_TEXT_(card, 2, " F15=%02X",
+			       buf->element[15].sflags);
+		QETH_CARD_TEXT_(card, 2, " F14=%02X",
+			       buf->element[14].sflags);
+		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
+		if ((buf->element[15].sflags) == 0x12) {
+			card->stats.rx_dropped++;
+			return 0;
+		} else
+			return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_check_qdio_errors);
+
+void qeth_buffer_reclaim_work(struct work_struct *work)
+{
+	struct qeth_card *card = container_of(work, struct qeth_card,
+		buffer_reclaim_work.work);
+
+	QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
+	qeth_queue_input_buffer(card, card->reclaim_index);
+}
+
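+/*
+ * Requeue processed inbound buffers to the device. To avoid SIGAs the
+ * requeue is only done once QETH_IN_BUF_REQUEUE_THRESHOLD buffers can
+ * be refilled; if no buffer could be refilled, the buffer reclaim work
+ * may be scheduled to retry later.
+ */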
+void qeth_queue_input_buffer(struct qeth_card *card, int index)
+{
+	struct qeth_qdio_q *queue = card->qdio.in_q;
+	struct list_head *lh;
+	int count;
+	int i;
+	int rc;
+	int newcount = 0;
+
+	count = (index < queue->next_buf_to_init)?
+		card->qdio.in_buf_pool.buf_count -
+		(queue->next_buf_to_init - index) :
+		card->qdio.in_buf_pool.buf_count -
+		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
+	/* only requeue at a certain threshold to avoid SIGAs */
+	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
+		for (i = queue->next_buf_to_init;
+		     i < queue->next_buf_to_init + count; ++i) {
+			if (qeth_init_input_buffer(card,
+				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
+				break;
+			} else {
+				newcount++;
+			}
+		}
+
+		if (newcount < count) {
+			/* we are short on memory, so we switch back to
+			   traditional skb allocation and drop packets */
+			atomic_set(&card->force_alloc_skb, 3);
+			count = newcount;
+		} else {
+			atomic_add_unless(&card->force_alloc_skb, -1, 0);
+		}
+
+		if (!count) {
+			i = 0;
+			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
+				i++;
+			if (i == card->qdio.in_buf_pool.buf_count) {
+				QETH_CARD_TEXT(card, 2, "qsarbw");
+				card->reclaim_index = index;
+				schedule_delayed_work(
+					&card->buffer_reclaim_work,
+					QETH_RECLAIM_WORK_TIME);
+			}
+			return;
+		}
+
+		/*
+		 * According to the old code, requeueing all 128 buffers
+		 * should be avoided in order to benefit from PCI avoidance.
+		 * This function keeps at least one buffer (the buffer at
+		 * 'index') un-requeued; it is the first buffer that
+		 * will be requeued the next time.
+		 */
+		if (card->options.performance_stats) {
+			card->perf_stats.inbound_do_qdio_cnt++;
+			card->perf_stats.inbound_do_qdio_start_time =
+				qeth_get_micros();
+		}
+		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
+			     queue->next_buf_to_init, count);
+		if (card->options.performance_stats)
+			card->perf_stats.inbound_do_qdio_time +=
+				qeth_get_micros() -
+				card->perf_stats.inbound_do_qdio_start_time;
+		if (rc) {
+			QETH_CARD_TEXT(card, 2, "qinberr");
+		}
+		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
+					  QDIO_MAX_BUFFERS_PER_Q;
+	}
+}
+EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
+
+static int qeth_handle_send_error(struct qeth_card *card,
+		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
+{
+	int sbalf15 = buffer->buffer->element[15].sflags;
+
+	QETH_CARD_TEXT(card, 6, "hdsnderr");
+	if (card->info.type == QETH_CARD_TYPE_IQD) {
+		if (sbalf15 == 0) {
+			qdio_err = 0;
+		} else {
+			qdio_err = 1;
+		}
+	}
+	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
+
+	if (!qdio_err)
+		return QETH_SEND_ERROR_NONE;
+
+	if ((sbalf15 >= 15) && (sbalf15 <= 31))
+		return QETH_SEND_ERROR_RETRY;
+
+	QETH_CARD_TEXT(card, 1, "lnkfail");
+	QETH_CARD_TEXT_(card, 1, "%04x %02x",
+		       (u16)qdio_err, (u8)sbalf15);
+	return QETH_SEND_ERROR_LINK_FAILURE;
+}
+
+/*
+ * Switches to packing state if the number of used buffers on a queue
+ * reaches a certain limit.
+ */
+static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
+{
+	if (!queue->do_pack) {
+		if (atomic_read(&queue->used_buffers)
+		    >= QETH_HIGH_WATERMARK_PACK){
+			/* switch non-PACKING -> PACKING */
+			QETH_CARD_TEXT(queue->card, 6, "np->pack");
+			if (queue->card->options.performance_stats)
+				queue->card->perf_stats.sc_dp_p++;
+			queue->do_pack = 1;
+		}
+	}
+}
+
+/*
+ * Switches from packing to non-packing mode. If there is a packing
+ * buffer on the queue this buffer will be prepared to be flushed.
+ * In that case 1 is returned to inform the caller. If no buffer
+ * has to be flushed, zero is returned.
+ */
+static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
+{
+	struct qeth_qdio_out_buffer *buffer;
+	int flush_count = 0;
+
+	if (queue->do_pack) {
+		if (atomic_read(&queue->used_buffers)
+		    <= QETH_LOW_WATERMARK_PACK) {
+			/* switch PACKING -> non-PACKING */
+			QETH_CARD_TEXT(queue->card, 6, "pack->np");
+			if (queue->card->options.performance_stats)
+				queue->card->perf_stats.sc_p_dp++;
+			queue->do_pack = 0;
+			/* flush packing buffers */
+			buffer = queue->bufs[queue->next_buf_to_fill];
+			if ((atomic_read(&buffer->state) ==
+						QETH_QDIO_BUF_EMPTY) &&
+			    (buffer->next_element_to_fill > 0)) {
+				atomic_set(&buffer->state,
+					   QETH_QDIO_BUF_PRIMED);
+				flush_count++;
+				queue->next_buf_to_fill =
+					(queue->next_buf_to_fill + 1) %
+					QDIO_MAX_BUFFERS_PER_Q;
+			}
+		}
+	}
+	return flush_count;
+}
+
+
+/*
+ * Called to flush a packing buffer if no more pci flags are on the queue.
+ * Checks if there is a packing buffer and prepares it to be flushed.
+ * In that case returns 1, otherwise zero.
+ */
+static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
+{
+	struct qeth_qdio_out_buffer *buffer;
+
+	buffer = queue->bufs[queue->next_buf_to_fill];
+	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
+	   (buffer->next_element_to_fill > 0)) {
+		/* it's a packing buffer */
+		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+		queue->next_buf_to_fill =
+			(queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
+		return 1;
+	}
+	return 0;
+}
+
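+/*
+ * Hand 'count' buffers starting at 'index' to the device via do_QDIO.
+ * The last used element of each buffer is flagged and, for non-IQD
+ * devices, a PCI request flag may be set so that a later interrupt
+ * can flush remaining packed buffers.
+ */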
+static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
+			       int count)
+{
+	struct qeth_qdio_out_buffer *buf;
+	int rc;
+	int i;
+	unsigned int qdio_flags;
+
+	for (i = index; i < index + count; ++i) {
+		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+		buf = queue->bufs[bidx];
+		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
+				SBAL_EFLAGS_LAST_ENTRY;
+
+		if (queue->bufstates)
+			queue->bufstates[bidx].user = buf;
+
+		if (queue->card->info.type == QETH_CARD_TYPE_IQD)
+			continue;
+
+		if (!queue->do_pack) {
+			if ((atomic_read(&queue->used_buffers) >=
+				(QETH_HIGH_WATERMARK_PACK -
+				 QETH_WATERMARK_PACK_FUZZ)) &&
+			    !atomic_read(&queue->set_pci_flags_count)) {
+				/* it's likely that we'll go to packing
+				 * mode soon */
+				atomic_inc(&queue->set_pci_flags_count);
+				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
+			}
+		} else {
+			if (!atomic_read(&queue->set_pci_flags_count)) {
+				/*
+				 * there's no outstanding PCI any more, so we
+				 * have to request a PCI to be sure that the PCI
+				 * will wake at some time in the future; then we
+				 * can flush packed buffers that might still be
+				 * hanging around, which can happen if no
+				 * further send was requested by the stack
+				 */
+				atomic_inc(&queue->set_pci_flags_count);
+				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
+			}
+		}
+	}
+
+	queue->card->dev->trans_start = jiffies;
+	if (queue->card->options.performance_stats) {
+		queue->card->perf_stats.outbound_do_qdio_cnt++;
+		queue->card->perf_stats.outbound_do_qdio_start_time =
+			qeth_get_micros();
+	}
+	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
+	if (atomic_read(&queue->set_pci_flags_count))
+		qdio_flags |= QDIO_FLAG_PCI_OUT;
+	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
+		     queue->queue_no, index, count);
+	if (queue->card->options.performance_stats)
+		queue->card->perf_stats.outbound_do_qdio_time +=
+			qeth_get_micros() -
+			queue->card->perf_stats.outbound_do_qdio_start_time;
+	atomic_add(count, &queue->used_buffers);
+	if (rc) {
+		queue->card->stats.tx_errors += count;
+		/* ignore temporary SIGA errors without busy condition */
+		if (rc == QDIO_ERROR_SIGA_TARGET)
+			return;
+		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
+		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
+		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
+		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
+		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
+
+		/* this must not happen under normal circumstances. if it
+		 * happens something is really wrong -> recover */
+		qeth_schedule_recovery(queue->card);
+		return;
+	}
+	if (queue->card->options.performance_stats)
+		queue->card->perf_stats.bufs_sent += count;
+}
+
+static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
+{
+	int index;
+	int flush_cnt = 0;
+	int q_was_packing = 0;
+
+	/*
+	 * check if we have to switch to non-packing mode or if
+	 * we have to get a pci flag out on the queue
+	 */
+	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
+	    !atomic_read(&queue->set_pci_flags_count)) {
+		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
+				QETH_OUT_Q_UNLOCKED) {
+			/*
+			 * If we get in here, there was no action in
+			 * do_send_packet. So, we check if there is a
+			 * packing buffer to be flushed here.
+			 */
+			netif_stop_queue(queue->card->dev);
+			index = queue->next_buf_to_fill;
+			q_was_packing = queue->do_pack;
+			/* queue->do_pack may change */
+			barrier();
+			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
+			if (!flush_cnt &&
+			    !atomic_read(&queue->set_pci_flags_count))
+				flush_cnt +=
+					qeth_flush_buffers_on_no_pci(queue);
+			if (queue->card->options.performance_stats &&
+			    q_was_packing)
+				queue->card->perf_stats.bufs_sent_pack +=
+					flush_cnt;
+			if (flush_cnt)
+				qeth_flush_buffers(queue, index, flush_cnt);
+			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+		}
+	}
+}
+
+void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
+		unsigned long card_ptr)
+{
+	struct qeth_card *card = (struct qeth_card *)card_ptr;
+
+	if (card->dev && (card->dev->flags & IFF_UP))
+		napi_schedule(&card->napi);
+}
+EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
+
+int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
+{
+	int rc;
+
+	if (card->options.cq ==  QETH_CQ_NOTAVAILABLE) {
+		rc = -1;
+		goto out;
+	} else {
+		if (card->options.cq == cq) {
+			rc = 0;
+			goto out;
+		}
+
+		if (card->state != CARD_STATE_DOWN &&
+		    card->state != CARD_STATE_RECOVER) {
+			rc = -1;
+			goto out;
+		}
+
+		qeth_free_qdio_buffers(card);
+		card->options.cq = cq;
+		rc = 0;
+	}
+out:
+	return rc;
+
+}
+EXPORT_SYMBOL_GPL(qeth_configure_cq);
+
+
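+/*
+ * Handle inbound elements on the completion queue: each element's
+ * address points to an asynchronous operation block (AOB) that is
+ * processed by qeth_qdio_handle_aob(); afterwards the buffers are
+ * given back to the device.
+ */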
+static void qeth_qdio_cq_handler(struct qeth_card *card,
+		unsigned int qdio_err,
+		unsigned int queue, int first_element, int count) {
+	struct qeth_qdio_q *cq = card->qdio.c_q;
+	int i;
+	int rc;
+
+	if (!qeth_is_cq(card, queue))
+		goto out;
+
+	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
+	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
+	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
+
+	if (qdio_err) {
+		netif_stop_queue(card->dev);
+		qeth_schedule_recovery(card);
+		goto out;
+	}
+
+	if (card->options.performance_stats) {
+		card->perf_stats.cq_cnt++;
+		card->perf_stats.cq_start_time = qeth_get_micros();
+	}
+
+	for (i = first_element; i < first_element + count; ++i) {
+		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+		struct qdio_buffer *buffer = &cq->qdio_bufs[bidx];
+		int e;
+
+		e = 0;
+		while (buffer->element[e].addr) {
+			unsigned long phys_aob_addr;
+
+			phys_aob_addr = (unsigned long) buffer->element[e].addr;
+			qeth_qdio_handle_aob(card, phys_aob_addr);
+			buffer->element[e].addr = NULL;
+			buffer->element[e].eflags = 0;
+			buffer->element[e].sflags = 0;
+			buffer->element[e].length = 0;
+
+			++e;
+		}
+
+		buffer->element[15].eflags = 0;
+		buffer->element[15].sflags = 0;
+	}
+	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
+		    card->qdio.c_q->next_buf_to_init,
+		    count);
+	if (rc) {
+		dev_warn(&card->gdev->dev,
+			"QDIO reported an error, rc=%i\n", rc);
+		QETH_CARD_TEXT(card, 2, "qcqherr");
+	}
+	card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
+				   + count) % QDIO_MAX_BUFFERS_PER_Q;
+
+	netif_wake_queue(card->dev);
+
+	if (card->options.performance_stats) {
+		int delta_t = qeth_get_micros();
+		delta_t -= card->perf_stats.cq_start_time;
+		card->perf_stats.cq_time += delta_t;
+	}
+out:
+	return;
+}
+
+void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
+		unsigned int queue, int first_elem, int count,
+		unsigned long card_ptr)
+{
+	struct qeth_card *card = (struct qeth_card *)card_ptr;
+
+	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
+	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
+
+	if (qeth_is_cq(card, queue))
+		qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
+	else if (qdio_err)
+		qeth_schedule_recovery(card);
+}
+EXPORT_SYMBOL_GPL(qeth_qdio_input_handler);
+
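+/*
+ * Outbound QDIO completion handler: check each completed buffer for
+ * send errors, handle buffers that remain pending on the completion
+ * queue, recycle the rest and re-evaluate the queue's packing state.
+ */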
+void qeth_qdio_output_handler(struct ccw_device *ccwdev,
+		unsigned int qdio_error, int __queue, int first_element,
+		int count, unsigned long card_ptr)
+{
+	struct qeth_card *card        = (struct qeth_card *) card_ptr;
+	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
+	struct qeth_qdio_out_buffer *buffer;
+	int i;
+
+	QETH_CARD_TEXT(card, 6, "qdouhdl");
+	if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
+		QETH_CARD_TEXT(card, 2, "achkcond");
+		netif_stop_queue(card->dev);
+		qeth_schedule_recovery(card);
+		return;
+	}
+	if (card->options.performance_stats) {
+		card->perf_stats.outbound_handler_cnt++;
+		card->perf_stats.outbound_handler_start_time =
+			qeth_get_micros();
+	}
+	for (i = first_element; i < (first_element + count); ++i) {
+		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+		buffer = queue->bufs[bidx];
+		qeth_handle_send_error(card, buffer, qdio_error);
+
+		if (queue->bufstates &&
+		    (queue->bufstates[bidx].flags &
+		     QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
+			BUG_ON(card->options.cq != QETH_CQ_ENABLED);
+
+			if (atomic_cmpxchg(&buffer->state,
+					   QETH_QDIO_BUF_PRIMED,
+					   QETH_QDIO_BUF_PENDING) ==
+				QETH_QDIO_BUF_PRIMED) {
+				qeth_notify_skbs(queue, buffer,
+						 TX_NOTIFY_PENDING);
+			}
+			buffer->aob = queue->bufstates[bidx].aob;
+			QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
+			QETH_CARD_TEXT(queue->card, 5, "aob");
+			QETH_CARD_TEXT_(queue->card, 5, "%lx",
+					virt_to_phys(buffer->aob));
+			BUG_ON(bidx < 0 || bidx >= QDIO_MAX_BUFFERS_PER_Q);
+			if (qeth_init_qdio_out_buf(queue, bidx)) {
+				QETH_CARD_TEXT(card, 2, "outofbuf");
+				qeth_schedule_recovery(card);
+			}
+		} else {
+			if (card->options.cq == QETH_CQ_ENABLED) {
+				enum iucv_tx_notify n;
+
+				n = qeth_compute_cq_notification(
+					buffer->buffer->element[15].sflags, 0);
+				qeth_notify_skbs(queue, buffer, n);
+			}
+
+			qeth_clear_output_buffer(queue, buffer,
+						QETH_QDIO_BUF_EMPTY);
+		}
+		qeth_cleanup_handled_pending(queue, bidx, 0);
+	}
+	atomic_sub(count, &queue->used_buffers);
+	/* check if we need to do something on this outbound queue */
+	if (card->info.type != QETH_CARD_TYPE_IQD)
+		qeth_check_outbound_queue(queue);
+
+	netif_wake_queue(queue->card->dev);
+	if (card->options.performance_stats)
+		card->perf_stats.outbound_handler_time += qeth_get_micros() -
+			card->perf_stats.outbound_handler_start_time;
+}
+EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
+
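+/*
+ * Select the outbound queue for a packet. On 4-queue devices IPv4
+ * packets can be spread over the queues by TOS bits or IP precedence;
+ * otherwise the default outbound queue is used.
+ */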
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
+			int ipv, int cast_type)
+{
+	if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD ||
+		     card->info.type == QETH_CARD_TYPE_OSX))
+		return card->qdio.default_out_queue;
+	switch (card->qdio.no_out_queues) {
+	case 4:
+		if (cast_type && card->info.is_multicast_different)
+			return card->info.is_multicast_different &
+				(card->qdio.no_out_queues - 1);
+		if (card->qdio.do_prio_queueing && (ipv == 4)) {
+			const u8 tos = ip_hdr(skb)->tos;
+
+			if (card->qdio.do_prio_queueing ==
+				QETH_PRIO_Q_ING_TOS) {
+				if (tos & IP_TOS_NOTIMPORTANT)
+					return 3;
+				if (tos & IP_TOS_HIGHRELIABILITY)
+					return 2;
+				if (tos & IP_TOS_HIGHTHROUGHPUT)
+					return 1;
+				if (tos & IP_TOS_LOWDELAY)
+					return 0;
+			}
+			if (card->qdio.do_prio_queueing ==
+				QETH_PRIO_Q_ING_PREC)
+				return 3 - (tos >> 6);
+		} else if (card->qdio.do_prio_queueing && (ipv == 6)) {
+			/* TODO: IPv6!!! */
+		}
+		return card->qdio.default_out_queue;
+	case 1: /* fallthrough for single-out-queue 1920-device */
+	default:
+		return card->qdio.default_out_queue;
+	}
+}
+EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
+
+int qeth_get_elements_no(struct qeth_card *card, void *hdr,
+		     struct sk_buff *skb, int elems)
+{
+	int dlen = skb->len - skb->data_len;
+	int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) -
+		PFN_DOWN((unsigned long)skb->data);
+
+	elements_needed += skb_shinfo(skb)->nr_frags;
+	if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
+		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
+			"(Number=%d / Length=%d). Discarded.\n",
+			(elements_needed+elems), skb->len);
+		return 0;
+	}
+	return elements_needed;
+}
+EXPORT_SYMBOL_GPL(qeth_get_elements_no);
+
+int qeth_hdr_chk_and_bounce(struct sk_buff *skb, int len)
+{
+	int hroom, inpage, rest;
+
+	if (((unsigned long)skb->data & PAGE_MASK) !=
+	    (((unsigned long)skb->data + len - 1) & PAGE_MASK)) {
+		hroom = skb_headroom(skb);
+		inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE);
+		rest = len - inpage;
+		if (rest > hroom)
+			return 1;
+		memmove(skb->data - rest, skb->data, skb->len - skb->data_len);
+		skb->data -= rest;
+		QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
+
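+/*
+ * Map the skb's linear data (page by page) and its fragments into the
+ * SBAL elements of 'buffer', setting the first/middle/last fragment
+ * flags as appropriate.
+ */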
+static inline void __qeth_fill_buffer(struct sk_buff *skb,
+	struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
+	int offset)
+{
+	int length = skb->len - skb->data_len;
+	int length_here;
+	int element;
+	char *data;
+	int first_lap, cnt;
+	struct skb_frag_struct *frag;
+
+	element = *next_element_to_fill;
+	data = skb->data;
+	first_lap = (is_tso == 0 ? 1 : 0);
+
+	if (offset >= 0) {
+		data = skb->data + offset;
+		length -= offset;
+		first_lap = 0;
+	}
+
+	while (length > 0) {
+		/* length_here is the remaining amount of data in this page */
+		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
+		if (length < length_here)
+			length_here = length;
+
+		buffer->element[element].addr = data;
+		buffer->element[element].length = length_here;
+		length -= length_here;
+		if (!length) {
+			if (first_lap)
+				if (skb_shinfo(skb)->nr_frags)
+					buffer->element[element].eflags =
+						SBAL_EFLAGS_FIRST_FRAG;
+				else
+					buffer->element[element].eflags = 0;
+			else
+				buffer->element[element].eflags =
+				    SBAL_EFLAGS_MIDDLE_FRAG;
+		} else {
+			if (first_lap)
+				buffer->element[element].eflags =
+				    SBAL_EFLAGS_FIRST_FRAG;
+			else
+				buffer->element[element].eflags =
+				    SBAL_EFLAGS_MIDDLE_FRAG;
+		}
+		data += length_here;
+		element++;
+		first_lap = 0;
+	}
+
+	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
+		frag = &skb_shinfo(skb)->frags[cnt];
+		buffer->element[element].addr = (char *)
+			page_to_phys(skb_frag_page(frag))
+			+ frag->page_offset;
+		buffer->element[element].length = frag->size;
+		buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
+		element++;
+	}
+
+	if (buffer->element[element - 1].eflags)
+		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
+	*next_element_to_fill = element;
+}
+
+static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
+		struct qeth_qdio_out_buffer *buf, struct sk_buff *skb,
+		struct qeth_hdr *hdr, int offset, int hd_len)
+{
+	struct qdio_buffer *buffer;
+	int flush_cnt = 0, hdr_len, large_send = 0;
+
+	buffer = buf->buffer;
+	atomic_inc(&skb->users);
+	skb_queue_tail(&buf->skb_list, skb);
+
+	/* check first on TSO ... */
+	if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) {
+		int element = buf->next_element_to_fill;
+
+		hdr_len = sizeof(struct qeth_hdr_tso) +
+			((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len;
+		/* fill first buffer entry only with header information */
+		buffer->element[element].addr = skb->data;
+		buffer->element[element].length = hdr_len;
+		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
+		buf->next_element_to_fill++;
+		skb->data += hdr_len;
+		skb->len  -= hdr_len;
+		large_send = 1;
+	}
+
+	if (offset >= 0) {
+		int element = buf->next_element_to_fill;
+		buffer->element[element].addr = hdr;
+		buffer->element[element].length = sizeof(struct qeth_hdr) +
+							hd_len;
+		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
+		buf->is_header[element] = 1;
+		buf->next_element_to_fill++;
+	}
+
+	__qeth_fill_buffer(skb, buffer, large_send,
+		(int *)&buf->next_element_to_fill, offset);
+
+	if (!queue->do_pack) {
+		QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
+		/* set state to PRIMED -> will be flushed */
+		atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+		flush_cnt = 1;
+	} else {
+		QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
+		if (queue->card->options.performance_stats)
+			queue->card->perf_stats.skbs_sent_pack++;
+		if (buf->next_element_to_fill >=
+				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
+			/*
+			 * packed buffer is full -> set state PRIMED
+			 * -> will be flushed
+			 */
+			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+			flush_cnt = 1;
+		}
+	}
+	return flush_cnt;
+}
+
+int qeth_do_send_packet_fast(struct qeth_card *card,
+		struct qeth_qdio_out_q *queue, struct sk_buff *skb,
+		struct qeth_hdr *hdr, int elements_needed,
+		int offset, int hd_len)
+{
+	struct qeth_qdio_out_buffer *buffer;
+	int index;
+
+	/* spin until we get the queue ... */
+	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
+			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
+	/* ... now we've got the queue */
+	index = queue->next_buf_to_fill;
+	buffer = queue->bufs[queue->next_buf_to_fill];
+	/*
+	 * check if buffer is empty to make sure that we do not 'overtake'
+	 * ourselves and try to fill a buffer that is already primed
+	 */
+	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
+		goto out;
+	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+					  QDIO_MAX_BUFFERS_PER_Q;
+	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+	qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
+	qeth_flush_buffers(queue, index, 1);
+	return 0;
+out:
+	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+	return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
+
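+/*
+ * Queue a packet on an outbound queue that may be in packing mode.
+ * The queue state cycles through LOCKED/LOCKED_FLUSH so that work
+ * requested by the output handler while we hold the queue is still
+ * carried out before the queue is unlocked.
+ */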
+int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
+		struct sk_buff *skb, struct qeth_hdr *hdr,
+		int elements_needed)
+{
+	struct qeth_qdio_out_buffer *buffer;
+	int start_index;
+	int flush_count = 0;
+	int do_pack = 0;
+	int tmp;
+	int rc = 0;
+
+	/* spin until we get the queue ... */
+	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
+			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
+	start_index = queue->next_buf_to_fill;
+	buffer = queue->bufs[queue->next_buf_to_fill];
+	/*
+	 * check if buffer is empty to make sure that we do not 'overtake'
+	 * ourselves and try to fill a buffer that is already primed
+	 */
+	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
+		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+		return -EBUSY;
+	}
+	/* check if we need to switch packing state of this queue */
+	qeth_switch_to_packing_if_needed(queue);
+	if (queue->do_pack) {
+		do_pack = 1;
+		/* does packet fit in current buffer? */
+		if ((QETH_MAX_BUFFER_ELEMENTS(card) -
+		    buffer->next_element_to_fill) < elements_needed) {
+			/* ... no -> set state PRIMED */
+			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+			flush_count++;
+			queue->next_buf_to_fill =
+				(queue->next_buf_to_fill + 1) %
+				QDIO_MAX_BUFFERS_PER_Q;
+			buffer = queue->bufs[queue->next_buf_to_fill];
+			/* we did a step forward, so check buffer state
+			 * again */
+			if (atomic_read(&buffer->state) !=
+			    QETH_QDIO_BUF_EMPTY) {
+				qeth_flush_buffers(queue, start_index,
+							   flush_count);
+				atomic_set(&queue->state,
+						QETH_OUT_Q_UNLOCKED);
+				return -EBUSY;
+			}
+		}
+	}
+	tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0);
+	queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
+				  QDIO_MAX_BUFFERS_PER_Q;
+	flush_count += tmp;
+	if (flush_count)
+		qeth_flush_buffers(queue, start_index, flush_count);
+	else if (!atomic_read(&queue->set_pci_flags_count))
+		atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
+	/*
+	 * queue->state will go from LOCKED -> UNLOCKED or from
+	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
+	 * (switch packing state or flush buffer to get another pci flag out).
+	 * In that case we will enter this loop
+	 */
+	while (atomic_dec_return(&queue->state)) {
+		flush_count = 0;
+		start_index = queue->next_buf_to_fill;
+		/* check if we can go back to non-packing state */
+		flush_count += qeth_switch_to_nonpacking_if_needed(queue);
+		/*
+		 * check if we need to flush a packing buffer to get a pci
+		 * flag out on the queue
+		 */
+		if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
+			flush_count += qeth_flush_buffers_on_no_pci(queue);
+		if (flush_count)
+			qeth_flush_buffers(queue, start_index, flush_count);
+	}
+	/* at this point the queue is UNLOCKED again */
+	if (queue->card->options.performance_stats && do_pack)
+		queue->card->perf_stats.bufs_sent_pack += flush_count;
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_do_send_packet);
+
+static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_ipacmd_setadpparms *setparms;
+
+	QETH_CARD_TEXT(card, 4, "prmadpcb");
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	setparms = &(cmd->data.setadapterparms);
+
+	qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
+	if (cmd->hdr.return_code) {
+		QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code);
+		setparms->data.mode = SET_PROMISC_MODE_OFF;
+	}
+	card->info.promisc_mode = setparms->data.mode;
+	return 0;
+}
+
+void qeth_setadp_promisc_mode(struct qeth_card *card)
+{
+	enum qeth_ipa_promisc_modes mode;
+	struct net_device *dev = card->dev;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "setprom");
+
+	if (((dev->flags & IFF_PROMISC) &&
+	     (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
+	    (!(dev->flags & IFF_PROMISC) &&
+	     (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
+		return;
+	mode = SET_PROMISC_MODE_OFF;
+	if (dev->flags & IFF_PROMISC)
+		mode = SET_PROMISC_MODE_ON;
+	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
+
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
+			sizeof(struct qeth_ipacmd_setadpparms));
+	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
+	cmd->data.setadapterparms.data.mode = mode;
+	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
+}
+EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
+
+int qeth_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct qeth_card *card;
+	char dbf_text[15];
+
+	card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 4, "chgmtu");
+	sprintf(dbf_text, "%8x", new_mtu);
+	QETH_CARD_TEXT(card, 4, dbf_text);
+
+	if (new_mtu < 64)
+		return -EINVAL;
+	if (new_mtu > 65535)
+		return -EINVAL;
+	if ((!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) &&
+	    (!qeth_mtu_is_valid(card, new_mtu)))
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_change_mtu);
+
+struct net_device_stats *qeth_get_stats(struct net_device *dev)
+{
+	struct qeth_card *card;
+
+	card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 5, "getstat");
+
+	return &card->stats;
+}
+EXPORT_SYMBOL_GPL(qeth_get_stats);
+
+static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "chgmaccb");
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (!card->options.layer2 ||
+	    !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
+		memcpy(card->dev->dev_addr,
+		       &cmd->data.setadapterparms.data.change_addr.addr,
+		       OSA_ADDR_LEN);
+		card->info.mac_bits |= QETH_LAYER2_MAC_READ;
+	}
+	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+	return 0;
+}
+
+int qeth_setadpparms_change_macaddr(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "chgmac");
+
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
+				   sizeof(struct qeth_ipacmd_setadpparms));
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
+	cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
+	memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
+	       card->dev->dev_addr, OSA_ADDR_LEN);
+	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
+			       NULL);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
+
+static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_set_access_ctrl *access_ctrl_req;
+
+	QETH_CARD_TEXT(card, 4, "setaccb");
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
+	QETH_DBF_TEXT_(SETUP, 2, "setaccb");
+	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
+	QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
+		cmd->data.setadapterparms.hdr.return_code);
+	switch (cmd->data.setadapterparms.hdr.return_code) {
+	case SET_ACCESS_CTRL_RC_SUCCESS:
+	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
+	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
+	{
+		card->options.isolation = access_ctrl_req->subcmd_code;
+		if (card->options.isolation == ISOLATION_MODE_NONE) {
+			dev_info(&card->gdev->dev,
+			    "QDIO data connection isolation is deactivated\n");
+		} else {
+			dev_info(&card->gdev->dev,
+			    "QDIO data connection isolation is activated\n");
+		}
+		QETH_DBF_MESSAGE(3, "OK:SET_ACCESS_CTRL(%s, %d)==%d\n",
+			card->gdev->dev.kobj.name,
+			access_ctrl_req->subcmd_code,
+			cmd->data.setadapterparms.hdr.return_code);
+		break;
+	}
+	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
+	{
+		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
+			card->gdev->dev.kobj.name,
+			access_ctrl_req->subcmd_code,
+			cmd->data.setadapterparms.hdr.return_code);
+		dev_err(&card->gdev->dev, "Adapter does not "
+			"support QDIO data connection isolation\n");
+
+		/* ensure isolation mode is "none" */
+		card->options.isolation = ISOLATION_MODE_NONE;
+		break;
+	}
+	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
+	{
+		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
+			card->gdev->dev.kobj.name,
+			access_ctrl_req->subcmd_code,
+			cmd->data.setadapterparms.hdr.return_code);
+		dev_err(&card->gdev->dev,
+			"Adapter is dedicated. "
+			"QDIO data connection isolation not supported\n");
+
+		/* ensure isolation mode is "none" */
+		card->options.isolation = ISOLATION_MODE_NONE;
+		break;
+	}
+	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
+	{
+		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
+			card->gdev->dev.kobj.name,
+			access_ctrl_req->subcmd_code,
+			cmd->data.setadapterparms.hdr.return_code);
+		dev_err(&card->gdev->dev,
+			"TSO does not permit QDIO data connection isolation\n");
+
+		/* ensure isolation mode is "none" */
+		card->options.isolation = ISOLATION_MODE_NONE;
+		break;
+	}
+	default:
+	{
+		/* this should never happen */
+		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d"
+			"==UNKNOWN\n",
+			card->gdev->dev.kobj.name,
+			access_ctrl_req->subcmd_code,
+			cmd->data.setadapterparms.hdr.return_code);
+
+		/* ensure isolation mode is "none" */
+		card->options.isolation = ISOLATION_MODE_NONE;
+		break;
+	}
+	}
+	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+	return 0;
+}
+
+static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
+		enum qeth_ipa_isolation_modes isolation)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_set_access_ctrl *access_ctrl_req;
+
+	QETH_CARD_TEXT(card, 4, "setacctl");
+
+	QETH_DBF_TEXT_(SETUP, 2, "setacctl");
+	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
+
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
+				   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
+				   sizeof(struct qeth_set_access_ctrl));
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
+	access_ctrl_req->subcmd_code = isolation;
+
+	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
+			       NULL);
+	QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
+	return rc;
+}
+
+int qeth_set_access_ctrl_online(struct qeth_card *card)
+{
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 4, "setactlo");
+
+	if ((card->info.type == QETH_CARD_TYPE_OSD ||
+	     card->info.type == QETH_CARD_TYPE_OSX) &&
+	     qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
+		rc = qeth_setadpparms_set_access_ctrl(card,
+			card->options.isolation);
+		if (rc) {
+			QETH_DBF_MESSAGE(3,
+				"IPA(SET_ACCESS_CTRL,%s,%d) send failed\n",
+				card->gdev->dev.kobj.name,
+				rc);
+		}
+	} else if (card->options.isolation != ISOLATION_MODE_NONE) {
+		card->options.isolation = ISOLATION_MODE_NONE;
+
+		dev_err(&card->gdev->dev, "Adapter does not "
+			"support QDIO data connection isolation\n");
+		rc = -EOPNOTSUPP;
+	}
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
+
+void qeth_tx_timeout(struct net_device *dev)
+{
+	struct qeth_card *card;
+
+	card = dev->ml_priv;
+	QETH_CARD_TEXT(card, 4, "txtimeo");
+	card->stats.tx_errors++;
+	qeth_schedule_recovery(card);
+}
+EXPORT_SYMBOL_GPL(qeth_tx_timeout);
+
+int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
+{
+	struct qeth_card *card = dev->ml_priv;
+	int rc = 0;
+
+	switch (regnum) {
+	case MII_BMCR: /* Basic mode control register */
+		rc = BMCR_FULLDPLX;
+		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
+		    (card->info.link_type != QETH_LINK_TYPE_OSN) &&
+		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
+			rc |= BMCR_SPEED100;
+		break;
+	case MII_BMSR: /* Basic mode status register */
+		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
+		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
+		     BMSR_100BASE4;
+		break;
+	case MII_PHYSID1: /* PHYS ID 1 */
+		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
+		     dev->dev_addr[2];
+		rc = (rc >> 5) & 0xFFFF;
+		break;
+	case MII_PHYSID2: /* PHYS ID 2 */
+		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
+		break;
+	case MII_ADVERTISE: /* Advertisement control reg */
+		rc = ADVERTISE_ALL;
+		break;
+	case MII_LPA: /* Link partner ability reg */
+		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
+		     LPA_100BASE4 | LPA_LPACK;
+		break;
+	case MII_EXPANSION: /* Expansion register */
+		break;
+	case MII_DCOUNTER: /* disconnect counter */
+		break;
+	case MII_FCSCOUNTER: /* false carrier counter */
+		break;
+	case MII_NWAYTEST: /* N-way auto-neg test register */
+		break;
+	case MII_RERRCOUNTER: /* rx error counter */
+		rc = card->stats.rx_errors;
+		break;
+	case MII_SREVISION: /* silicon revision */
+		break;
+	case MII_RESV1: /* reserved 1 */
+		break;
+	case MII_LBRERROR: /* loopback, rx, bypass error */
+		break;
+	case MII_PHYADDR: /* physical address */
+		break;
+	case MII_RESV2: /* reserved 2 */
+		break;
+	case MII_TPISTATUS: /* TPI status for 10mbps */
+		break;
+	case MII_NCONFIG: /* network interface config */
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_mdio_read);
+
+static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
+		struct qeth_cmd_buffer *iob, int len,
+		int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
+			unsigned long),
+		void *reply_param)
+{
+	u16 s1, s2;
+
+	QETH_CARD_TEXT(card, 4, "sendsnmp");
+
+	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
+	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
+	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+	/* adjust PDU length fields in IPA_PDU_HEADER */
+	s1 = (u32) IPA_PDU_HEADER_SIZE + len;
+	s2 = (u32) len;
+	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
+	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
+	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
+	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
+	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
+				      reply_cb, reply_param);
+}
+
+static int qeth_snmp_command_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long sdata)
+{
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_arp_query_info *qinfo;
+	struct qeth_snmp_cmd *snmp;
+	unsigned char *data;
+	__u16 data_len;
+
+	QETH_CARD_TEXT(card, 3, "snpcmdcb");
+
+	cmd = (struct qeth_ipa_cmd *) sdata;
+	data = (unsigned char *)((char *)cmd - reply->offset);
+	qinfo = (struct qeth_arp_query_info *) reply->param;
+	snmp = &cmd->data.setadapterparms.data.snmp;
+
+	if (cmd->hdr.return_code) {
+		QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code);
+		return 0;
+	}
+	if (cmd->data.setadapterparms.hdr.return_code) {
+		cmd->hdr.return_code =
+			cmd->data.setadapterparms.hdr.return_code;
+		QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code);
+		return 0;
+	}
+	data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
+	if (cmd->data.setadapterparms.hdr.seq_no == 1)
+		data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
+	else
+		data_len -= (__u16)((char *)&snmp->request - (char *)cmd);
+
+	/* check if there is enough room in userspace */
+	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
+		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
+		cmd->hdr.return_code = IPA_RC_ENOMEM;
+		return 0;
+	}
+	QETH_CARD_TEXT_(card, 4, "snore%i",
+		       cmd->data.setadapterparms.hdr.used_total);
+	QETH_CARD_TEXT_(card, 4, "sseqn%i",
+		cmd->data.setadapterparms.hdr.seq_no);
+	/* copy entries to user buffer */
+	if (cmd->data.setadapterparms.hdr.seq_no == 1) {
+		memcpy(qinfo->udata + qinfo->udata_offset,
+		       (char *)snmp,
+		       data_len + offsetof(struct qeth_snmp_cmd, data));
+		qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
+	} else {
+		memcpy(qinfo->udata + qinfo->udata_offset,
+		       (char *)&snmp->request, data_len);
+	}
+	qinfo->udata_offset += data_len;
+	/* check if all replies received ... */
+	QETH_CARD_TEXT_(card, 4, "srtot%i",
+		       cmd->data.setadapterparms.hdr.used_total);
+	QETH_CARD_TEXT_(card, 4, "srseq%i",
+		       cmd->data.setadapterparms.hdr.seq_no);
+	if (cmd->data.setadapterparms.hdr.seq_no <
+	    cmd->data.setadapterparms.hdr.used_total)
+		return 1;
+	return 0;
+}
+
+int qeth_snmp_command(struct qeth_card *card, char __user *udata)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_snmp_ureq *ureq;
+	unsigned int req_len;
+	struct qeth_arp_query_info qinfo = {0, };
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 3, "snmpcmd");
+
+	if (card->info.guestlan)
+		return -EOPNOTSUPP;
+
+	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
+	    (!card->options.layer2)) {
+		return -EOPNOTSUPP;
+	}
+	/* skip 4 bytes (data_len struct member) to get req_len */
+	if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
+		return -EFAULT;
+	if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
+		       sizeof(struct qeth_ipacmd_hdr) -
+		       sizeof(struct qeth_ipacmd_setadpparms_hdr)))
+		return -EINVAL;
+	ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
+	if (IS_ERR(ureq)) {
+		QETH_CARD_TEXT(card, 2, "snmpnome");
+		return PTR_ERR(ureq);
+	}
+	qinfo.udata_len = ureq->hdr.data_len;
+	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
+	if (!qinfo.udata) {
+		kfree(ureq);
+		return -ENOMEM;
+	}
+	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
+
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
+				   QETH_SNMP_SETADP_CMDLENGTH + req_len);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
+	rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
+				    qeth_snmp_command_cb, (void *)&qinfo);
+	if (rc)
+		QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n",
+			   QETH_CARD_IFNAME(card), rc);
+	else {
+		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
+			rc = -EFAULT;
+	}
+
+	kfree(ureq);
+	kfree(qinfo.udata);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_snmp_command);
+
+static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_qoat_priv *priv;
+	char *resdata;
+	int resdatalen;
+
+	QETH_CARD_TEXT(card, 3, "qoatcb");
+
+	cmd = (struct qeth_ipa_cmd *)data;
+	priv = (struct qeth_qoat_priv *)reply->param;
+	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
+	resdata = (char *)data + 28;
+
+	if (resdatalen > (priv->buffer_len - priv->response_len)) {
+		cmd->hdr.return_code = IPA_RC_FFFF;
+		return 0;
+	}
+
+	memcpy((priv->buffer + priv->response_len), resdata,
+		resdatalen);
+	priv->response_len += resdatalen;
+
+	if (cmd->data.setadapterparms.hdr.seq_no <
+	    cmd->data.setadapterparms.hdr.used_total)
+		return 1;
+	return 0;
+}
+
+int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
+{
+	int rc = 0;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_query_oat *oat_req;
+	struct qeth_query_oat_data oat_data;
+	struct qeth_qoat_priv priv;
+	void __user *tmp;
+
+	QETH_CARD_TEXT(card, 3, "qoatcmd");
+
+	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (copy_from_user(&oat_data, udata,
+	    sizeof(struct qeth_query_oat_data))) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	priv.buffer_len = oat_data.buffer_len;
+	priv.response_len = 0;
+	priv.buffer =  kzalloc(oat_data.buffer_len, GFP_KERNEL);
+	if (!priv.buffer) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
+				   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
+				   sizeof(struct qeth_query_oat));
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	oat_req = &cmd->data.setadapterparms.data.query_oat;
+	oat_req->subcmd_code = oat_data.command;
+
+	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
+			       &priv);
+	if (!rc) {
+		if (is_compat_task())
+			tmp = compat_ptr(oat_data.ptr);
+		else
+			tmp = (void __user *)(unsigned long)oat_data.ptr;
+
+		if (copy_to_user(tmp, priv.buffer,
+		    priv.response_len)) {
+			rc = -EFAULT;
+			goto out_free;
+		}
+
+		oat_data.response_len = priv.response_len;
+
+		if (copy_to_user(udata, &oat_data,
+		    sizeof(struct qeth_query_oat_data)))
+			rc = -EFAULT;
+	} else if (rc == IPA_RC_FFFF)
+		rc = -EFAULT;
+
+out_free:
+	kfree(priv.buffer);
+out:
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_query_oat_command);
+
+static inline int qeth_get_qdio_q_format(struct qeth_card *card)
+{
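+	/* HiperSockets (IQD) devices use QDIO queue format 2, all others 0 */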
+	switch (card->info.type) {
+	case QETH_CARD_TYPE_IQD:
+		return 2;
+	default:
+		return 0;
+	}
+}
+
+static void qeth_determine_capabilities(struct qeth_card *card)
+{
+	int rc;
+	int length;
+	char *prcd;
+	struct ccw_device *ddev;
+	int ddev_offline = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "detcapab");
+	ddev = CARD_DDEV(card);
+	if (!ddev->online) {
+		ddev_offline = 1;
+		rc = ccw_device_set_online(ddev);
+		if (rc) {
+			QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+			goto out;
+		}
+	}
+
+	rc = qeth_read_conf_data(card, (void **) &prcd, &length);
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
+			dev_name(&card->gdev->dev), rc);
+		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+		goto out_offline;
+	}
+	qeth_configure_unitaddr(card, prcd);
+	if (ddev_offline)
+		qeth_configure_blkt_default(card, prcd);
+	kfree(prcd);
+
+	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
+	if (rc)
+		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+
+	QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt);
+	QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac1);
+	QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac3);
+	QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt);
+	if ((card->ssqd.qfmt == QDIO_IQDIO_QFMT) &&
+	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) != 0) &&
+	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) != 0)) {
+		dev_info(&card->gdev->dev,
+			"Completion Queueing supported\n");
+	} else {
+		card->options.cq = QETH_CQ_NOTAVAILABLE;
+	}
+
+out_offline:
+	if (ddev_offline == 1)
+		ccw_device_set_offline(ddev);
+out:
+	return;
+}
+
+static inline void qeth_qdio_establish_cq(struct qeth_card *card,
+	struct qdio_buffer **in_sbal_ptrs,
+	void (**queue_start_poll) (struct ccw_device *, int, unsigned long)) {
+	int i;
+
+	if (card->options.cq == QETH_CQ_ENABLED) {
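+		/* the completion queue occupies the last input queue slot */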
+		int offset = QDIO_MAX_BUFFERS_PER_Q *
+			     (card->qdio.no_in_queues - 1);
+		for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+			in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
+				virt_to_phys(card->qdio.c_q->bufs[i].buffer);
+		}
+
+		queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
+	}
+}
+
+static int qeth_qdio_establish(struct qeth_card *card)
+{
+	struct qdio_initialize init_data;
+	char *qib_param_field;
+	struct qdio_buffer **in_sbal_ptrs;
+	void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
+	struct qdio_buffer **out_sbal_ptrs;
+	int i, j, k;
+	int rc = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "qdioest");
+
+	qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
+			      GFP_KERNEL);
+	if (!qib_param_field) {
+		rc =  -ENOMEM;
+		goto out_free_nothing;
+	}
+
+	qeth_create_qib_param_field(card, qib_param_field);
+	qeth_create_qib_param_field_blkt(card, qib_param_field);
+
+	in_sbal_ptrs = kzalloc(card->qdio.no_in_queues *
+			       QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
+			       GFP_KERNEL);
+	if (!in_sbal_ptrs) {
+		rc = -ENOMEM;
+		goto out_free_qib_param;
+	}
+	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+		in_sbal_ptrs[i] = (struct qdio_buffer *)
+			virt_to_phys(card->qdio.in_q->bufs[i].buffer);
+	}
+
+	queue_start_poll = kzalloc(sizeof(void *) * card->qdio.no_in_queues,
+				   GFP_KERNEL);
+	if (!queue_start_poll) {
+		rc = -ENOMEM;
+		goto out_free_in_sbals;
+	}
+	for (i = 0; i < card->qdio.no_in_queues; ++i)
+		queue_start_poll[i] = card->discipline.start_poll;
+
+	qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
+
+	out_sbal_ptrs =
+		kzalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
+			sizeof(void *), GFP_KERNEL);
+	if (!out_sbal_ptrs) {
+		rc = -ENOMEM;
+		goto out_free_queue_start_poll;
+	}
+	for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
+		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
+			out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
+				card->qdio.out_qs[i]->bufs[j]->buffer);
+		}
+
+	memset(&init_data, 0, sizeof(struct qdio_initialize));
+	init_data.cdev                   = CARD_DDEV(card);
+	init_data.q_format               = qeth_get_qdio_q_format(card);
+	init_data.qib_param_field_format = 0;
+	init_data.qib_param_field        = qib_param_field;
+	init_data.no_input_qs            = card->qdio.no_in_queues;
+	init_data.no_output_qs           = card->qdio.no_out_queues;
+	init_data.input_handler          = card->discipline.input_handler;
+	init_data.output_handler         = card->discipline.output_handler;
+	init_data.queue_start_poll_array = queue_start_poll;
+	init_data.int_parm               = (unsigned long) card;
+	init_data.input_sbal_addr_array  = (void **) in_sbal_ptrs;
+	init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
+	init_data.output_sbal_state_array = card->qdio.out_bufstates;
+	init_data.scan_threshold =
+		(card->info.type == QETH_CARD_TYPE_IQD) ? 8 : 32;
+
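+	/* allocate and establish the QDIO queues only once per card */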
+	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
+		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
+		rc = qdio_allocate(&init_data);
+		if (rc) {
+			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
+			goto out;
+		}
+		rc = qdio_establish(&init_data);
+		if (rc) {
+			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
+			qdio_free(CARD_DDEV(card));
+		}
+	}
+
+	switch (card->options.cq) {
+	case QETH_CQ_ENABLED:
+		dev_info(&card->gdev->dev, "Completion Queue support enabled");
+		break;
+	case QETH_CQ_DISABLED:
+		dev_info(&card->gdev->dev, "Completion Queue support disabled");
+		break;
+	default:
+		break;
+	}
+out:
+	kfree(out_sbal_ptrs);
+out_free_queue_start_poll:
+	kfree(queue_start_poll);
+out_free_in_sbals:
+	kfree(in_sbal_ptrs);
+out_free_qib_param:
+	kfree(qib_param_field);
+out_free_nothing:
+	return rc;
+}
+
+static void qeth_core_free_card(struct qeth_card *card)
+{
+
+	QETH_DBF_TEXT(SETUP, 2, "freecrd");
+	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+	qeth_clean_channel(&card->read);
+	qeth_clean_channel(&card->write);
+	if (card->dev)
+		free_netdev(card->dev);
+	kfree(card->ip_tbd_list);
+	qeth_free_qdio_buffers(card);
+	unregister_service_level(&card->qeth_service_level);
+	kfree(card);
+}
+
+static struct ccw_device_id qeth_ids[] = {
+	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
+					.driver_info = QETH_CARD_TYPE_OSD},
+	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
+					.driver_info = QETH_CARD_TYPE_IQD},
+	{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
+					.driver_info = QETH_CARD_TYPE_OSN},
+	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
+					.driver_info = QETH_CARD_TYPE_OSM},
+	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
+					.driver_info = QETH_CARD_TYPE_OSX},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, qeth_ids);
+
+static struct ccw_driver qeth_ccw_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "qeth",
+	},
+	.ids = qeth_ids,
+	.probe = ccwgroup_probe_ccwdev,
+	.remove = ccwgroup_remove_ccwdev,
+};
+
+static int qeth_core_driver_group(const char *buf, struct device *root_dev,
+				unsigned long driver_id)
+{
+	return ccwgroup_create_from_string(root_dev, driver_id,
+					   &qeth_ccw_driver, 3, buf);
+}
+
+int qeth_core_hardsetup_card(struct qeth_card *card)
+{
+	int retries = 0;
+	int rc;
+
+	QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
+	atomic_set(&card->force_alloc_skb, 0);
+	qeth_get_channel_path_desc(card);
+retry:
+	if (retries)
+		QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
+			dev_name(&card->gdev->dev));
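+	/* force all three channels offline, then bring them back online */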
+	ccw_device_set_offline(CARD_DDEV(card));
+	ccw_device_set_offline(CARD_WDEV(card));
+	ccw_device_set_offline(CARD_RDEV(card));
+	rc = ccw_device_set_online(CARD_RDEV(card));
+	if (rc)
+		goto retriable;
+	rc = ccw_device_set_online(CARD_WDEV(card));
+	if (rc)
+		goto retriable;
+	rc = ccw_device_set_online(CARD_DDEV(card));
+	if (rc)
+		goto retriable;
+	rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+retriable:
+	if (rc == -ERESTARTSYS) {
+		QETH_DBF_TEXT(SETUP, 2, "break1");
+		return rc;
+	} else if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+		if (++retries > 3)
+			goto out;
+		else
+			goto retry;
+	}
+	qeth_determine_capabilities(card);
+	qeth_init_tokens(card);
+	qeth_init_func_level(card);
+	rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
+	if (rc == -ERESTARTSYS) {
+		QETH_DBF_TEXT(SETUP, 2, "break2");
+		return rc;
+	} else if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+		if (--retries < 0)
+			goto out;
+		else
+			goto retry;
+	}
+	rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
+	if (rc == -ERESTARTSYS) {
+		QETH_DBF_TEXT(SETUP, 2, "break3");
+		return rc;
+	} else if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+		if (--retries < 0)
+			goto out;
+		else
+			goto retry;
+	}
+	card->read_or_write_problem = 0;
+	rc = qeth_mpc_initialize(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+		goto out;
+	}
+
+	card->options.ipa4.supported_funcs = 0;
+	card->options.adp.supported_funcs = 0;
+	card->info.diagass_support = 0;
+	qeth_query_ipassists(card, QETH_PROT_IPV4);
+	if (qeth_is_supported(card, IPA_SETADAPTERPARMS))
+		qeth_query_setadapterparms(card);
+	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
+		qeth_query_setdiagass(card);
+	return 0;
+out:
+	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
+		"from an error on the device\n");
+	QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n",
+		dev_name(&card->gdev->dev), rc);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
+
+static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer,
+		struct qdio_buffer_element *element,
+		struct sk_buff **pskb, int offset, int *pfrag, int data_len)
+{
+	struct page *page = virt_to_page(element->addr);
+	if (*pskb == NULL) {
+		if (qethbuffer->rx_skb) {
+			/* only if qeth_card.options.cq == QETH_CQ_ENABLED */
+			*pskb = qethbuffer->rx_skb;
+			qethbuffer->rx_skb = NULL;
+		} else {
+			*pskb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN);
+			if (!(*pskb))
+				return -ENOMEM;
+		}
+
+		skb_reserve(*pskb, ETH_HLEN);
+		if (data_len <= QETH_RX_PULL_LEN) {
+			memcpy(skb_put(*pskb, data_len), element->addr + offset,
+				data_len);
+		} else {
+			get_page(page);
+			memcpy(skb_put(*pskb, QETH_RX_PULL_LEN),
+			       element->addr + offset, QETH_RX_PULL_LEN);
+			skb_fill_page_desc(*pskb, *pfrag, page,
+				offset + QETH_RX_PULL_LEN,
+				data_len - QETH_RX_PULL_LEN);
+			(*pskb)->data_len += data_len - QETH_RX_PULL_LEN;
+			(*pskb)->len      += data_len - QETH_RX_PULL_LEN;
+			(*pskb)->truesize += data_len - QETH_RX_PULL_LEN;
+			(*pfrag)++;
+		}
+	} else {
+		get_page(page);
+		skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len);
+		(*pskb)->data_len += data_len;
+		(*pskb)->len      += data_len;
+		(*pskb)->truesize += data_len;
+		(*pfrag)++;
+	}
+
+	return 0;
+}
+
+struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
+		struct qeth_qdio_buffer *qethbuffer,
+		struct qdio_buffer_element **__element, int *__offset,
+		struct qeth_hdr **hdr)
+{
+	struct qdio_buffer_element *element = *__element;
+	struct qdio_buffer *buffer = qethbuffer->buffer;
+	int offset = *__offset;
+	struct sk_buff *skb = NULL;
+	int skb_len = 0;
+	void *data_ptr;
+	int data_len;
+	int headroom = 0;
+	int use_rx_sg = 0;
+	int frag = 0;
+
+	/* qeth_hdr must not cross element boundaries */
+	if (element->length < offset + sizeof(struct qeth_hdr)) {
+		if (qeth_is_last_sbale(element))
+			return NULL;
+		element++;
+		offset = 0;
+		if (element->length < sizeof(struct qeth_hdr))
+			return NULL;
+	}
+	*hdr = element->addr + offset;
+
+	offset += sizeof(struct qeth_hdr);
+	switch ((*hdr)->hdr.l2.id) {
+	case QETH_HEADER_TYPE_LAYER2:
+		skb_len = (*hdr)->hdr.l2.pkt_length;
+		break;
+	case QETH_HEADER_TYPE_LAYER3:
+		skb_len = (*hdr)->hdr.l3.length;
+		if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
+		    (card->info.link_type == QETH_LINK_TYPE_HSTR))
+			headroom = TR_HLEN;
+		else
+			headroom = ETH_HLEN;
+		break;
+	case QETH_HEADER_TYPE_OSN:
+		skb_len = (*hdr)->hdr.osn.pdu_length;
+		headroom = sizeof(struct qeth_hdr);
+		break;
+	default:
+		break;
+	}
+
+	if (!skb_len)
+		return NULL;
+
+	if (((skb_len >= card->options.rx_sg_cb) &&
+	     (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
+	     (!atomic_read(&card->force_alloc_skb))) ||
+	    (card->options.cq == QETH_CQ_ENABLED)) {
+		use_rx_sg = 1;
+	} else {
+		skb = dev_alloc_skb(skb_len + headroom);
+		if (!skb)
+			goto no_mem;
+		if (headroom)
+			skb_reserve(skb, headroom);
+	}
+
+	data_ptr = element->addr + offset;
+	while (skb_len) {
+		data_len = min(skb_len, (int)(element->length - offset));
+		if (data_len) {
+			if (use_rx_sg) {
+				if (qeth_create_skb_frag(qethbuffer, element,
+				    &skb, offset, &frag, data_len))
+					goto no_mem;
+			} else {
+				memcpy(skb_put(skb, data_len), data_ptr,
+					data_len);
+			}
+		}
+		skb_len -= data_len;
+		if (skb_len) {
+			if (qeth_is_last_sbale(element)) {
+				QETH_CARD_TEXT(card, 4, "unexeob");
+				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
+				dev_kfree_skb_any(skb);
+				card->stats.rx_errors++;
+				return NULL;
+			}
+			element++;
+			offset = 0;
+			data_ptr = element->addr;
+		} else {
+			offset += data_len;
+		}
+	}
+	*__element = element;
+	*__offset = offset;
+	if (use_rx_sg && card->options.performance_stats) {
+		card->perf_stats.sg_skbs_rx++;
+		card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
+	}
+	return skb;
+no_mem:
+	if (net_ratelimit()) {
+		QETH_CARD_TEXT(card, 2, "noskbmem");
+	}
+	card->stats.rx_dropped++;
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
+
+static void qeth_unregister_dbf_views(void)
+{
+	int x;
+	for (x = 0; x < QETH_DBF_INFOS; x++) {
+		debug_unregister(qeth_dbf[x].id);
+		qeth_dbf[x].id = NULL;
+	}
+}
+
+void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
+{
+	char dbf_txt_buf[32];
+	va_list args;
+
+	if (level > id->level)
+		return;
+	va_start(args, fmt);
+	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
+	va_end(args);
+	debug_text_event(id, level, dbf_txt_buf);
+}
+EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
+
+static int qeth_register_dbf_views(void)
+{
+	int ret;
+	int x;
+
+	for (x = 0; x < QETH_DBF_INFOS; x++) {
+		/* register the areas */
+		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
+						qeth_dbf[x].pages,
+						qeth_dbf[x].areas,
+						qeth_dbf[x].len);
+		if (qeth_dbf[x].id == NULL) {
+			qeth_unregister_dbf_views();
+			return -ENOMEM;
+		}
+
+		/* register a view */
+		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
+		if (ret) {
+			qeth_unregister_dbf_views();
+			return ret;
+		}
+
+		/* set a passing level */
+		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
+	}
+
+	return 0;
+}
+
+int qeth_core_load_discipline(struct qeth_card *card,
+		enum qeth_discipline_id discipline)
+{
+	int rc = 0;
+	mutex_lock(&qeth_mod_mutex);
+	switch (discipline) {
+	case QETH_DISCIPLINE_LAYER3:
+		card->discipline.ccwgdriver = try_then_request_module(
+			symbol_get(qeth_l3_ccwgroup_driver),
+			"qeth_l3");
+		break;
+	case QETH_DISCIPLINE_LAYER2:
+		card->discipline.ccwgdriver = try_then_request_module(
+			symbol_get(qeth_l2_ccwgroup_driver),
+			"qeth_l2");
+		break;
+	}
+	if (!card->discipline.ccwgdriver) {
+		dev_err(&card->gdev->dev, "There is no kernel module to "
+			"support discipline %d\n", discipline);
+		rc = -EINVAL;
+	}
+	mutex_unlock(&qeth_mod_mutex);
+	return rc;
+}
+
+void qeth_core_free_discipline(struct qeth_card *card)
+{
+	if (card->options.layer2)
+		symbol_put(qeth_l2_ccwgroup_driver);
+	else
+		symbol_put(qeth_l3_ccwgroup_driver);
+	card->discipline.ccwgdriver = NULL;
+}
+
+static int qeth_core_probe_device(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card;
+	struct device *dev;
+	int rc;
+	unsigned long flags;
+	char dbf_name[20];
+
+	QETH_DBF_TEXT(SETUP, 2, "probedev");
+
+	dev = &gdev->dev;
+	if (!get_device(dev))
+		return -ENODEV;
+
+	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
+
+	card = qeth_alloc_card();
+	if (!card) {
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
+		rc = -ENOMEM;
+		goto err_dev;
+	}
+
+	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
+		dev_name(&gdev->dev));
+	card->debug = debug_register(dbf_name, 2, 1, 8);
+	if (!card->debug) {
+		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
+		rc = -ENOMEM;
+		goto err_card;
+	}
+	debug_register_view(card->debug, &debug_hex_ascii_view);
+
+	card->read.ccwdev  = gdev->cdev[0];
+	card->write.ccwdev = gdev->cdev[1];
+	card->data.ccwdev  = gdev->cdev[2];
+	dev_set_drvdata(&gdev->dev, card);
+	card->gdev = gdev;
+	gdev->cdev[0]->handler = qeth_irq;
+	gdev->cdev[1]->handler = qeth_irq;
+	gdev->cdev[2]->handler = qeth_irq;
+
+	rc = qeth_determine_card_type(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+		goto err_dbf;
+	}
+	rc = qeth_setup_card(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+		goto err_dbf;
+	}
+
+	if (card->info.type == QETH_CARD_TYPE_OSN)
+		rc = qeth_core_create_osn_attributes(dev);
+	else
+		rc = qeth_core_create_device_attributes(dev);
+	if (rc)
+		goto err_dbf;
+	switch (card->info.type) {
+	case QETH_CARD_TYPE_OSN:
+	case QETH_CARD_TYPE_OSM:
+		rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
+		if (rc)
+			goto err_attr;
+		rc = card->discipline.ccwgdriver->probe(card->gdev);
+		if (rc)
+			goto err_disc;
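+		/* fall through */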
+	case QETH_CARD_TYPE_OSD:
+	case QETH_CARD_TYPE_OSX:
+	default:
+		break;
+	}
+
+	write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
+	list_add_tail(&card->list, &qeth_core_card_list.list);
+	write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+
+	qeth_determine_capabilities(card);
+	return 0;
+
+err_disc:
+	qeth_core_free_discipline(card);
+err_attr:
+	if (card->info.type == QETH_CARD_TYPE_OSN)
+		qeth_core_remove_osn_attributes(dev);
+	else
+		qeth_core_remove_device_attributes(dev);
+err_dbf:
+	debug_unregister(card->debug);
+err_card:
+	qeth_core_free_card(card);
+err_dev:
+	put_device(dev);
+	return rc;
+}
+
+static void qeth_core_remove_device(struct ccwgroup_device *gdev)
+{
+	unsigned long flags;
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	QETH_DBF_TEXT(SETUP, 2, "removedv");
+
+	if (card->info.type == QETH_CARD_TYPE_OSN) {
+		qeth_core_remove_osn_attributes(&gdev->dev);
+	} else {
+		qeth_core_remove_device_attributes(&gdev->dev);
+	}
+
+	if (card->discipline.ccwgdriver) {
+		card->discipline.ccwgdriver->remove(gdev);
+		qeth_core_free_discipline(card);
+	}
+
+	debug_unregister(card->debug);
+	write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
+	list_del(&card->list);
+	write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+	qeth_core_free_card(card);
+	dev_set_drvdata(&gdev->dev, NULL);
+	put_device(&gdev->dev);
+	return;
+}
+
+static int qeth_core_set_online(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc = 0;
+	int def_discipline;
+
+	if (!card->discipline.ccwgdriver) {
+		if (card->info.type == QETH_CARD_TYPE_IQD)
+			def_discipline = QETH_DISCIPLINE_LAYER3;
+		else
+			def_discipline = QETH_DISCIPLINE_LAYER2;
+		rc = qeth_core_load_discipline(card, def_discipline);
+		if (rc)
+			goto err;
+		rc = card->discipline.ccwgdriver->probe(card->gdev);
+		if (rc)
+			goto err;
+	}
+	rc = card->discipline.ccwgdriver->set_online(gdev);
+err:
+	return rc;
+}
+
+static int qeth_core_set_offline(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	return card->discipline.ccwgdriver->set_offline(gdev);
+}
+
+static void qeth_core_shutdown(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->shutdown)
+		card->discipline.ccwgdriver->shutdown(gdev);
+}
+
+static int qeth_core_prepare(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->prepare)
+		return card->discipline.ccwgdriver->prepare(gdev);
+	return 0;
+}
+
+static void qeth_core_complete(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->complete)
+		card->discipline.ccwgdriver->complete(gdev);
+}
+
+static int qeth_core_freeze(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->freeze)
+		return card->discipline.ccwgdriver->freeze(gdev);
+	return 0;
+}
+
+static int qeth_core_thaw(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->thaw)
+		return card->discipline.ccwgdriver->thaw(gdev);
+	return 0;
+}
+
+static int qeth_core_restore(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->restore)
+		return card->discipline.ccwgdriver->restore(gdev);
+	return 0;
+}
+
+static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "qeth",
+	},
+	.driver_id = 0xD8C5E3C8,	/* "QETH" in EBCDIC */
+	.probe = qeth_core_probe_device,
+	.remove = qeth_core_remove_device,
+	.set_online = qeth_core_set_online,
+	.set_offline = qeth_core_set_offline,
+	.shutdown = qeth_core_shutdown,
+	.prepare = qeth_core_prepare,
+	.complete = qeth_core_complete,
+	.freeze = qeth_core_freeze,
+	.thaw = qeth_core_thaw,
+	.restore = qeth_core_restore,
+};
+
+static ssize_t
+qeth_core_driver_group_store(struct device_driver *ddrv, const char *buf,
+			   size_t count)
+{
+	int err;
+	err = qeth_core_driver_group(buf, qeth_core_root_dev,
+					qeth_core_ccwgroup_driver.driver_id);
+	if (err)
+		return err;
+	else
+		return count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store);
+
+static struct {
+	const char str[ETH_GSTRING_LEN];
+} qeth_ethtool_stats_keys[] = {
+/*  0 */{"rx skbs"},
+	{"rx buffers"},
+	{"tx skbs"},
+	{"tx buffers"},
+	{"tx skbs no packing"},
+	{"tx buffers no packing"},
+	{"tx skbs packing"},
+	{"tx buffers packing"},
+	{"tx sg skbs"},
+	{"tx sg frags"},
+/* 10 */{"rx sg skbs"},
+	{"rx sg frags"},
+	{"rx sg page allocs"},
+	{"tx large kbytes"},
+	{"tx large count"},
+	{"tx pk state ch n->p"},
+	{"tx pk state ch p->n"},
+	{"tx pk watermark low"},
+	{"tx pk watermark high"},
+	{"queue 0 buffer usage"},
+/* 20 */{"queue 1 buffer usage"},
+	{"queue 2 buffer usage"},
+	{"queue 3 buffer usage"},
+	{"rx poll time"},
+	{"rx poll count"},
+	{"rx do_QDIO time"},
+	{"rx do_QDIO count"},
+	{"tx handler time"},
+	{"tx handler count"},
+	{"tx time"},
+/* 30 */{"tx count"},
+	{"tx do_QDIO time"},
+	{"tx do_QDIO count"},
+	{"tx csum"},
+	{"tx lin"},
+	{"cq handler count"},
+	{"cq handler time"}
+};
+
+int qeth_core_get_sset_count(struct net_device *dev, int stringset)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
+	default:
+		return -EINVAL;
+	}
+}
+EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
+
+void qeth_core_get_ethtool_stats(struct net_device *dev,
+		struct ethtool_stats *stats, u64 *data)
+{
+	struct qeth_card *card = dev->ml_priv;
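+	/* indices must match the order of qeth_ethtool_stats_keys above */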
+	data[0] = card->stats.rx_packets -
+				card->perf_stats.initial_rx_packets;
+	data[1] = card->perf_stats.bufs_rec;
+	data[2] = card->stats.tx_packets -
+				card->perf_stats.initial_tx_packets;
+	data[3] = card->perf_stats.bufs_sent;
+	data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets
+			- card->perf_stats.skbs_sent_pack;
+	data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack;
+	data[6] = card->perf_stats.skbs_sent_pack;
+	data[7] = card->perf_stats.bufs_sent_pack;
+	data[8] = card->perf_stats.sg_skbs_sent;
+	data[9] = card->perf_stats.sg_frags_sent;
+	data[10] = card->perf_stats.sg_skbs_rx;
+	data[11] = card->perf_stats.sg_frags_rx;
+	data[12] = card->perf_stats.sg_alloc_page_rx;
+	data[13] = (card->perf_stats.large_send_bytes >> 10);
+	data[14] = card->perf_stats.large_send_cnt;
+	data[15] = card->perf_stats.sc_dp_p;
+	data[16] = card->perf_stats.sc_p_dp;
+	data[17] = QETH_LOW_WATERMARK_PACK;
+	data[18] = QETH_HIGH_WATERMARK_PACK;
+	data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers);
+	data[20] = (card->qdio.no_out_queues > 1) ?
+			atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0;
+	data[21] = (card->qdio.no_out_queues > 2) ?
+			atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0;
+	data[22] = (card->qdio.no_out_queues > 3) ?
+			atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0;
+	data[23] = card->perf_stats.inbound_time;
+	data[24] = card->perf_stats.inbound_cnt;
+	data[25] = card->perf_stats.inbound_do_qdio_time;
+	data[26] = card->perf_stats.inbound_do_qdio_cnt;
+	data[27] = card->perf_stats.outbound_handler_time;
+	data[28] = card->perf_stats.outbound_handler_cnt;
+	data[29] = card->perf_stats.outbound_time;
+	data[30] = card->perf_stats.outbound_cnt;
+	data[31] = card->perf_stats.outbound_do_qdio_time;
+	data[32] = card->perf_stats.outbound_do_qdio_cnt;
+	data[33] = card->perf_stats.tx_csum;
+	data[34] = card->perf_stats.tx_lin;
+	data[35] = card->perf_stats.cq_cnt;
+	data[36] = card->perf_stats.cq_time;
+}
+EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
+
+void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(data, &qeth_ethtool_stats_keys,
+			sizeof(qeth_ethtool_stats_keys));
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(qeth_core_get_strings);
+
+void qeth_core_get_drvinfo(struct net_device *dev,
+		struct ethtool_drvinfo *info)
+{
+	struct qeth_card *card = dev->ml_priv;
+	if (card->options.layer2)
+		strcpy(info->driver, "qeth_l2");
+	else
+		strcpy(info->driver, "qeth_l3");
+
+	strcpy(info->version, "1.0");
+	strcpy(info->fw_version, card->info.mcl_level);
+	sprintf(info->bus_info, "%s/%s/%s",
+			CARD_RDEV_ID(card),
+			CARD_WDEV_ID(card),
+			CARD_DDEV_ID(card));
+}
+EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
+
+int qeth_core_ethtool_get_settings(struct net_device *netdev,
+					struct ethtool_cmd *ecmd)
+{
+	struct qeth_card *card = netdev->ml_priv;
+	enum qeth_link_types link_type;
+
+	if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
+		link_type = QETH_LINK_TYPE_10GBIT_ETH;
+	else
+		link_type = card->info.link_type;
+
+	ecmd->transceiver = XCVR_INTERNAL;
+	ecmd->supported = SUPPORTED_Autoneg;
+	ecmd->advertising = ADVERTISED_Autoneg;
+	ecmd->duplex = DUPLEX_FULL;
+	ecmd->autoneg = AUTONEG_ENABLE;
+
+	switch (link_type) {
+	case QETH_LINK_TYPE_FAST_ETH:
+	case QETH_LINK_TYPE_LANE_ETH100:
+		ecmd->supported |= SUPPORTED_10baseT_Half |
+					SUPPORTED_10baseT_Full |
+					SUPPORTED_100baseT_Half |
+					SUPPORTED_100baseT_Full |
+					SUPPORTED_TP;
+		ecmd->advertising |= ADVERTISED_10baseT_Half |
+					ADVERTISED_10baseT_Full |
+					ADVERTISED_100baseT_Half |
+					ADVERTISED_100baseT_Full |
+					ADVERTISED_TP;
+		ecmd->speed = SPEED_100;
+		ecmd->port = PORT_TP;
+		break;
+
+	case QETH_LINK_TYPE_GBIT_ETH:
+	case QETH_LINK_TYPE_LANE_ETH1000:
+		ecmd->supported |= SUPPORTED_10baseT_Half |
+					SUPPORTED_10baseT_Full |
+					SUPPORTED_100baseT_Half |
+					SUPPORTED_100baseT_Full |
+					SUPPORTED_1000baseT_Half |
+					SUPPORTED_1000baseT_Full |
+					SUPPORTED_FIBRE;
+		ecmd->advertising |= ADVERTISED_10baseT_Half |
+					ADVERTISED_10baseT_Full |
+					ADVERTISED_100baseT_Half |
+					ADVERTISED_100baseT_Full |
+					ADVERTISED_1000baseT_Half |
+					ADVERTISED_1000baseT_Full |
+					ADVERTISED_FIBRE;
+		ecmd->speed = SPEED_1000;
+		ecmd->port = PORT_FIBRE;
+		break;
+
+	case QETH_LINK_TYPE_10GBIT_ETH:
+		ecmd->supported |= SUPPORTED_10baseT_Half |
+					SUPPORTED_10baseT_Full |
+					SUPPORTED_100baseT_Half |
+					SUPPORTED_100baseT_Full |
+					SUPPORTED_1000baseT_Half |
+					SUPPORTED_1000baseT_Full |
+					SUPPORTED_10000baseT_Full |
+					SUPPORTED_FIBRE;
+		ecmd->advertising |= ADVERTISED_10baseT_Half |
+					ADVERTISED_10baseT_Full |
+					ADVERTISED_100baseT_Half |
+					ADVERTISED_100baseT_Full |
+					ADVERTISED_1000baseT_Half |
+					ADVERTISED_1000baseT_Full |
+					ADVERTISED_10000baseT_Full |
+					ADVERTISED_FIBRE;
+		ecmd->speed = SPEED_10000;
+		ecmd->port = PORT_FIBRE;
+		break;
+
+	default:
+		ecmd->supported |= SUPPORTED_10baseT_Half |
+					SUPPORTED_10baseT_Full |
+					SUPPORTED_TP;
+		ecmd->advertising |= ADVERTISED_10baseT_Half |
+					ADVERTISED_10baseT_Full |
+					ADVERTISED_TP;
+		ecmd->speed = SPEED_10;
+		ecmd->port = PORT_TP;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
+
+static int __init qeth_core_init(void)
+{
+	int rc;
+
+	pr_info("loading core functions\n");
+	INIT_LIST_HEAD(&qeth_core_card_list.list);
+	rwlock_init(&qeth_core_card_list.rwlock);
+	mutex_init(&qeth_mod_mutex);
+
+	rc = qeth_register_dbf_views();
+	if (rc)
+		goto out_err;
+	rc = ccw_driver_register(&qeth_ccw_driver);
+	if (rc)
+		goto ccw_err;
+	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
+	if (rc)
+		goto ccwgroup_err;
+	rc = driver_create_file(&qeth_core_ccwgroup_driver.driver,
+				&driver_attr_group);
+	if (rc)
+		goto driver_err;
+	qeth_core_root_dev = root_device_register("qeth");
+	rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0;
+	if (rc)
+		goto register_err;
+
+	qeth_core_header_cache = kmem_cache_create("qeth_hdr",
+			sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL);
+	if (!qeth_core_header_cache) {
+		rc = -ENOMEM;
+		goto slab_err;
+	}
+
+	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
+			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
+	if (!qeth_qdio_outbuf_cache) {
+		rc = -ENOMEM;
+		goto cqslab_err;
+	}
+
+	return 0;
+cqslab_err:
+	kmem_cache_destroy(qeth_core_header_cache);
+slab_err:
+	root_device_unregister(qeth_core_root_dev);
+register_err:
+	driver_remove_file(&qeth_core_ccwgroup_driver.driver,
+			   &driver_attr_group);
+driver_err:
+	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
+ccwgroup_err:
+	ccw_driver_unregister(&qeth_ccw_driver);
+ccw_err:
+	QETH_DBF_MESSAGE(2, "Initialization failed with code %d\n", rc);
+	qeth_unregister_dbf_views();
+out_err:
+	pr_err("Initializing the qeth device driver failed\n");
+	return rc;
+}
+
+static void __exit qeth_core_exit(void)
+{
+	root_device_unregister(qeth_core_root_dev);
+	driver_remove_file(&qeth_core_ccwgroup_driver.driver,
+			   &driver_attr_group);
+	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
+	ccw_driver_unregister(&qeth_ccw_driver);
+	kmem_cache_destroy(qeth_qdio_outbuf_cache);
+	kmem_cache_destroy(qeth_core_header_cache);
+	qeth_unregister_dbf_views();
+	pr_info("core functions removed\n");
+}
+
+module_init(qeth_core_init);
+module_exit(qeth_core_exit);
+MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
+MODULE_DESCRIPTION("qeth core functions");
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_core_mpc.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_core_mpc.c
new file mode 100644
index 0000000..7fab654
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_core_mpc.c
@@ -0,0 +1,269 @@
+/*
+ *  drivers/s390/net/qeth_core_mpc.c
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <asm/cio.h>
+#include "qeth_core_mpc.h"
+
+unsigned char IDX_ACTIVATE_READ[] = {
+	0x00, 0x00, 0x80, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x19, 0x01, 0x01, 0x80,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0xc8, 0xc1,
+	0xd3, 0xd3, 0xd6, 0xd3,  0xc5, 0x40, 0x00, 0x00,
+	0x00, 0x00
+};
+
+unsigned char IDX_ACTIVATE_WRITE[] = {
+	0x00, 0x00, 0x80, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x15, 0x01, 0x01, 0x80,  0x00, 0x00, 0x00, 0x00,
+	0xff, 0xff, 0x00, 0x00,  0x00, 0x00, 0xc8, 0xc1,
+	0xd3, 0xd3, 0xd6, 0xd3,  0xc5, 0x40, 0x00, 0x00,
+	0x00, 0x00
+};
+
+unsigned char CM_ENABLE[] = {
+	0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x63,
+	0x10, 0x00, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+	0x81, 0x7e, 0x00, 0x01,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x24, 0x00, 0x23,
+	0x00, 0x00, 0x23, 0x05,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x01, 0x00, 0x00, 0x23,  0x00, 0x00, 0x00, 0x40,
+	0x00, 0x0c, 0x41, 0x02,  0x00, 0x17, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x0b, 0x04, 0x01,
+	0x7e, 0x04, 0x05, 0x00,  0x01, 0x01, 0x0f,
+	0x00,
+	0x0c, 0x04, 0x02, 0xff,  0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff
+};
+
+unsigned char CM_SETUP[] = {
+	0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x02,
+	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x64,
+	0x10, 0x00, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+	0x81, 0x7e, 0x00, 0x01,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x24, 0x00, 0x24,
+	0x00, 0x00, 0x24, 0x05,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x01, 0x00, 0x00, 0x24,  0x00, 0x00, 0x00, 0x40,
+	0x00, 0x0c, 0x41, 0x04,  0x00, 0x18, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x09, 0x04, 0x04,
+	0x05, 0x00, 0x01, 0x01,  0x11,
+	0x00, 0x09, 0x04,
+	0x05, 0x05, 0x00, 0x00,  0x00, 0x00,
+	0x00, 0x06,
+	0x04, 0x06, 0xc8, 0x00
+};
+
+unsigned char ULP_ENABLE[] = {
+	0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x03,
+	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x6b,
+	0x10, 0x00, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+	0x41, 0x7e, 0x00, 0x01,  0x00, 0x00, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x24, 0x00, 0x2b,
+	0x00, 0x00, 0x2b, 0x05,  0x20, 0x01, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x01, 0x00, 0x00, 0x2b,  0x00, 0x00, 0x00, 0x40,
+	0x00, 0x0c, 0x41, 0x02,  0x00, 0x1f, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x0b, 0x04, 0x01,
+	0x03, 0x04, 0x05, 0x00,  0x01, 0x01, 0x12,
+	0x00,
+	0x14, 0x04, 0x0a, 0x00,  0x20, 0x00, 0x00, 0xff,
+	0xff, 0x00, 0x08, 0xc8,  0xe8, 0xc4, 0xf1, 0xc7,
+	0xf1, 0x00, 0x00
+};
+
+unsigned char ULP_SETUP[] = {
+	0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x04,
+	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x6c,
+	0x10, 0x00, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+	0x41, 0x7e, 0x00, 0x01,  0x00, 0x00, 0x00, 0x02,
+	0x00, 0x00, 0x00, 0x01,  0x00, 0x24, 0x00, 0x2c,
+	0x00, 0x00, 0x2c, 0x05,  0x20, 0x01, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x01, 0x00, 0x00, 0x2c,  0x00, 0x00, 0x00, 0x40,
+	0x00, 0x0c, 0x41, 0x04,  0x00, 0x20, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x09, 0x04, 0x04,
+	0x05, 0x00, 0x01, 0x01,  0x14,
+	0x00, 0x09, 0x04,
+	0x05, 0x05, 0x30, 0x01,  0x00, 0x00,
+	0x00, 0x06,
+	0x04, 0x06, 0x40, 0x00,
+	0x00, 0x08, 0x04, 0x0b,
+	0x00, 0x00, 0x00, 0x00
+};
+
+unsigned char DM_ACT[] = {
+	0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x05,
+	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x55,
+	0x10, 0x00, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+	0x41, 0x7e, 0x00, 0x01,  0x00, 0x00, 0x00, 0x03,
+	0x00, 0x00, 0x00, 0x02,  0x00, 0x24, 0x00, 0x15,
+	0x00, 0x00, 0x2c, 0x05,  0x20, 0x01, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x01, 0x00, 0x00, 0x15,  0x00, 0x00, 0x00, 0x40,
+	0x00, 0x0c, 0x43, 0x60,  0x00, 0x09, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x09, 0x04, 0x04,
+	0x05, 0x40, 0x01, 0x01,  0x00
+};
+
+unsigned char IPA_PDU_HEADER[] = {
+	0x00, 0xe0, 0x00, 0x00,  0x77, 0x77, 0x77, 0x77,
+	0x00, 0x00, 0x00, 0x14,  0x00, 0x00,
+		(IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) / 256,
+		(IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) % 256,
+	0x10, 0x00, 0x00, 0x01,  0x00, 0x00, 0x00, 0x00,
+	0xc1, 0x03, 0x00, 0x01,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x24,
+		sizeof(struct qeth_ipa_cmd) / 256,
+		sizeof(struct qeth_ipa_cmd) % 256,
+	0x00,
+		sizeof(struct qeth_ipa_cmd) / 256,
+		sizeof(struct qeth_ipa_cmd) % 256,
+	0x05,
+	0x77, 0x77, 0x77, 0x77,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x01, 0x00,
+		sizeof(struct qeth_ipa_cmd) / 256,
+		sizeof(struct qeth_ipa_cmd) % 256,
+	0x00, 0x00, 0x00, 0x40,
+};
+EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
+
+unsigned char WRITE_CCW[] = {
+	0x01, CCW_FLAG_SLI, 0, 0,
+	0, 0, 0, 0
+};
+
+unsigned char READ_CCW[] = {
+	0x02, CCW_FLAG_SLI, 0, 0,
+	0, 0, 0, 0
+};
+
+
+struct ipa_rc_msg {
+	enum qeth_ipa_return_codes rc;
+	char *msg;
+};
+
+static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
+	{IPA_RC_SUCCESS,		"success"},
+	{IPA_RC_NOTSUPP,		"Command not supported"},
+	{IPA_RC_IP_TABLE_FULL,		"Add Addr IP Table Full - ipv6"},
+	{IPA_RC_UNKNOWN_ERROR,		"IPA command failed - reason unknown"},
+	{IPA_RC_UNSUPPORTED_COMMAND,	"Command not supported"},
+	{IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"},
+	{IPA_RC_DUP_IPV6_HOME,		"ipv6 address already registered"},
+	{IPA_RC_UNREGISTERED_ADDR,	"Address not registered"},
+	{IPA_RC_NO_ID_AVAILABLE,	"No identifiers available"},
+	{IPA_RC_ID_NOT_FOUND,		"Identifier not found"},
+	{IPA_RC_INVALID_IP_VERSION,	"IP version incorrect"},
+	{IPA_RC_LAN_FRAME_MISMATCH,	"LAN and frame mismatch"},
+	{IPA_RC_L2_UNSUPPORTED_CMD,	"Unsupported layer 2 command"},
+	{IPA_RC_L2_DUP_MAC,		"Duplicate MAC address"},
+	{IPA_RC_L2_ADDR_TABLE_FULL,	"Layer2 address table full"},
+	{IPA_RC_L2_DUP_LAYER3_MAC,	"Duplicate with layer 3 MAC"},
+	{IPA_RC_L2_GMAC_NOT_FOUND,	"GMAC not found"},
+	{IPA_RC_L2_MAC_NOT_AUTH_BY_HYP,	"L2 mac not authorized by hypervisor"},
+	{IPA_RC_L2_MAC_NOT_AUTH_BY_ADP,	"L2 mac not authorized by adapter"},
+	{IPA_RC_L2_MAC_NOT_FOUND,	"L2 mac address not found"},
+	{IPA_RC_L2_INVALID_VLAN_ID,	"L2 invalid vlan id"},
+	{IPA_RC_L2_DUP_VLAN_ID,		"L2 duplicate vlan id"},
+	{IPA_RC_L2_VLAN_ID_NOT_FOUND,	"L2 vlan id not found"},
+	{IPA_RC_DATA_MISMATCH,		"Data field mismatch (v4/v6 mixed)"},
+	{IPA_RC_INVALID_MTU_SIZE,	"Invalid MTU size"},
+	{IPA_RC_INVALID_LANTYPE,	"Invalid LAN type"},
+	{IPA_RC_INVALID_LANNUM,		"Invalid LAN num"},
+	{IPA_RC_DUPLICATE_IP_ADDRESS,	"Address already registered"},
+	{IPA_RC_IP_ADDR_TABLE_FULL,	"IP address table full"},
+	{IPA_RC_LAN_PORT_STATE_ERROR,	"LAN port state error"},
+	{IPA_RC_SETIP_NO_STARTLAN,	"Setip no startlan received"},
+	{IPA_RC_SETIP_ALREADY_RECEIVED,	"Setip already received"},
+	{IPA_RC_IP_ADDR_ALREADY_USED,	"IP address already in use on LAN"},
+	{IPA_RC_MC_ADDR_NOT_FOUND,	"Multicast address not found"},
+	{IPA_RC_SETIP_INVALID_VERSION,	"SETIP invalid IP version"},
+	{IPA_RC_UNSUPPORTED_SUBCMD,	"Unsupported assist subcommand"},
+	{IPA_RC_ARP_ASSIST_NO_ENABLE,	"Only partial success, no enable"},
+	{IPA_RC_PRIMARY_ALREADY_DEFINED, "Primary already defined"},
+	{IPA_RC_SECOND_ALREADY_DEFINED,	"Secondary already defined"},
+	{IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"},
+	{IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"},
+	{IPA_RC_LAN_OFFLINE,		"STRTLAN_LAN_DISABLED - LAN offline"},
+	{IPA_RC_INVALID_IP_VERSION2,	"Invalid IP version"},
+	{IPA_RC_ENOMEM,			"Memory problem"},
+	{IPA_RC_FFFF,			"Unknown Error"}
+};
+
+char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
+{
+	int x = 0;
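+	/* make the last table entry a sentinel so the lookup always terminates */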
+	qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) /
+			sizeof(struct ipa_rc_msg) - 1].rc = rc;
+	while (qeth_ipa_rc_msg[x].rc != rc)
+		x++;
+	return qeth_ipa_rc_msg[x].msg;
+}
+
+
+struct ipa_cmd_names {
+	enum qeth_ipa_cmds cmd;
+	char *name;
+};
+
+static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
+	{IPA_CMD_STARTLAN,	"startlan"},
+	{IPA_CMD_STOPLAN,	"stoplan"},
+	{IPA_CMD_SETVMAC,	"setvmac"},
+	{IPA_CMD_DELVMAC,	"delvmac"},
+	{IPA_CMD_SETGMAC,	"setgmac"},
+	{IPA_CMD_DELGMAC,	"delgmac"},
+	{IPA_CMD_SETVLAN,	"setvlan"},
+	{IPA_CMD_DELVLAN,	"delvlan"},
+	{IPA_CMD_SETCCID,	"setccid"},
+	{IPA_CMD_DELCCID,	"delccid"},
+	{IPA_CMD_MODCCID,	"modccid"},
+	{IPA_CMD_SETIP,		"setip"},
+	{IPA_CMD_QIPASSIST,	"qipassist"},
+	{IPA_CMD_SETASSPARMS,	"setassparms"},
+	{IPA_CMD_SETIPM,	"setipm"},
+	{IPA_CMD_DELIPM,	"delipm"},
+	{IPA_CMD_SETRTG,	"setrtg"},
+	{IPA_CMD_DELIP,		"delip"},
+	{IPA_CMD_SETADAPTERPARMS, "setadapterparms"},
+	{IPA_CMD_SET_DIAG_ASS,	"set_diag_ass"},
+	{IPA_CMD_CREATE_ADDR,	"create_addr"},
+	{IPA_CMD_DESTROY_ADDR,	"destroy_addr"},
+	{IPA_CMD_REGISTER_LOCAL_ADDR,	"register_local_addr"},
+	{IPA_CMD_UNREGISTER_LOCAL_ADDR,	"unregister_local_addr"},
+	{IPA_CMD_UNKNOWN,	"unknown"},
+};
+
+char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
+{
+	int x = 0;
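+	/* make the last table entry a sentinel so the lookup always terminates */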
+	qeth_ipa_cmd_names[
+		sizeof(qeth_ipa_cmd_names) /
+			sizeof(struct ipa_cmd_names)-1].cmd = cmd;
+	while (qeth_ipa_cmd_names[x].cmd != cmd)
+		x++;
+	return qeth_ipa_cmd_names[x].name;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_core_mpc.h b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_core_mpc.h
new file mode 100644
index 0000000..ff41e42
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_core_mpc.h
@@ -0,0 +1,649 @@
+/*
+ *  drivers/s390/net/qeth_core_mpc.h
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#ifndef __QETH_CORE_MPC_H__
+#define __QETH_CORE_MPC_H__
+
+#include <asm/qeth.h>
+
+#define IPA_PDU_HEADER_SIZE	0x40
+#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
+#define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer + 0x26)
+#define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer + 0x29)
+#define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer + 0x3a)
+
+extern unsigned char IPA_PDU_HEADER[];
+#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c)
+
+#define IPA_CMD_LENGTH	(IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd))
+
+#define QETH_SEQ_NO_LENGTH	4
+#define QETH_MPC_TOKEN_LENGTH	4
+#define QETH_MCL_LENGTH		4
+#define OSA_ADDR_LEN		6
+
+#define QETH_TIMEOUT		(10 * HZ)
+#define QETH_IPA_TIMEOUT	(45 * HZ)
+#define QETH_IDX_COMMAND_SEQNO	0xffff0000
+#define SR_INFO_LEN		16
+
+#define QETH_CLEAR_CHANNEL_PARM	-10
+#define QETH_HALT_CHANNEL_PARM	-11
+#define QETH_RCD_PARM -12
+
+/*****************************************************************************/
+/* IP Assist related definitions                                             */
+/*****************************************************************************/
+#define IPA_CMD_INITIATOR_HOST  0x00
+#define IPA_CMD_INITIATOR_OSA   0x01
+#define IPA_CMD_INITIATOR_HOST_REPLY  0x80
+#define IPA_CMD_INITIATOR_OSA_REPLY   0x81
+#define IPA_CMD_PRIM_VERSION_NO 0x01
+
+enum qeth_card_types {
+	QETH_CARD_TYPE_UNKNOWN = 0,
+	QETH_CARD_TYPE_OSD     = 1,
+	QETH_CARD_TYPE_IQD     = 5,
+	QETH_CARD_TYPE_OSN     = 6,
+	QETH_CARD_TYPE_OSM     = 3,
+	QETH_CARD_TYPE_OSX     = 2,
+};
+
+#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
+/* only the first two bytes are looked at in qeth_get_cardname_short */
+enum qeth_link_types {
+	QETH_LINK_TYPE_FAST_ETH     = 0x01,
+	QETH_LINK_TYPE_HSTR         = 0x02,
+	QETH_LINK_TYPE_GBIT_ETH     = 0x03,
+	QETH_LINK_TYPE_OSN          = 0x04,
+	QETH_LINK_TYPE_10GBIT_ETH   = 0x10,
+	QETH_LINK_TYPE_LANE_ETH100  = 0x81,
+	QETH_LINK_TYPE_LANE_TR      = 0x82,
+	QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
+	QETH_LINK_TYPE_LANE         = 0x88,
+	QETH_LINK_TYPE_ATM_NATIVE   = 0x90,
+};
+
+enum qeth_tr_macaddr_modes {
+	QETH_TR_MACADDR_NONCANONICAL = 0,
+	QETH_TR_MACADDR_CANONICAL    = 1,
+};
+
+enum qeth_tr_broadcast_modes {
+	QETH_TR_BROADCAST_ALLRINGS = 0,
+	QETH_TR_BROADCAST_LOCAL    = 1,
+};
+
+/*
+ * Routing stuff
+ */
+#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */
+enum qeth_routing_types {
+	/* TODO: set to bit flag used in IPA Command */
+	NO_ROUTER		= 0,
+	PRIMARY_ROUTER		= 1,
+	SECONDARY_ROUTER	= 2,
+	MULTICAST_ROUTER	= 3,
+	PRIMARY_CONNECTOR	= 4,
+	SECONDARY_CONNECTOR	= 5,
+};
+
+/* IPA Commands */
+enum qeth_ipa_cmds {
+	IPA_CMD_STARTLAN		= 0x01,
+	IPA_CMD_STOPLAN			= 0x02,
+	IPA_CMD_SETVMAC			= 0x21,
+	IPA_CMD_DELVMAC			= 0x22,
+	IPA_CMD_SETGMAC			= 0x23,
+	IPA_CMD_DELGMAC			= 0x24,
+	IPA_CMD_SETVLAN			= 0x25,
+	IPA_CMD_DELVLAN			= 0x26,
+	IPA_CMD_SETCCID			= 0x41,
+	IPA_CMD_DELCCID			= 0x42,
+	IPA_CMD_MODCCID			= 0x43,
+	IPA_CMD_SETIP			= 0xb1,
+	IPA_CMD_QIPASSIST		= 0xb2,
+	IPA_CMD_SETASSPARMS		= 0xb3,
+	IPA_CMD_SETIPM			= 0xb4,
+	IPA_CMD_DELIPM			= 0xb5,
+	IPA_CMD_SETRTG			= 0xb6,
+	IPA_CMD_DELIP			= 0xb7,
+	IPA_CMD_SETADAPTERPARMS		= 0xb8,
+	IPA_CMD_SET_DIAG_ASS		= 0xb9,
+	IPA_CMD_CREATE_ADDR		= 0xc3,
+	IPA_CMD_DESTROY_ADDR		= 0xc4,
+	IPA_CMD_REGISTER_LOCAL_ADDR	= 0xd1,
+	IPA_CMD_UNREGISTER_LOCAL_ADDR	= 0xd2,
+	IPA_CMD_UNKNOWN			= 0x00
+};
+
+enum qeth_ip_ass_cmds {
+	IPA_CMD_ASS_START	= 0x0001,
+	IPA_CMD_ASS_STOP	= 0x0002,
+	IPA_CMD_ASS_CONFIGURE	= 0x0003,
+	IPA_CMD_ASS_ENABLE	= 0x0004,
+};
+
+enum qeth_arp_process_subcmds {
+	IPA_CMD_ASS_ARP_SET_NO_ENTRIES	= 0x0003,
+	IPA_CMD_ASS_ARP_QUERY_CACHE	= 0x0004,
+	IPA_CMD_ASS_ARP_ADD_ENTRY	= 0x0005,
+	IPA_CMD_ASS_ARP_REMOVE_ENTRY	= 0x0006,
+	IPA_CMD_ASS_ARP_FLUSH_CACHE	= 0x0007,
+	IPA_CMD_ASS_ARP_QUERY_INFO	= 0x0104,
+	IPA_CMD_ASS_ARP_QUERY_STATS	= 0x0204,
+};
+
+
+/* Return Codes for IPA Commands
+ * according to OSA card Specs */
+
+enum qeth_ipa_return_codes {
+	IPA_RC_SUCCESS			= 0x0000,
+	IPA_RC_NOTSUPP			= 0x0001,
+	IPA_RC_IP_TABLE_FULL		= 0x0002,
+	IPA_RC_UNKNOWN_ERROR		= 0x0003,
+	IPA_RC_UNSUPPORTED_COMMAND	= 0x0004,
+	IPA_RC_TRACE_ALREADY_ACTIVE	= 0x0005,
+	IPA_RC_INVALID_FORMAT		= 0x0006,
+	IPA_RC_DUP_IPV6_REMOTE		= 0x0008,
+	IPA_RC_DUP_IPV6_HOME		= 0x0010,
+	IPA_RC_UNREGISTERED_ADDR	= 0x0011,
+	IPA_RC_NO_ID_AVAILABLE		= 0x0012,
+	IPA_RC_ID_NOT_FOUND		= 0x0013,
+	IPA_RC_INVALID_IP_VERSION	= 0x0020,
+	IPA_RC_LAN_FRAME_MISMATCH	= 0x0040,
+	IPA_RC_L2_UNSUPPORTED_CMD	= 0x2003,
+	IPA_RC_L2_DUP_MAC		= 0x2005,
+	IPA_RC_L2_ADDR_TABLE_FULL	= 0x2006,
+	IPA_RC_L2_DUP_LAYER3_MAC	= 0x200a,
+	IPA_RC_L2_GMAC_NOT_FOUND	= 0x200b,
+	IPA_RC_L2_MAC_NOT_AUTH_BY_HYP	= 0x200c,
+	IPA_RC_L2_MAC_NOT_AUTH_BY_ADP	= 0x200d,
+	IPA_RC_L2_MAC_NOT_FOUND		= 0x2010,
+	IPA_RC_L2_INVALID_VLAN_ID	= 0x2015,
+	IPA_RC_L2_DUP_VLAN_ID		= 0x2016,
+	IPA_RC_L2_VLAN_ID_NOT_FOUND	= 0x2017,
+	IPA_RC_DATA_MISMATCH		= 0xe001,
+	IPA_RC_INVALID_MTU_SIZE		= 0xe002,
+	IPA_RC_INVALID_LANTYPE		= 0xe003,
+	IPA_RC_INVALID_LANNUM		= 0xe004,
+	IPA_RC_DUPLICATE_IP_ADDRESS	= 0xe005,
+	IPA_RC_IP_ADDR_TABLE_FULL	= 0xe006,
+	IPA_RC_LAN_PORT_STATE_ERROR	= 0xe007,
+	IPA_RC_SETIP_NO_STARTLAN	= 0xe008,
+	IPA_RC_SETIP_ALREADY_RECEIVED	= 0xe009,
+	IPA_RC_IP_ADDR_ALREADY_USED	= 0xe00a,
+	IPA_RC_MC_ADDR_NOT_FOUND	= 0xe00b,
+	IPA_RC_SETIP_INVALID_VERSION	= 0xe00d,
+	IPA_RC_UNSUPPORTED_SUBCMD	= 0xe00e,
+	IPA_RC_ARP_ASSIST_NO_ENABLE	= 0xe00f,
+	IPA_RC_PRIMARY_ALREADY_DEFINED	= 0xe010,
+	IPA_RC_SECOND_ALREADY_DEFINED	= 0xe011,
+	IPA_RC_INVALID_SETRTG_INDICATOR	= 0xe012,
+	IPA_RC_MC_ADDR_ALREADY_DEFINED	= 0xe013,
+	IPA_RC_LAN_OFFLINE		= 0xe080,
+	IPA_RC_INVALID_IP_VERSION2	= 0xf001,
+	IPA_RC_ENOMEM			= 0xfffe,
+	IPA_RC_FFFF			= 0xffff
+};
+/* for DELIP */
+#define IPA_RC_IP_ADDRESS_NOT_DEFINED	IPA_RC_PRIMARY_ALREADY_DEFINED
+/* for SET_DIAGNOSTIC_ASSIST */
+#define IPA_RC_INVALID_SUBCMD		IPA_RC_IP_TABLE_FULL
+#define IPA_RC_HARDWARE_AUTH_ERROR	IPA_RC_UNKNOWN_ERROR
+
+/* IPA function flags; each flag marks availability of respective function */
+enum qeth_ipa_funcs {
+	IPA_ARP_PROCESSING      = 0x00000001L,
+	IPA_INBOUND_CHECKSUM    = 0x00000002L,
+	IPA_OUTBOUND_CHECKSUM   = 0x00000004L,
+	IPA_IP_FRAGMENTATION    = 0x00000008L,
+	IPA_FILTERING           = 0x00000010L,
+	IPA_IPV6                = 0x00000020L,
+	IPA_MULTICASTING        = 0x00000040L,
+	IPA_IP_REASSEMBLY       = 0x00000080L,
+	IPA_QUERY_ARP_COUNTERS  = 0x00000100L,
+	IPA_QUERY_ARP_ADDR_INFO = 0x00000200L,
+	IPA_SETADAPTERPARMS     = 0x00000400L,
+	IPA_VLAN_PRIO           = 0x00000800L,
+	IPA_PASSTHRU            = 0x00001000L,
+	IPA_FLUSH_ARP_SUPPORT   = 0x00002000L,
+	IPA_FULL_VLAN           = 0x00004000L,
+	IPA_INBOUND_PASSTHRU    = 0x00008000L,
+	IPA_SOURCE_MAC          = 0x00010000L,
+	IPA_OSA_MC_ROUTER       = 0x00020000L,
+	IPA_QUERY_ARP_ASSIST	= 0x00040000L,
+	IPA_INBOUND_TSO         = 0x00080000L,
+	IPA_OUTBOUND_TSO        = 0x00100000L,
+};
+
+/* SETIP/DELIP IPA Command: ***************************************************/
+enum qeth_ipa_setdelip_flags {
+	QETH_IPA_SETDELIP_DEFAULT          = 0x00L, /* default */
+	QETH_IPA_SETIP_VIPA_FLAG           = 0x01L, /* no grat. ARP */
+	QETH_IPA_SETIP_TAKEOVER_FLAG       = 0x02L, /* nofail on grat. ARP */
+	QETH_IPA_DELIP_ADDR_2_B_TAKEN_OVER = 0x20L,
+	QETH_IPA_DELIP_VIPA_FLAG           = 0x40L,
+	QETH_IPA_DELIP_ADDR_NEEDS_SETIP    = 0x80L,
+};
+
+/* SETADAPTER IPA Command: ****************************************************/
+enum qeth_ipa_setadp_cmd {
+	IPA_SETADP_QUERY_COMMANDS_SUPPORTED	= 0x00000001L,
+	IPA_SETADP_ALTER_MAC_ADDRESS		= 0x00000002L,
+	IPA_SETADP_ADD_DELETE_GROUP_ADDRESS	= 0x00000004L,
+	IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR	= 0x00000008L,
+	IPA_SETADP_SET_ADDRESSING_MODE		= 0x00000010L,
+	IPA_SETADP_SET_CONFIG_PARMS		= 0x00000020L,
+	IPA_SETADP_SET_CONFIG_PARMS_EXTENDED	= 0x00000040L,
+	IPA_SETADP_SET_BROADCAST_MODE		= 0x00000080L,
+	IPA_SETADP_SEND_OSA_MESSAGE		= 0x00000100L,
+	IPA_SETADP_SET_SNMP_CONTROL		= 0x00000200L,
+	IPA_SETADP_QUERY_CARD_INFO		= 0x00000400L,
+	IPA_SETADP_SET_PROMISC_MODE		= 0x00000800L,
+	IPA_SETADP_SET_DIAG_ASSIST		= 0x00002000L,
+	IPA_SETADP_SET_ACCESS_CONTROL		= 0x00010000L,
+	IPA_SETADP_QUERY_OAT			= 0x00080000L,
+};
+enum qeth_ipa_mac_ops {
+	CHANGE_ADDR_READ_MAC		= 0,
+	CHANGE_ADDR_REPLACE_MAC		= 1,
+	CHANGE_ADDR_ADD_MAC		= 2,
+	CHANGE_ADDR_DEL_MAC		= 4,
+	CHANGE_ADDR_RESET_MAC		= 8,
+};
+enum qeth_ipa_addr_ops {
+	CHANGE_ADDR_READ_ADDR		= 0,
+	CHANGE_ADDR_ADD_ADDR		= 1,
+	CHANGE_ADDR_DEL_ADDR		= 2,
+	CHANGE_ADDR_FLUSH_ADDR_TABLE	= 4,
+};
+enum qeth_ipa_promisc_modes {
+	SET_PROMISC_MODE_OFF		= 0,
+	SET_PROMISC_MODE_ON		= 1,
+};
+enum qeth_ipa_isolation_modes {
+	ISOLATION_MODE_NONE		= 0x00000000L,
+	ISOLATION_MODE_FWD		= 0x00000001L,
+	ISOLATION_MODE_DROP		= 0x00000002L,
+};
+enum qeth_ipa_set_access_mode_rc {
+	SET_ACCESS_CTRL_RC_SUCCESS		= 0x0000,
+	SET_ACCESS_CTRL_RC_NOT_SUPPORTED	= 0x0004,
+	SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED	= 0x0008,
+	SET_ACCESS_CTRL_RC_ALREADY_ISOLATED	= 0x0010,
+	SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER	= 0x0014,
+	SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF	= 0x0018,
+};
+
+
+/* (SET)DELIP(M) IPA stuff ***************************************************/
+struct qeth_ipacmd_setdelip4 {
+	__u8   ip_addr[4];
+	__u8   mask[4];
+	__u32  flags;
+} __attribute__ ((packed));
+
+struct qeth_ipacmd_setdelip6 {
+	__u8   ip_addr[16];
+	__u8   mask[16];
+	__u32  flags;
+} __attribute__ ((packed));
+
+struct qeth_ipacmd_setdelipm {
+	__u8 mac[6];
+	__u8 padding[2];
+	__u8 ip6[12];
+	__u8 ip4[4];
+} __attribute__ ((packed));
+
+struct qeth_ipacmd_layer2setdelmac {
+	__u32 mac_length;
+	__u8 mac[6];
+} __attribute__ ((packed));
+
+struct qeth_ipacmd_layer2setdelvlan {
+	__u16 vlan_id;
+} __attribute__ ((packed));
+
+
+struct qeth_ipacmd_setassparms_hdr {
+	__u32 assist_no;
+	__u16 length;
+	__u16 command_code;
+	__u16 return_code;
+	__u8 number_of_replies;
+	__u8 seq_no;
+} __attribute__((packed));
+
+struct qeth_arp_query_data {
+	__u16 request_bits;
+	__u16 reply_bits;
+	__u32 no_entries;
+	char data; /* only for replies */
+} __attribute__((packed));
+
+/* used as parameter for arp_query reply */
+struct qeth_arp_query_info {
+	__u32 udata_len;
+	__u16 mask_bits;
+	__u32 udata_offset;
+	__u32 no_entries;
+	char *udata;
+};
+
+/* SETASSPARMS IPA Command: */
+struct qeth_ipacmd_setassparms {
+	struct qeth_ipacmd_setassparms_hdr hdr;
+	union {
+		__u32 flags_32bit;
+		struct qeth_arp_cache_entry add_arp_entry;
+		struct qeth_arp_query_data query_arp;
+		__u8 ip[16];
+	} data;
+} __attribute__ ((packed));
+
+
+/* SETRTG IPA Command:    ****************************************************/
+struct qeth_set_routing {
+	__u8 type;
+};
+
+/* SETADAPTERPARMS IPA Command:    *******************************************/
+struct qeth_query_cmds_supp {
+	__u32 no_lantypes_supp;
+	__u8 lan_type;
+	__u8 reserved1[3];
+	__u32 supported_cmds;
+	__u8 reserved2[8];
+} __attribute__ ((packed));
+
+struct qeth_change_addr {
+	__u32 cmd;
+	__u32 addr_size;
+	__u32 no_macs;
+	__u8 addr[OSA_ADDR_LEN];
+} __attribute__ ((packed));
+
+
+struct qeth_snmp_cmd {
+	__u8  token[16];
+	__u32 request;
+	__u32 interface;
+	__u32 returncode;
+	__u32 firmwarelevel;
+	__u32 seqno;
+	__u8  data;
+} __attribute__ ((packed));
+
+struct qeth_snmp_ureq_hdr {
+	__u32   data_len;
+	__u32   req_len;
+	__u32   reserved1;
+	__u32   reserved2;
+} __attribute__ ((packed));
+
+struct qeth_snmp_ureq {
+	struct qeth_snmp_ureq_hdr hdr;
+	struct qeth_snmp_cmd cmd;
+} __attribute__((packed));
+
+/* SET_ACCESS_CONTROL: same format for request and reply */
+struct qeth_set_access_ctrl {
+	__u32 subcmd_code;
+} __attribute__((packed));
+
+struct qeth_query_oat {
+	__u32 subcmd_code;
+	__u8 reserved[12];
+} __packed;
+
+struct qeth_qoat_priv {
+	__u32 buffer_len;
+	__u32 response_len;
+	char *buffer;
+};
+
+struct qeth_ipacmd_setadpparms_hdr {
+	__u32 supp_hw_cmds;
+	__u32 reserved1;
+	__u16 cmdlength;
+	__u16 reserved2;
+	__u32 command_code;
+	__u16 return_code;
+	__u8  used_total;
+	__u8  seq_no;
+	__u32 reserved3;
+} __attribute__ ((packed));
+
+struct qeth_ipacmd_setadpparms {
+	struct qeth_ipacmd_setadpparms_hdr hdr;
+	union {
+		struct qeth_query_cmds_supp query_cmds_supp;
+		struct qeth_change_addr change_addr;
+		struct qeth_snmp_cmd snmp;
+		struct qeth_set_access_ctrl set_access_ctrl;
+		struct qeth_query_oat query_oat;
+		__u32 mode;
+	} data;
+} __attribute__ ((packed));
+
+/* CREATE_ADDR IPA Command:    ***********************************************/
+struct qeth_create_destroy_address {
+	__u8 unique_id[8];
+} __attribute__ ((packed));
+
+/* SET DIAGNOSTIC ASSIST IPA Command:	 *************************************/
+
+enum qeth_diags_cmds {
+	QETH_DIAGS_CMD_QUERY	= 0x0001,
+	QETH_DIAGS_CMD_TRAP	= 0x0002,
+	QETH_DIAGS_CMD_TRACE	= 0x0004,
+	QETH_DIAGS_CMD_NOLOG	= 0x0008,
+	QETH_DIAGS_CMD_DUMP	= 0x0010,
+};
+
+enum qeth_diags_trace_types {
+	QETH_DIAGS_TYPE_HIPERSOCKET	= 0x02,
+};
+
+enum qeth_diags_trace_cmds {
+	QETH_DIAGS_CMD_TRACE_ENABLE	= 0x0001,
+	QETH_DIAGS_CMD_TRACE_DISABLE	= 0x0002,
+	QETH_DIAGS_CMD_TRACE_MODIFY	= 0x0004,
+	QETH_DIAGS_CMD_TRACE_REPLACE	= 0x0008,
+	QETH_DIAGS_CMD_TRACE_QUERY	= 0x0010,
+};
+
+enum qeth_diags_trap_action {
+	QETH_DIAGS_TRAP_ARM	= 0x01,
+	QETH_DIAGS_TRAP_DISARM	= 0x02,
+	QETH_DIAGS_TRAP_CAPTURE = 0x04,
+};
+
+struct qeth_ipacmd_diagass {
+	__u32  host_tod2;
+	__u32:32;
+	__u16  subcmd_len;
+	__u16:16;
+	__u32  subcmd;
+	__u8   type;
+	__u8   action;
+	__u16  options;
+	__u32  ext;
+	__u8   cdata[64];
+} __attribute__ ((packed));
+
+/* Header for each IPA command */
+struct qeth_ipacmd_hdr {
+	__u8   command;
+	__u8   initiator;
+	__u16  seqno;
+	__u16  return_code;
+	__u8   adapter_type;
+	__u8   rel_adapter_no;
+	__u8   prim_version_no;
+	__u8   param_count;
+	__u16  prot_version;
+	__u32  ipa_supported;
+	__u32  ipa_enabled;
+} __attribute__ ((packed));
+
+/* The IPA command itself */
+struct qeth_ipa_cmd {
+	struct qeth_ipacmd_hdr hdr;
+	union {
+		struct qeth_ipacmd_setdelip4		setdelip4;
+		struct qeth_ipacmd_setdelip6		setdelip6;
+		struct qeth_ipacmd_setdelipm		setdelipm;
+		struct qeth_ipacmd_setassparms		setassparms;
+		struct qeth_ipacmd_layer2setdelmac	setdelmac;
+		struct qeth_ipacmd_layer2setdelvlan	setdelvlan;
+		struct qeth_create_destroy_address	create_destroy_addr;
+		struct qeth_ipacmd_setadpparms		setadapterparms;
+		struct qeth_set_routing			setrtg;
+		struct qeth_ipacmd_diagass		diagass;
+	} data;
+} __attribute__ ((packed));
+
+/*
+ * special command for ARP processing.
+ * these return codes are not part of the setassparms command above, because
+ * that would cause problems with the size of struct qeth_ipacmd_setassparms
+ */
+enum qeth_ipa_arp_return_codes {
+	QETH_IPA_ARP_RC_SUCCESS      = 0x0000,
+	QETH_IPA_ARP_RC_FAILED       = 0x0001,
+	QETH_IPA_ARP_RC_NOTSUPP      = 0x0002,
+	QETH_IPA_ARP_RC_OUT_OF_RANGE = 0x0003,
+	QETH_IPA_ARP_RC_Q_NOTSUPP    = 0x0004,
+	QETH_IPA_ARP_RC_Q_NO_DATA    = 0x0008,
+};
+
+extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
+extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
+
+#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
+			       sizeof(struct qeth_ipacmd_setassparms_hdr))
+#define QETH_IPA_ARP_DATA_POS(buffer) (buffer + IPA_PDU_HEADER_SIZE + \
+				       QETH_SETASS_BASE_LEN)
+#define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
+			      sizeof(struct qeth_ipacmd_setadpparms_hdr))
+#define QETH_SNMP_SETADP_CMDLENGTH 16
+
+#define QETH_ARP_DATA_SIZE 3968
+#define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8)
+/* Helper functions */
+#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \
+			   (cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY))
+
+/*****************************************************************************/
+/* END OF   IP Assist related definitions                                    */
+/*****************************************************************************/
+
+
+extern unsigned char WRITE_CCW[];
+extern unsigned char READ_CCW[];
+
+extern unsigned char CM_ENABLE[];
+#define CM_ENABLE_SIZE 0x63
+#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c)
+#define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
+#define QETH_CM_ENABLE_USER_DATA(buffer) (buffer + 0x5b)
+
+#define QETH_CM_ENABLE_RESP_FILTER_TOKEN(buffer) \
+		(PDU_ENCAPSULATION(buffer) + 0x13)
+
+
+extern unsigned char CM_SETUP[];
+#define CM_SETUP_SIZE 0x64
+#define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
+#define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
+#define QETH_CM_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a)
+
+#define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \
+		(PDU_ENCAPSULATION(buffer) + 0x1a)
+
+extern unsigned char ULP_ENABLE[];
+#define ULP_ENABLE_SIZE 0x6b
+#define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer + 0x61)
+#define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer + 0x2c)
+#define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
+#define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer + 0x62)
+#define QETH_ULP_ENABLE_RESP_FILTER_TOKEN(buffer) \
+		(PDU_ENCAPSULATION(buffer) + 0x13)
+#define QETH_ULP_ENABLE_RESP_MAX_MTU(buffer) \
+		(PDU_ENCAPSULATION(buffer) + 0x1f)
+#define QETH_ULP_ENABLE_RESP_DIFINFO_LEN(buffer) \
+		(PDU_ENCAPSULATION(buffer) + 0x17)
+#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
+		(PDU_ENCAPSULATION(buffer) + 0x2b)
+/* Layer 2 definitions */
+#define QETH_PROT_LAYER2 0x08
+#define QETH_PROT_TCPIP  0x03
+#define QETH_PROT_OSN2   0x0a
+#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50)
+#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19)
+
+extern unsigned char ULP_SETUP[];
+#define ULP_SETUP_SIZE 0x6c
+#define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
+#define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
+#define QETH_ULP_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a)
+#define QETH_ULP_SETUP_CUA(buffer) (buffer + 0x68)
+#define QETH_ULP_SETUP_REAL_DEVADDR(buffer) (buffer + 0x6a)
+
+#define QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(buffer) \
+		(PDU_ENCAPSULATION(buffer) + 0x1a)
+
+
+extern unsigned char DM_ACT[];
+#define DM_ACT_SIZE 0x55
+#define QETH_DM_ACT_DEST_ADDR(buffer) (buffer + 0x2c)
+#define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer + 0x51)
+
+
+
+#define QETH_TRANSPORT_HEADER_SEQ_NO(buffer) (buffer + 4)
+#define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer + 0x1c)
+#define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer + 0x20)
+
+extern unsigned char IDX_ACTIVATE_READ[];
+extern unsigned char IDX_ACTIVATE_WRITE[];
+
+#define IDX_ACTIVATE_SIZE	0x22
+#define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b)
+#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c)
+#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b] & 0x80)
+#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer + 0x10)
+#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer + 0x16)
+#define QETH_IDX_ACT_QDIO_DEV_CUA(buffer) (buffer + 0x1e)
+#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer + 0x20)
+#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2)
+#define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12)
+#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
+#define QETH_IDX_ACT_ERR_EXCL		0x19
+#define QETH_IDX_ACT_ERR_AUTH		0x1E
+#define QETH_IDX_ACT_ERR_AUTH_USER	0x20
+
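+/* skip over the variable-length transport and PDU headers to reach the
+ * encapsulated PDU inside a control buffer */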
+#define PDU_ENCAPSULATION(buffer) \
+	(buffer + *(buffer + (*(buffer + 0x0b)) + \
+	 *(buffer + *(buffer + 0x0b) + 0x11) + 0x07))
+
+#define IS_IPA(buffer) \
+	((buffer) && \
+	 (*(buffer + ((*(buffer + 0x0b)) + 4)) == 0xc1))
+
+#define ADDR_FRAME_TYPE_DIX 1
+#define ADDR_FRAME_TYPE_802_3 2
+#define ADDR_FRAME_TYPE_TR_WITHOUT_SR 0x10
+#define ADDR_FRAME_TYPE_TR_WITH_SR 0x20
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_core_sys.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_core_sys.c
new file mode 100644
index 0000000..0a8e86c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_core_sys.c
@@ -0,0 +1,766 @@
+/*
+ *  drivers/s390/net/qeth_core_sys.c
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ *		 Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "qeth"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/list.h>
+#include <linux/rwsem.h>
+#include <asm/ebcdic.h>
+
+#include "qeth_core.h"
+
+static ssize_t qeth_dev_state_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	if (!card)
+		return -EINVAL;
+
+	switch (card->state) {
+	case CARD_STATE_DOWN:
+		return sprintf(buf, "DOWN\n");
+	case CARD_STATE_HARDSETUP:
+		return sprintf(buf, "HARDSETUP\n");
+	case CARD_STATE_SOFTSETUP:
+		return sprintf(buf, "SOFTSETUP\n");
+	case CARD_STATE_UP:
+		if (card->lan_online)
+		return sprintf(buf, "UP (LAN ONLINE)\n");
+		else
+			return sprintf(buf, "UP (LAN OFFLINE)\n");
+	case CARD_STATE_RECOVER:
+		return sprintf(buf, "RECOVER\n");
+	default:
+		return sprintf(buf, "UNKNOWN\n");
+	}
+}
+
+static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL);
+
+static ssize_t qeth_dev_chpid_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%02X\n", card->info.chpid);
+}
+
+static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL);
+
+static ssize_t qeth_dev_if_name_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	if (!card)
+		return -EINVAL;
+	return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
+}
+
+static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
+
+static ssize_t qeth_dev_card_type_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
+}
+
+static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
+
+static inline const char *qeth_get_bufsize_str(struct qeth_card *card)
+{
+	if (card->qdio.in_buf_size == 16384)
+		return "16k";
+	else if (card->qdio.in_buf_size == 24576)
+		return "24k";
+	else if (card->qdio.in_buf_size == 32768)
+		return "32k";
+	else if (card->qdio.in_buf_size == 40960)
+		return "40k";
+	else
+		return "64k";
+}
+
+static ssize_t qeth_dev_inbuf_size_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%s\n", qeth_get_bufsize_str(card));
+}
+
+static DEVICE_ATTR(inbuf_size, 0444, qeth_dev_inbuf_size_show, NULL);
+
+static ssize_t qeth_dev_portno_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->info.portno);
+}
+
+static ssize_t qeth_dev_portno_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	unsigned int portno, limit;
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	portno = simple_strtoul(buf, &tmp, 16);
+	if (portno > QETH_MAX_PORTNO) {
+		rc = -EINVAL;
+		goto out;
+	}
+	limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
+	if (portno > limit) {
+		rc = -EINVAL;
+		goto out;
+	}
+	card->info.portno = portno;
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
+
+static ssize_t qeth_dev_portname_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char portname[9] = {0, };
+
+	if (!card)
+		return -EINVAL;
+
+	if (card->info.portname_required) {
+		memcpy(portname, card->info.portname + 1, 8);
+		EBCASC(portname, 8);
+		return sprintf(buf, "%s\n", portname);
+	} else
+		return sprintf(buf, "no portname required\n");
+}
+
+static ssize_t qeth_dev_portname_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int i, rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	tmp = strsep((char **) &buf, "\n");
+	if ((strlen(tmp) > 8) || (strlen(tmp) == 0)) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	card->info.portname[0] = strlen(tmp);
+	/* blank-pad the portname */
+	for (i = 1; i < 9; i++)
+		card->info.portname[i] = ' ';
+	strcpy(card->info.portname + 1, tmp);
+	ASCEBC(card->info.portname + 1, 8);
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
+		qeth_dev_portname_store);
+
+static ssize_t qeth_dev_prioqing_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	switch (card->qdio.do_prio_queueing) {
+	case QETH_PRIO_Q_ING_PREC:
+		return sprintf(buf, "%s\n", "by precedence");
+	case QETH_PRIO_Q_ING_TOS:
+		return sprintf(buf, "%s\n", "by type of service");
+	default:
+		return sprintf(buf, "always queue %i\n",
+			       card->qdio.default_out_queue);
+	}
+}
+
+static ssize_t qeth_dev_prioqing_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	/* 1920 devices provide only one outbound queue;
+	 * in that case priority queueing cannot be permitted
+	 */
+	if (card->qdio.no_out_queues == 1) {
+		card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
+		rc = -EPERM;
+		goto out;
+	}
+
+	tmp = strsep((char **) &buf, "\n");
+	if (!strcmp(tmp, "prio_queueing_prec"))
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
+	else if (!strcmp(tmp, "prio_queueing_tos"))
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
+	else if (!strcmp(tmp, "no_prio_queueing:0")) {
+		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.default_out_queue = 0;
+	} else if (!strcmp(tmp, "no_prio_queueing:1")) {
+		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.default_out_queue = 1;
+	} else if (!strcmp(tmp, "no_prio_queueing:2")) {
+		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.default_out_queue = 2;
+	} else if (!strcmp(tmp, "no_prio_queueing:3")) {
+		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.default_out_queue = 3;
+	} else if (!strcmp(tmp, "no_prio_queueing")) {
+		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+	} else
+		rc = -EINVAL;
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
+		qeth_dev_prioqing_store);
+
+static ssize_t qeth_dev_bufcnt_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
+}
+
+static ssize_t qeth_dev_bufcnt_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int cnt, old_cnt;
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	old_cnt = card->qdio.in_buf_pool.buf_count;
+	cnt = simple_strtoul(buf, &tmp, 10);
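+	/* clamp the requested count to [QETH_IN_BUF_COUNT_MIN, QETH_IN_BUF_COUNT_MAX] */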
+	cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
+		((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
+	if (old_cnt != cnt) {
+		rc = qeth_realloc_buffer_pool(card, cnt);
+	}
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
+		qeth_dev_bufcnt_store);
+
+static ssize_t qeth_dev_recover_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int i;
+
+	if (!card)
+		return -EINVAL;
+
+	if (card->state != CARD_STATE_UP)
+		return -EPERM;
+
+	i = simple_strtoul(buf, &tmp, 16);
+	if (i == 1)
+		qeth_schedule_recovery(card);
+
+	return count;
+}
+
+static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
+
+static ssize_t qeth_dev_performance_stats_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->options.performance_stats ? 1:0);
+}
+
+static ssize_t qeth_dev_performance_stats_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int i, rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	i = simple_strtoul(buf, &tmp, 16);
+	if ((i == 0) || (i == 1)) {
+		if (i == card->options.performance_stats)
+			goto out;
+		card->options.performance_stats = i;
+		if (i == 0)
+			memset(&card->perf_stats, 0,
+				sizeof(struct qeth_perf_stats));
+		card->perf_stats.initial_rx_packets = card->stats.rx_packets;
+		card->perf_stats.initial_tx_packets = card->stats.tx_packets;
+	} else
+		rc = -EINVAL;
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
+		   qeth_dev_performance_stats_store);
+
+static ssize_t qeth_dev_layer2_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->options.layer2);
+}
+
+static ssize_t qeth_dev_layer2_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int i, rc = 0;
+	enum qeth_discipline_id newdis;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->discipline_mutex);
+	if (card->state != CARD_STATE_DOWN) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	i = simple_strtoul(buf, &tmp, 16);
+	switch (i) {
+	case 0:
+		newdis = QETH_DISCIPLINE_LAYER3;
+		break;
+	case 1:
+		newdis = QETH_DISCIPLINE_LAYER2;
+		break;
+	default:
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (card->options.layer2 == newdis)
+		goto out;
+	else {
+		card->info.mac_bits  = 0;
+		if (card->discipline.ccwgdriver) {
+			card->discipline.ccwgdriver->remove(card->gdev);
+			qeth_core_free_discipline(card);
+		}
+	}
+
+	rc = qeth_core_load_discipline(card, newdis);
+	if (rc)
+		goto out;
+
+	rc = card->discipline.ccwgdriver->probe(card->gdev);
+out:
+	mutex_unlock(&card->discipline_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
+		   qeth_dev_layer2_store);
+
+#define ATTR_QETH_ISOLATION_NONE	("none")
+#define ATTR_QETH_ISOLATION_FWD		("forward")
+#define ATTR_QETH_ISOLATION_DROP	("drop")
+
+static ssize_t qeth_dev_isolation_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	switch (card->options.isolation) {
+	case ISOLATION_MODE_NONE:
+		return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
+	case ISOLATION_MODE_FWD:
+		return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD);
+	case ISOLATION_MODE_DROP:
+		return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP);
+	default:
+		return snprintf(buf, 5, "%s\n", "N/A");
+	}
+}
+
+static ssize_t qeth_dev_isolation_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	enum qeth_ipa_isolation_modes isolation;
+	int rc = 0;
+	char *tmp, *curtoken;
+	curtoken = (char *) buf;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	/* check for unknown, too, in case we do not yet know who we are */
+	if (card->info.type != QETH_CARD_TYPE_OSD &&
+	    card->info.type != QETH_CARD_TYPE_OSX &&
+	    card->info.type != QETH_CARD_TYPE_UNKNOWN) {
+		rc = -EOPNOTSUPP;
+		dev_err(&card->gdev->dev, "Adapter does not "
+			"support QDIO data connection isolation\n");
+		goto out;
+	}
+
+	/* parse input into isolation mode */
+	tmp = strsep(&curtoken, "\n");
+	if (!strcmp(tmp, ATTR_QETH_ISOLATION_NONE)) {
+		isolation = ISOLATION_MODE_NONE;
+	} else if (!strcmp(tmp, ATTR_QETH_ISOLATION_FWD)) {
+		isolation = ISOLATION_MODE_FWD;
+	} else if (!strcmp(tmp, ATTR_QETH_ISOLATION_DROP)) {
+		isolation = ISOLATION_MODE_DROP;
+	} else {
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = count;
+
+	/* defer IP assist if device is offline (until discipline->set_online) */
+	card->options.isolation = isolation;
+	if (card->state == CARD_STATE_SOFTSETUP ||
+	    card->state == CARD_STATE_UP) {
+		int ipa_rc = qeth_set_access_ctrl_online(card);
+		if (ipa_rc != 0)
+			rc = ipa_rc;
+	}
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc;
+}
+
+static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
+		   qeth_dev_isolation_store);
+
+static ssize_t qeth_hw_trap_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+	if (card->info.hwtrap)
+		return snprintf(buf, 5, "arm\n");
+	else
+		return snprintf(buf, 8, "disarm\n");
+}
+
+static ssize_t qeth_hw_trap_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	int rc = 0;
+	char *tmp, *curtoken;
+	int state = 0;
+	curtoken = (char *)buf;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if (card->state == CARD_STATE_SOFTSETUP || card->state == CARD_STATE_UP)
+		state = 1;
+	tmp = strsep(&curtoken, "\n");
+
+	if (!strcmp(tmp, "arm") && !card->info.hwtrap) {
+		if (state) {
+			if (qeth_is_diagass_supported(card,
+			    QETH_DIAGS_CMD_TRAP)) {
+				rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM);
+				if (!rc)
+					card->info.hwtrap = 1;
+			} else
+				rc = -EINVAL;
+		} else
+			card->info.hwtrap = 1;
+	} else if (!strcmp(tmp, "disarm") && card->info.hwtrap) {
+		if (state) {
+			rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+			if (!rc)
+				card->info.hwtrap = 0;
+		} else
+			card->info.hwtrap = 0;
+	} else if (!strcmp(tmp, "trap") && state && card->info.hwtrap)
+		rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_CAPTURE);
+	else
+		rc = -EINVAL;
+
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show,
+		   qeth_hw_trap_store);
+
+static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
+{
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", value);
+}
+
+static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
+		const char *buf, size_t count, int *value, int max_value)
+{
+	char *tmp;
+	int i, rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+	i = simple_strtoul(buf, &tmp, 10);
+	if (i <= max_value)
+		*value = i;
+	else
+		rc = -EINVAL;
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_dev_blkt_total_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
+}
+
+static ssize_t qeth_dev_blkt_total_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	return qeth_dev_blkt_store(card, buf, count,
+				   &card->info.blkt.time_total, 5000);
+}
+
+
+
+static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
+		   qeth_dev_blkt_total_store);
+
+static ssize_t qeth_dev_blkt_inter_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
+}
+
+static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	return qeth_dev_blkt_store(card, buf, count,
+				   &card->info.blkt.inter_packet, 1000);
+}
+
+static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
+		   qeth_dev_blkt_inter_store);
+
+static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	return qeth_dev_blkt_show(buf, card,
+				  card->info.blkt.inter_packet_jumbo);
+}
+
+static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	return qeth_dev_blkt_store(card, buf, count,
+				   &card->info.blkt.inter_packet_jumbo, 1000);
+}
+
+static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
+		   qeth_dev_blkt_inter_jumbo_store);
+
+static struct attribute *qeth_blkt_device_attrs[] = {
+	&dev_attr_total.attr,
+	&dev_attr_inter.attr,
+	&dev_attr_inter_jumbo.attr,
+	NULL,
+};
+
+static struct attribute_group qeth_device_blkt_group = {
+	.name = "blkt",
+	.attrs = qeth_blkt_device_attrs,
+};
+
+static struct attribute *qeth_device_attrs[] = {
+	&dev_attr_state.attr,
+	&dev_attr_chpid.attr,
+	&dev_attr_if_name.attr,
+	&dev_attr_card_type.attr,
+	&dev_attr_inbuf_size.attr,
+	&dev_attr_portno.attr,
+	&dev_attr_portname.attr,
+	&dev_attr_priority_queueing.attr,
+	&dev_attr_buffer_count.attr,
+	&dev_attr_recover.attr,
+	&dev_attr_performance_stats.attr,
+	&dev_attr_layer2.attr,
+	&dev_attr_isolation.attr,
+	&dev_attr_hw_trap.attr,
+	NULL,
+};
+
+static struct attribute_group qeth_device_attr_group = {
+	.attrs = qeth_device_attrs,
+};
+
+static struct attribute *qeth_osn_device_attrs[] = {
+	&dev_attr_state.attr,
+	&dev_attr_chpid.attr,
+	&dev_attr_if_name.attr,
+	&dev_attr_card_type.attr,
+	&dev_attr_buffer_count.attr,
+	&dev_attr_recover.attr,
+	NULL,
+};
+
+static struct attribute_group qeth_osn_device_attr_group = {
+	.attrs = qeth_osn_device_attrs,
+};
+
+int qeth_core_create_device_attributes(struct device *dev)
+{
+	int ret;
+	ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group);
+	if (ret)
+		return ret;
+	ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group);
+	if (ret)
+		sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
+
+	return ret;
+}
+
+void qeth_core_remove_device_attributes(struct device *dev)
+{
+	sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
+	sysfs_remove_group(&dev->kobj, &qeth_device_blkt_group);
+}
+
+int qeth_core_create_osn_attributes(struct device *dev)
+{
+	return sysfs_create_group(&dev->kobj, &qeth_osn_device_attr_group);
+}
+
+void qeth_core_remove_osn_attributes(struct device *dev)
+{
+	sysfs_remove_group(&dev->kobj, &qeth_osn_device_attr_group);
+	return;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_l2_main.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_l2_main.c
new file mode 100644
index 0000000..0e7c29d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_l2_main.c
@@ -0,0 +1,1349 @@
+/*
+ *  drivers/s390/net/qeth_l2_main.c
+ *
+ *    Copyright IBM Corp. 2007, 2009
+ *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ *		 Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "qeth"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/ip.h>
+#include <linux/list.h>
+
+#include "qeth_core.h"
+
+static int qeth_l2_set_offline(struct ccwgroup_device *);
+static int qeth_l2_stop(struct net_device *);
+static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
+static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
+			   enum qeth_ipa_cmds,
+			   int (*reply_cb) (struct qeth_card *,
+					    struct qeth_reply*,
+					    unsigned long));
+static void qeth_l2_set_multicast_list(struct net_device *);
+static int qeth_l2_recover(void *);
+
+static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct qeth_card *card = dev->ml_priv;
+	struct mii_ioctl_data *mii_data;
+	int rc = 0;
+
+	if (!card)
+		return -ENODEV;
+
+	if ((card->state != CARD_STATE_UP) &&
+		(card->state != CARD_STATE_SOFTSETUP))
+		return -ENODEV;
+
+	if (card->info.type == QETH_CARD_TYPE_OSN)
+		return -EPERM;
+
+	switch (cmd) {
+	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
+		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
+		break;
+	case SIOC_QETH_GET_CARD_TYPE:
+		if ((card->info.type == QETH_CARD_TYPE_OSD ||
+		     card->info.type == QETH_CARD_TYPE_OSM ||
+		     card->info.type == QETH_CARD_TYPE_OSX) &&
+		    !card->info.guestlan)
+			return 1;
+		return 0;
+		break;
+	case SIOCGMIIPHY:
+		mii_data = if_mii(rq);
+		mii_data->phy_id = 0;
+		break;
+	case SIOCGMIIREG:
+		mii_data = if_mii(rq);
+		if (mii_data->phy_id != 0)
+			rc = -EINVAL;
+		else
+			mii_data->val_out = qeth_mdio_read(dev,
+				mii_data->phy_id, mii_data->reg_num);
+		break;
+	case SIOC_QETH_QUERY_OAT:
+		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
+		break;
+	default:
+		rc = -EOPNOTSUPP;
+	}
+	if (rc)
+		QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
+	return rc;
+}
+
+static int qeth_l2_verify_dev(struct net_device *dev)
+{
+	struct qeth_card *card;
+	unsigned long flags;
+	int rc = 0;
+
+	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
+	list_for_each_entry(card, &qeth_core_card_list.list, list) {
+		if (card->dev == dev) {
+			rc = QETH_REAL_CARD;
+			break;
+		}
+	}
+	read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+
+	return rc;
+}
+
+static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
+{
+	struct qeth_card *card;
+	struct net_device *ndev;
+	__u16 temp_dev_no;
+	unsigned long flags;
+	struct ccw_dev_id read_devid;
+
+	ndev = NULL;
+	memcpy(&temp_dev_no, read_dev_no, 2);
+	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
+	list_for_each_entry(card, &qeth_core_card_list.list, list) {
+		ccw_device_get_id(CARD_RDEV(card), &read_devid);
+		if (read_devid.devno == temp_dev_no) {
+			ndev = card->dev;
+			break;
+		}
+	}
+	read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+	return ndev;
+}
+
+static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
+				struct qeth_reply *reply,
+				unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+	__u8 *mac;
+
+	QETH_CARD_TEXT(card, 2, "L2Sgmacb");
+	cmd = (struct qeth_ipa_cmd *) data;
+	mac = &cmd->data.setdelmac.mac[0];
+	/* MAC already registered, needed in couple/uncouple case */
+	if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) {
+		QETH_DBF_MESSAGE(2, "Group MAC %pM already exists on %s\n",
+			  mac, QETH_CARD_IFNAME(card));
+		cmd->hdr.return_code = 0;
+	}
+	if (cmd->hdr.return_code)
+		QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n",
+			  mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
+	return 0;
+}
+
+static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
+{
+	QETH_CARD_TEXT(card, 2, "L2Sgmac");
+	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
+					  qeth_l2_send_setgroupmac_cb);
+}
+
+static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
+				struct qeth_reply *reply,
+				unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+	__u8 *mac;
+
+	QETH_CARD_TEXT(card, 2, "L2Dgmacb");
+	cmd = (struct qeth_ipa_cmd *) data;
+	mac = &cmd->data.setdelmac.mac[0];
+	if (cmd->hdr.return_code)
+		QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n",
+			  mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
+	return 0;
+}
+
+static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
+{
+	QETH_CARD_TEXT(card, 2, "L2Dgmac");
+	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
+					  qeth_l2_send_delgroupmac_cb);
+}
+
+static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
+{
+	struct qeth_mc_mac *mc;
+	int rc;
+
+	mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC);
+
+	if (!mc)
+		return;
+
+	memcpy(mc->mc_addr, mac, OSA_ADDR_LEN);
+	mc->mc_addrlen = OSA_ADDR_LEN;
+	mc->is_vmac = vmac;
+
+	if (vmac) {
+		rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
+					NULL);
+	} else {
+		rc = qeth_l2_send_setgroupmac(card, mac);
+	}
+
+	if (!rc)
+		list_add_tail(&mc->list, &card->mc_list);
+	else
+		kfree(mc);
+}
+
+static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
+{
+	struct qeth_mc_mac *mc, *tmp;
+
+	spin_lock_bh(&card->mclock);
+	list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
+		if (del) {
+			if (mc->is_vmac)
+				qeth_l2_send_setdelmac(card, mc->mc_addr,
+					IPA_CMD_DELVMAC, NULL);
+			else
+				qeth_l2_send_delgroupmac(card, mc->mc_addr);
+		}
+		list_del(&mc->list);
+		kfree(mc);
+	}
+	spin_unlock_bh(&card->mclock);
+}
+
+static inline int qeth_l2_get_cast_type(struct qeth_card *card,
+			struct sk_buff *skb)
+{
+	if (card->info.type == QETH_CARD_TYPE_OSN)
+		return RTN_UNSPEC;
+	if (is_broadcast_ether_addr(skb->data))
+		return RTN_BROADCAST;
+	if (is_multicast_ether_addr(skb->data))
+		return RTN_MULTICAST;
+	return RTN_UNSPEC;
+}
+
+static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
+			struct sk_buff *skb, int ipv, int cast_type)
+{
+	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+
+	memset(hdr, 0, sizeof(struct qeth_hdr));
+	hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
+
+	/* set byte 3 to casting flags */
+	if (cast_type == RTN_MULTICAST)
+		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
+	else if (cast_type == RTN_BROADCAST)
+		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
+	else
+		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
+
+	hdr->hdr.l2.pkt_length = skb->len - QETH_HEADER_SIZE;
+	/* VSWITCH relies on the VLAN
+	 * information to be present in
+	 * the QDIO header */
+	if (veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
+		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
+		hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
+	}
+}
+
+static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
+			struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 2, "L2sdvcb");
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->hdr.return_code) {
+		QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. "
+			  "Continuing\n", cmd->data.setdelvlan.vlan_id,
+			  QETH_CARD_IFNAME(card), cmd->hdr.return_code);
+		QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
+		QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
+	}
+	return 0;
+}
+
+static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
+				enum qeth_ipa_cmds ipacmd)
+{
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
+	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.setdelvlan.vlan_id = i;
+	return qeth_send_ipa_cmd(card, iob,
+				 qeth_l2_send_setdelvlan_cb, NULL);
+}
+
+static void qeth_l2_process_vlans(struct qeth_card *card)
+{
+	struct qeth_vlan_vid *id;
+	QETH_CARD_TEXT(card, 3, "L2prcvln");
+	spin_lock_bh(&card->vlanlock);
+	list_for_each_entry(id, &card->vid_list, list) {
+		qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN);
+	}
+	spin_unlock_bh(&card->vlanlock);
+}
+
+static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+	struct qeth_card *card = dev->ml_priv;
+	struct qeth_vlan_vid *id;
+
+	QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
+	if (!vid)
+		return 0;
+	if (card->info.type == QETH_CARD_TYPE_OSM) {
+		QETH_CARD_TEXT(card, 3, "aidOSM");
+		return 0;
+	}
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "aidREC");
+		return 0;
+	}
+	id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
+	if (id) {
+		id->vid = vid;
+		qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+		spin_lock_bh(&card->vlanlock);
+		list_add_tail(&id->list, &card->vid_list);
+		spin_unlock_bh(&card->vlanlock);
+	} else {
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+	struct qeth_vlan_vid *id, *tmpid = NULL;
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
+	if (card->info.type == QETH_CARD_TYPE_OSM) {
+		QETH_CARD_TEXT(card, 3, "kidOSM");
+		return 0;
+	}
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "kidREC");
+		return 0;
+	}
+	spin_lock_bh(&card->vlanlock);
+	list_for_each_entry(id, &card->vid_list, list) {
+		if (id->vid == vid) {
+			list_del(&id->list);
+			tmpid = id;
+			break;
+		}
+	}
+	spin_unlock_bh(&card->vlanlock);
+	if (tmpid) {
+		qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
+		kfree(tmpid);
+	}
+	qeth_l2_set_multicast_list(card->dev);
+	return 0;
+}
+
+static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
+{
+	int rc = 0;
+
+	QETH_DBF_TEXT(SETUP , 2, "stopcard");
+	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+
+	qeth_set_allowed_threads(card, 0, 1);
+	if (card->read.state == CH_STATE_UP &&
+	    card->write.state == CH_STATE_UP &&
+	    (card->state == CARD_STATE_UP)) {
+		if (recovery_mode &&
+		    card->info.type != QETH_CARD_TYPE_OSN) {
+			qeth_l2_stop(card->dev);
+		} else {
+			rtnl_lock();
+			dev_close(card->dev);
+			rtnl_unlock();
+		}
+		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+		card->state = CARD_STATE_SOFTSETUP;
+	}
+	if (card->state == CARD_STATE_SOFTSETUP) {
+		qeth_l2_del_all_mc(card, 0);
+		qeth_clear_ipacmd_list(card);
+		card->state = CARD_STATE_HARDSETUP;
+	}
+	if (card->state == CARD_STATE_HARDSETUP) {
+		qeth_qdio_clear_card(card, 0);
+		qeth_clear_qdio_buffers(card);
+		qeth_clear_working_pool_list(card);
+		card->state = CARD_STATE_DOWN;
+	}
+	if (card->state == CARD_STATE_DOWN) {
+		qeth_clear_cmd_buffers(&card->read);
+		qeth_clear_cmd_buffers(&card->write);
+	}
+	return rc;
+}
+
+static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
+				int budget, int *done)
+{
+	int work_done = 0;
+	struct sk_buff *skb;
+	struct qeth_hdr *hdr;
+	unsigned int len;
+
+	*done = 0;
+	BUG_ON(!budget);
+	while (budget) {
+		skb = qeth_core_get_next_skb(card,
+			&card->qdio.in_q->bufs[card->rx.b_index],
+			&card->rx.b_element, &card->rx.e_offset, &hdr);
+		if (!skb) {
+			*done = 1;
+			break;
+		}
+		skb->dev = card->dev;
+		switch (hdr->hdr.l2.id) {
+		case QETH_HEADER_TYPE_LAYER2:
+			skb->pkt_type = PACKET_HOST;
+			skb->protocol = eth_type_trans(skb, skb->dev);
+			skb->ip_summed = CHECKSUM_NONE;
+			if (skb->protocol == htons(ETH_P_802_2))
+				*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
+			len = skb->len;
+			netif_receive_skb(skb);
+			break;
+		case QETH_HEADER_TYPE_OSN:
+			if (card->info.type == QETH_CARD_TYPE_OSN) {
+				skb_push(skb, sizeof(struct qeth_hdr));
+				skb_copy_to_linear_data(skb, hdr,
+						sizeof(struct qeth_hdr));
+				len = skb->len;
+				card->osn_info.data_cb(skb);
+				break;
+			}
+			/* else unknown */
+		default:
+			dev_kfree_skb_any(skb);
+			QETH_CARD_TEXT(card, 3, "inbunkno");
+			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+			continue;
+		}
+		work_done++;
+		budget--;
+		card->stats.rx_packets++;
+		card->stats.rx_bytes += len;
+	}
+	return work_done;
+}
+
+static int qeth_l2_poll(struct napi_struct *napi, int budget)
+{
+	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
+	int work_done = 0;
+	struct qeth_qdio_buffer *buffer;
+	int done;
+	int new_budget = budget;
+
+	if (card->options.performance_stats) {
+		card->perf_stats.inbound_cnt++;
+		card->perf_stats.inbound_start_time = qeth_get_micros();
+	}
+
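+	/* process completed inbound buffers until the NAPI budget is
+	 * exhausted or no further buffers are pending */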
+	while (1) {
+		if (!card->rx.b_count) {
+			card->rx.qdio_err = 0;
+			card->rx.b_count = qdio_get_next_buffers(
+				card->data.ccwdev, 0, &card->rx.b_index,
+				&card->rx.qdio_err);
+			if (card->rx.b_count <= 0) {
+				card->rx.b_count = 0;
+				break;
+			}
+			card->rx.b_element =
+				&card->qdio.in_q->bufs[card->rx.b_index]
+				.buffer->element[0];
+			card->rx.e_offset = 0;
+		}
+
+		while (card->rx.b_count) {
+			buffer = &card->qdio.in_q->bufs[card->rx.b_index];
+			if (!(card->rx.qdio_err &&
+			    qeth_check_qdio_errors(card, buffer->buffer,
+			    card->rx.qdio_err, "qinerr")))
+				work_done += qeth_l2_process_inbound_buffer(
+					card, new_budget, &done);
+			else
+				done = 1;
+
+			if (done) {
+				if (card->options.performance_stats)
+					card->perf_stats.bufs_rec++;
+				qeth_put_buffer_pool_entry(card,
+					buffer->pool_entry);
+				qeth_queue_input_buffer(card, card->rx.b_index);
+				card->rx.b_count--;
+				if (card->rx.b_count) {
+					card->rx.b_index =
+						(card->rx.b_index + 1) %
+						QDIO_MAX_BUFFERS_PER_Q;
+					card->rx.b_element =
+						&card->qdio.in_q
+						->bufs[card->rx.b_index]
+						.buffer->element[0];
+					card->rx.e_offset = 0;
+				}
+			}
+
+			if (work_done >= budget)
+				goto out;
+			else
+				new_budget = budget - work_done;
+		}
+	}
+
+	napi_complete(napi);
+	if (qdio_start_irq(card->data.ccwdev, 0))
+		napi_schedule(&card->napi);
+out:
+	if (card->options.performance_stats)
+		card->perf_stats.inbound_time += qeth_get_micros() -
+			card->perf_stats.inbound_start_time;
+	return work_done;
+}
+
+static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
+			   enum qeth_ipa_cmds ipacmd,
+			   int (*reply_cb) (struct qeth_card *,
+					    struct qeth_reply*,
+					    unsigned long))
+{
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_CARD_TEXT(card, 2, "L2sdmac");
+	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
+	memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
+	return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
+}
+
+static int qeth_l2_send_setmac_cb(struct qeth_card *card,
+			   struct qeth_reply *reply,
+			   unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 2, "L2Smaccb");
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->hdr.return_code) {
+		QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code);
+		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+		switch (cmd->hdr.return_code) {
+		case IPA_RC_L2_DUP_MAC:
+		case IPA_RC_L2_DUP_LAYER3_MAC:
+			dev_warn(&card->gdev->dev,
+				"MAC address %pM already exists\n",
+				cmd->data.setdelmac.mac);
+			break;
+		case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
+		case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+			dev_warn(&card->gdev->dev,
+				"MAC address %pM is not authorized\n",
+				cmd->data.setdelmac.mac);
+			break;
+		default:
+			break;
+		}
+	} else {
+		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+		memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
+		       OSA_ADDR_LEN);
+		dev_info(&card->gdev->dev,
+			"MAC address %pM successfully registered on device %s\n",
+			card->dev->dev_addr, card->dev->name);
+	}
+	return 0;
+}
+
+static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
+{
+	QETH_CARD_TEXT(card, 2, "L2Setmac");
+	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
+					  qeth_l2_send_setmac_cb);
+}
+
+static int qeth_l2_send_delmac_cb(struct qeth_card *card,
+			   struct qeth_reply *reply,
+			   unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 2, "L2Dmaccb");
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->hdr.return_code) {
+		QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
+		return 0;
+	}
+	card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+
+	return 0;
+}
+
+static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
+{
+	QETH_CARD_TEXT(card, 2, "L2Delmac");
+	if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+		return 0;
+	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
+					  qeth_l2_send_delmac_cb);
+}
+
+static int qeth_l2_request_initial_mac(struct qeth_card *card)
+{
+	int rc = 0;
+	char vendor_pre[] = {0x02, 0x00, 0x00};
+
+	QETH_DBF_TEXT(SETUP, 2, "doL2init");
+	QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
+
+	rc = qeth_query_setadapterparms(card);
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "could not query adapter parameters on "
+			"device %s: x%x\n", CARD_BUS_ID(card), rc);
+	}
+
+	if (card->info.type == QETH_CARD_TYPE_IQD ||
+	    card->info.type == QETH_CARD_TYPE_OSM ||
+	    card->info.type == QETH_CARD_TYPE_OSX ||
+	    card->info.guestlan) {
+		rc = qeth_setadpparms_change_macaddr(card);
+		if (rc) {
+			QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
+				"device %s: x%x\n", CARD_BUS_ID(card), rc);
+			QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+			return rc;
+		}
+		QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
+	} else {
+		random_ether_addr(card->dev->dev_addr);
+		memcpy(card->dev->dev_addr, vendor_pre, 3);
+	}
+	return 0;
+}
+
+static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+	struct qeth_card *card = dev->ml_priv;
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 3, "setmac");
+
+	if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) {
+		QETH_CARD_TEXT(card, 3, "setmcINV");
+		return -EOPNOTSUPP;
+	}
+
+	if (card->info.type == QETH_CARD_TYPE_OSN ||
+	    card->info.type == QETH_CARD_TYPE_OSM ||
+	    card->info.type == QETH_CARD_TYPE_OSX) {
+		QETH_CARD_TEXT(card, 3, "setmcTYP");
+		return -EOPNOTSUPP;
+	}
+	QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN);
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "setmcREC");
+		return -ERESTARTSYS;
+	}
+	rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
+	if (!rc)
+		rc = qeth_l2_send_setmac(card, addr->sa_data);
+	return rc ? -EINVAL : 0;
+}
+
+static void qeth_l2_set_multicast_list(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+	struct netdev_hw_addr *ha;
+
+	if (card->info.type == QETH_CARD_TYPE_OSN)
+		return;
+
+	QETH_CARD_TEXT(card, 3, "setmulti");
+	if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
+	    (card->state != CARD_STATE_UP))
+		return;
+	qeth_l2_del_all_mc(card, 1);
+	spin_lock_bh(&card->mclock);
+	netdev_for_each_mc_addr(ha, dev)
+		qeth_l2_add_mc(card, ha->addr, 0);
+
+	netdev_for_each_uc_addr(ha, dev)
+		qeth_l2_add_mc(card, ha->addr, 1);
+
+	spin_unlock_bh(&card->mclock);
+	if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+		return;
+	qeth_setadp_promisc_mode(card);
+}
+
+static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	int rc;
+	struct qeth_hdr *hdr = NULL;
+	int elements = 0;
+	struct qeth_card *card = dev->ml_priv;
+	struct sk_buff *new_skb = skb;
+	int ipv = qeth_get_ip_version(skb);
+	int cast_type = qeth_l2_get_cast_type(card, skb);
+	struct qeth_qdio_out_q *queue = card->qdio.out_qs
+		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
+	int tx_bytes = skb->len;
+	int data_offset = -1;
+	int elements_needed = 0;
+	int hd_len = 0;
+
+	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
+		card->stats.tx_carrier_errors++;
+		goto tx_drop;
+	}
+
+	if ((card->info.type == QETH_CARD_TYPE_OSN) &&
+	    (skb->protocol == htons(ETH_P_IPV6)))
+		goto tx_drop;
+
+	if (card->options.performance_stats) {
+		card->perf_stats.outbound_cnt++;
+		card->perf_stats.outbound_start_time = qeth_get_micros();
+	}
+	netif_stop_queue(dev);
+
+	if (card->info.type == QETH_CARD_TYPE_OSN)
+		hdr = (struct qeth_hdr *)skb->data;
+	else {
+		if (card->info.type == QETH_CARD_TYPE_IQD) {
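+			/* IQD: build the qeth header (plus a copy of the
+			 * Ethernet header) in a separate buffer and send the
+			 * payload at data_offset */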
+			new_skb = skb;
+			data_offset = ETH_HLEN;
+			hd_len = ETH_HLEN;
+			hdr = kmem_cache_alloc(qeth_core_header_cache,
+						GFP_ATOMIC);
+			if (!hdr)
+				goto tx_drop;
+			elements_needed++;
+			skb_reset_mac_header(new_skb);
+			qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
+			hdr->hdr.l2.pkt_length = new_skb->len;
+			memcpy(((char *)hdr) + sizeof(struct qeth_hdr),
+				skb_mac_header(new_skb), ETH_HLEN);
+		} else {
+			/* create a clone with writeable headroom */
+			new_skb = skb_realloc_headroom(skb,
+						sizeof(struct qeth_hdr));
+			if (!new_skb)
+				goto tx_drop;
+			hdr = (struct qeth_hdr *)skb_push(new_skb,
+						sizeof(struct qeth_hdr));
+			skb_set_mac_header(new_skb, sizeof(struct qeth_hdr));
+			qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
+		}
+	}
+
+	elements = qeth_get_elements_no(card, (void *)hdr, new_skb,
+						elements_needed);
+	if (!elements) {
+		if (data_offset >= 0)
+			kmem_cache_free(qeth_core_header_cache, hdr);
+		goto tx_drop;
+	}
+
+	if (card->info.type != QETH_CARD_TYPE_IQD) {
+		if (qeth_hdr_chk_and_bounce(new_skb,
+		    sizeof(struct qeth_hdr_layer2)))
+			goto tx_drop;
+		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
+					 elements);
+	} else
+		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
+					elements, data_offset, hd_len);
+	if (!rc) {
+		card->stats.tx_packets++;
+		card->stats.tx_bytes += tx_bytes;
+		if (new_skb != skb)
+			dev_kfree_skb_any(skb);
+		rc = NETDEV_TX_OK;
+	} else {
+		if (data_offset >= 0)
+			kmem_cache_free(qeth_core_header_cache, hdr);
+
+		if (rc == -EBUSY) {
+			if (new_skb != skb)
+				dev_kfree_skb_any(new_skb);
+			return NETDEV_TX_BUSY;
+		} else
+			goto tx_drop;
+	}
+
+	netif_wake_queue(dev);
+	if (card->options.performance_stats)
+		card->perf_stats.outbound_time += qeth_get_micros() -
+			card->perf_stats.outbound_start_time;
+	return rc;
+
+tx_drop:
+	card->stats.tx_dropped++;
+	card->stats.tx_errors++;
+	if ((new_skb != skb) && new_skb)
+		dev_kfree_skb_any(new_skb);
+	dev_kfree_skb_any(skb);
+	netif_wake_queue(dev);
+	return NETDEV_TX_OK;
+}
+
+static int __qeth_l2_open(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 4, "qethopen");
+	if (card->state == CARD_STATE_UP)
+		return rc;
+	if (card->state != CARD_STATE_SOFTSETUP)
+		return -ENODEV;
+
+	if ((card->info.type != QETH_CARD_TYPE_OSN) &&
+	     (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
+		QETH_CARD_TEXT(card, 4, "nomacadr");
+		return -EPERM;
+	}
+	card->data.state = CH_STATE_UP;
+	card->state = CARD_STATE_UP;
+	netif_start_queue(dev);
+
+	if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
+		napi_enable(&card->napi);
+		napi_schedule(&card->napi);
+	} else
+		rc = -EIO;
+	return rc;
+}
+
+static int qeth_l2_open(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 5, "qethope_");
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "openREC");
+		return -ERESTARTSYS;
+	}
+	return __qeth_l2_open(dev);
+}
+
+static int qeth_l2_stop(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 4, "qethstop");
+	netif_tx_disable(dev);
+	if (card->state == CARD_STATE_UP) {
+		card->state = CARD_STATE_SOFTSETUP;
+		napi_disable(&card->napi);
+	}
+	return 0;
+}
+
+static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	INIT_LIST_HEAD(&card->vid_list);
+	INIT_LIST_HEAD(&card->mc_list);
+	card->options.layer2 = 1;
+	card->info.hwtrap = 0;
+	card->discipline.start_poll = qeth_qdio_start_poll;
+	card->discipline.input_handler = (qdio_handler_t *)
+		qeth_qdio_input_handler;
+	card->discipline.output_handler = (qdio_handler_t *)
+		qeth_qdio_output_handler;
+	card->discipline.recover = qeth_l2_recover;
+	return 0;
+}
+
+static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
+
+	qeth_set_allowed_threads(card, 0, 1);
+	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+
+	if (cgdev->state == CCWGROUP_ONLINE)
+		qeth_l2_set_offline(cgdev);
+
+	if (card->dev) {
+		unregister_netdev(card->dev);
+		card->dev = NULL;
+	}
+	return;
+}
+
+static const struct ethtool_ops qeth_l2_ethtool_ops = {
+	.get_link = ethtool_op_get_link,
+	.get_strings = qeth_core_get_strings,
+	.get_ethtool_stats = qeth_core_get_ethtool_stats,
+	.get_sset_count = qeth_core_get_sset_count,
+	.get_drvinfo = qeth_core_get_drvinfo,
+	.get_settings = qeth_core_ethtool_get_settings,
+};
+
+static const struct ethtool_ops qeth_l2_osn_ops = {
+	.get_strings = qeth_core_get_strings,
+	.get_ethtool_stats = qeth_core_get_ethtool_stats,
+	.get_sset_count = qeth_core_get_sset_count,
+	.get_drvinfo = qeth_core_get_drvinfo,
+};
+
+static const struct net_device_ops qeth_l2_netdev_ops = {
+	.ndo_open		= qeth_l2_open,
+	.ndo_stop		= qeth_l2_stop,
+	.ndo_get_stats		= qeth_get_stats,
+	.ndo_start_xmit		= qeth_l2_hard_start_xmit,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_rx_mode	= qeth_l2_set_multicast_list,
+	.ndo_do_ioctl	   	= qeth_l2_do_ioctl,
+	.ndo_set_mac_address    = qeth_l2_set_mac_address,
+	.ndo_change_mtu	   	= qeth_change_mtu,
+	.ndo_vlan_rx_add_vid	= qeth_l2_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid   = qeth_l2_vlan_rx_kill_vid,
+	.ndo_tx_timeout	   	= qeth_tx_timeout,
+};
+
+static int qeth_l2_setup_netdev(struct qeth_card *card)
+{
+	switch (card->info.type) {
+	case QETH_CARD_TYPE_IQD:
+		card->dev = alloc_netdev(0, "hsi%d", ether_setup);
+		break;
+	case QETH_CARD_TYPE_OSN:
+		card->dev = alloc_netdev(0, "osn%d", ether_setup);
+		card->dev->flags |= IFF_NOARP;
+		break;
+	default:
+		card->dev = alloc_etherdev(0);
+	}
+
+	if (!card->dev)
+		return -ENODEV;
+
+	card->dev->ml_priv = card;
+	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
+	card->dev->mtu = card->info.initial_mtu;
+	card->dev->netdev_ops = &qeth_l2_netdev_ops;
+	if (card->info.type != QETH_CARD_TYPE_OSN)
+		SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
+	else
+		SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
+	card->dev->features |= NETIF_F_HW_VLAN_FILTER;
+	card->info.broadcast_capable = 1;
+	qeth_l2_request_initial_mac(card);
+	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
+	netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
+	return register_netdev(card->dev);
+}
+
+static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc = 0;
+	enum qeth_card_states recover_flag;
+
+	BUG_ON(!card);
+	mutex_lock(&card->discipline_mutex);
+	mutex_lock(&card->conf_mutex);
+	QETH_DBF_TEXT(SETUP, 2, "setonlin");
+	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+
+	recover_flag = card->state;
+	rc = qeth_core_hardsetup_card(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+		rc = -ENODEV;
+		goto out_remove;
+	}
+
+	if (!card->dev && qeth_l2_setup_netdev(card)) {
+		rc = -ENODEV;
+		goto out_remove;
+	}
+
+	if (card->info.type != QETH_CARD_TYPE_OSN)
+		qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
+
+	if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
+		if (card->info.hwtrap &&
+		    qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
+			card->info.hwtrap = 0;
+	} else
+		card->info.hwtrap = 0;
+
+	card->state = CARD_STATE_HARDSETUP;
+	memset(&card->rx, 0, sizeof(struct qeth_rx));
+	qeth_print_status_message(card);
+
+	/* softsetup */
+	QETH_DBF_TEXT(SETUP, 2, "softsetp");
+
+	rc = qeth_send_startlan(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+		if (rc == 0xe080) {
+			dev_warn(&card->gdev->dev,
+				"The LAN is offline\n");
+			card->lan_online = 0;
+			goto contin;
+		}
+		rc = -ENODEV;
+		goto out_remove;
+	} else
+		card->lan_online = 1;
+
+contin:
+	if ((card->info.type == QETH_CARD_TYPE_OSD) ||
+	    (card->info.type == QETH_CARD_TYPE_OSX))
+		/* configure isolation level */
+		qeth_set_access_ctrl_online(card);
+
+	if (card->info.type != QETH_CARD_TYPE_OSN &&
+	    card->info.type != QETH_CARD_TYPE_OSM)
+		qeth_l2_process_vlans(card);
+
+	netif_tx_disable(card->dev);
+
+	rc = qeth_init_qdio_queues(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+		rc = -ENODEV;
+		goto out_remove;
+	}
+	card->state = CARD_STATE_SOFTSETUP;
+	if (card->lan_online)
+		netif_carrier_on(card->dev);
+	else
+		netif_carrier_off(card->dev);
+
+	qeth_set_allowed_threads(card, 0xffffffff, 0);
+	if (recover_flag == CARD_STATE_RECOVER) {
+		if (recovery_mode &&
+		    card->info.type != QETH_CARD_TYPE_OSN) {
+			__qeth_l2_open(card->dev);
+		} else {
+			rtnl_lock();
+			dev_open(card->dev);
+			rtnl_unlock();
+		}
+		/* this also sets saved unicast addresses */
+		qeth_l2_set_multicast_list(card->dev);
+	}
+	/* let user_space know that device is online */
+	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
+	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
+	return 0;
+
+out_remove:
+	qeth_l2_stop_card(card, 0);
+	ccw_device_set_offline(CARD_DDEV(card));
+	ccw_device_set_offline(CARD_WDEV(card));
+	ccw_device_set_offline(CARD_RDEV(card));
+	if (recover_flag == CARD_STATE_RECOVER)
+		card->state = CARD_STATE_RECOVER;
+	else
+		card->state = CARD_STATE_DOWN;
+	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
+	return rc;
+}
+
+static int qeth_l2_set_online(struct ccwgroup_device *gdev)
+{
+	return __qeth_l2_set_online(gdev, 0);
+}
+
+static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
+					int recovery_mode)
+{
+	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
+	int rc = 0, rc2 = 0, rc3 = 0;
+	enum qeth_card_states recover_flag;
+
+	mutex_lock(&card->discipline_mutex);
+	mutex_lock(&card->conf_mutex);
+	QETH_DBF_TEXT(SETUP, 3, "setoffl");
+	QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
+
+	if (card->dev && netif_carrier_ok(card->dev))
+		netif_carrier_off(card->dev);
+	recover_flag = card->state;
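+	/* disarm the HW trap; during a recovery it stays armed unless
+	 * hwtrap == 2 */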
+	if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
+		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+		card->info.hwtrap = 1;
+	}
+	qeth_l2_stop_card(card, recovery_mode);
+	rc  = ccw_device_set_offline(CARD_DDEV(card));
+	rc2 = ccw_device_set_offline(CARD_WDEV(card));
+	rc3 = ccw_device_set_offline(CARD_RDEV(card));
+	if (!rc)
+		rc = (rc2) ? rc2 : rc3;
+	if (rc)
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+	if (recover_flag == CARD_STATE_UP)
+		card->state = CARD_STATE_RECOVER;
+	/* let user_space know that device is offline */
+	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
+	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
+	return 0;
+}
+
+static int qeth_l2_set_offline(struct ccwgroup_device *cgdev)
+{
+	return __qeth_l2_set_offline(cgdev, 0);
+}
+
+static int qeth_l2_recover(void *ptr)
+{
+	struct qeth_card *card;
+	int rc = 0;
+
+	card = (struct qeth_card *) ptr;
+	QETH_CARD_TEXT(card, 2, "recover1");
+	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
+		return 0;
+	QETH_CARD_TEXT(card, 2, "recover2");
+	dev_warn(&card->gdev->dev,
+		"A recovery process has been started for the device\n");
+	__qeth_l2_set_offline(card->gdev, 1);
+	rc = __qeth_l2_set_online(card->gdev, 1);
+	if (!rc)
+		dev_info(&card->gdev->dev,
+			"Device successfully recovered!\n");
+	else {
+		rtnl_lock();
+		dev_close(card->dev);
+		rtnl_unlock();
+		dev_warn(&card->gdev->dev, "The qeth device driver "
+			"failed to recover an error on the device\n");
+	}
+	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
+	return 0;
+}
+
+static int __init qeth_l2_init(void)
+{
+	pr_info("register layer 2 discipline\n");
+	return 0;
+}
+
+static void __exit qeth_l2_exit(void)
+{
+	pr_info("unregister layer 2 discipline\n");
+}
+
+static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	qeth_set_allowed_threads(card, 0, 1);
+	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
+		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+	qeth_qdio_clear_card(card, 0);
+	qeth_clear_qdio_buffers(card);
+}
+
+static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	if (card->dev)
+		netif_device_detach(card->dev);
+	qeth_set_allowed_threads(card, 0, 1);
+	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+	if (gdev->state == CCWGROUP_OFFLINE)
+		return 0;
+	if (card->state == CARD_STATE_UP) {
+		if (card->info.hwtrap)
+			qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+		__qeth_l2_set_offline(card->gdev, 1);
+	} else
+		__qeth_l2_set_offline(card->gdev, 0);
+	return 0;
+}
+
+static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc = 0;
+
+	if (gdev->state == CCWGROUP_OFFLINE)
+		goto out;
+
+	if (card->state == CARD_STATE_RECOVER) {
+		rc = __qeth_l2_set_online(card->gdev, 1);
+		if (rc) {
+			rtnl_lock();
+			dev_close(card->dev);
+			rtnl_unlock();
+		}
+	} else
+		rc = __qeth_l2_set_online(card->gdev, 0);
+out:
+	qeth_set_allowed_threads(card, 0xffffffff, 0);
+	if (card->dev)
+		netif_device_attach(card->dev);
+	if (rc)
+		dev_warn(&card->gdev->dev, "The qeth device driver "
+			"failed to recover an error on the device\n");
+	return rc;
+}
+
+struct ccwgroup_driver qeth_l2_ccwgroup_driver = {
+	.probe = qeth_l2_probe_device,
+	.remove = qeth_l2_remove_device,
+	.set_online = qeth_l2_set_online,
+	.set_offline = qeth_l2_set_offline,
+	.shutdown = qeth_l2_shutdown,
+	.freeze = qeth_l2_pm_suspend,
+	.thaw = qeth_l2_pm_resume,
+	.restore = qeth_l2_pm_resume,
+};
+EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver);
+
+static int qeth_osn_send_control_data(struct qeth_card *card, int len,
+			   struct qeth_cmd_buffer *iob)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 5, "osndctrd");
+
+	wait_event(card->wait_q,
+		   atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
+	qeth_prepare_control_data(card, len, iob);
+	QETH_CARD_TEXT(card, 6, "osnoirqp");
+	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
+	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
+			      (addr_t) iob, 0, 0);
+	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
+			   "ccw_device_start rc = %i\n", rc);
+		QETH_CARD_TEXT_(card, 2, " err%d", rc);
+		qeth_release_buffer(iob->channel, iob);
+		atomic_set(&card->write.irq_pending, 0);
+		wake_up(&card->wait_q);
+	}
+	return rc;
+}
+
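+/*
+ * Wrap an OSN payload into an IPA PDU: the total length goes into the
+ * PDU header, the payload length into each of the three PDU length
+ * fields, then the buffer is sent as control data.
+ */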
+static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
+			struct qeth_cmd_buffer *iob, int data_len)
+{
+	u16 s1, s2;
+
+	QETH_CARD_TEXT(card, 4, "osndipa");
+
+	qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
+	s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
+	s2 = (u16)data_len;
+	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
+	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
+	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
+	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
+	return qeth_osn_send_control_data(card, s1, iob);
+}
+
+int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_card *card;
+	int rc;
+
+	if (!dev)
+		return -ENODEV;
+	card = dev->ml_priv;
+	if (!card)
+		return -ENODEV;
+	QETH_CARD_TEXT(card, 2, "osnsdmc");
+	if ((card->state != CARD_STATE_UP) &&
+	    (card->state != CARD_STATE_SOFTSETUP))
+		return -ENODEV;
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len);
+	rc = qeth_osn_send_ipa_cmd(card, iob, data_len);
+	return rc;
+}
+EXPORT_SYMBOL(qeth_osn_assist);
+
+int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
+		  int (*assist_cb)(struct net_device *, void *),
+		  int (*data_cb)(struct sk_buff *))
+{
+	struct qeth_card *card;
+
+	*dev = qeth_l2_netdev_by_devno(read_dev_no);
+	if (*dev == NULL)
+		return -ENODEV;
+	card = (*dev)->ml_priv;
+	if (!card)
+		return -ENODEV;
+	QETH_CARD_TEXT(card, 2, "osnreg");
+	if ((assist_cb == NULL) || (data_cb == NULL))
+		return -EINVAL;
+	card->osn_info.assist_cb = assist_cb;
+	card->osn_info.data_cb = data_cb;
+	return 0;
+}
+EXPORT_SYMBOL(qeth_osn_register);
+
+void qeth_osn_deregister(struct net_device *dev)
+{
+	struct qeth_card *card;
+
+	if (!dev)
+		return;
+	card = dev->ml_priv;
+	if (!card)
+		return;
+	QETH_CARD_TEXT(card, 2, "osndereg");
+	card->osn_info.assist_cb = NULL;
+	card->osn_info.data_cb = NULL;
+	return;
+}
+EXPORT_SYMBOL(qeth_osn_deregister);
+
+module_init(qeth_l2_init);
+module_exit(qeth_l2_exit);
+MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
+MODULE_DESCRIPTION("qeth layer 2 discipline");
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_l3.h b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_l3.h
new file mode 100644
index 0000000..e367315
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_l3.h
@@ -0,0 +1,71 @@
+/*
+ *  drivers/s390/net/qeth_l3.h
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ *		 Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#ifndef __QETH_L3_H__
+#define __QETH_L3_H__
+
+#include "qeth_core.h"
+
+#define QETH_SNIFF_AVAIL	0x0008
+
+struct qeth_ipaddr {
+	struct list_head entry;
+	enum qeth_ip_types type;
+	enum qeth_ipa_setdelip_flags set_flags;
+	enum qeth_ipa_setdelip_flags del_flags;
+	int is_multicast;
+	int users;
+	enum qeth_prot_versions proto;
+	unsigned char mac[OSA_ADDR_LEN];
+	union {
+		struct {
+			unsigned int addr;
+			unsigned int mask;
+		} a4;
+		struct {
+			struct in6_addr addr;
+			unsigned int pfxlen;
+		} a6;
+	} u;
+};
+
+struct qeth_ipato_entry {
+	struct list_head entry;
+	enum qeth_prot_versions proto;
+	char addr[16];
+	int mask_bits;
+};
+
+
+void qeth_l3_ipaddr4_to_string(const __u8 *, char *);
+int qeth_l3_string_to_ipaddr4(const char *, __u8 *);
+void qeth_l3_ipaddr6_to_string(const __u8 *, char *);
+int qeth_l3_string_to_ipaddr6(const char *, __u8 *);
+void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
+int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *);
+int qeth_l3_create_device_attributes(struct device *);
+void qeth_l3_remove_device_attributes(struct device *);
+int qeth_l3_setrouting_v4(struct qeth_card *);
+int qeth_l3_setrouting_v6(struct qeth_card *);
+int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
+void qeth_l3_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions,
+			u8 *, int);
+int qeth_l3_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
+void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
+int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
+void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
+			const u8 *);
+int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
+struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
+int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
+int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
+void qeth_l3_set_ip_addr_list(struct qeth_card *);
+
+#endif /* __QETH_L3_H__ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_l3_main.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_l3_main.c
new file mode 100644
index 0000000..951b6cf
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_l3_main.c
@@ -0,0 +1,3820 @@
+/*
+ *  drivers/s390/net/qeth_l3_main.c
+ *
+ *    Copyright IBM Corp. 2007, 2009
+ *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ *		 Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "qeth"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/igmp.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+
+#include <net/ip.h>
+#include <net/arp.h>
+#include <net/route.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_checksum.h>
+#include <net/iucv/af_iucv.h>
+
+#include "qeth_l3.h"
+
+
+static int qeth_l3_set_offline(struct ccwgroup_device *);
+static int qeth_l3_recover(void *);
+static int qeth_l3_stop(struct net_device *);
+static void qeth_l3_set_multicast_list(struct net_device *);
+static int qeth_l3_neigh_setup(struct net_device *, struct neigh_parms *);
+static int qeth_l3_register_addr_entry(struct qeth_card *,
+		struct qeth_ipaddr *);
+static int qeth_l3_deregister_addr_entry(struct qeth_card *,
+		struct qeth_ipaddr *);
+static int __qeth_l3_set_online(struct ccwgroup_device *, int);
+static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
+
+static int qeth_l3_isxdigit(char *buf)
+{
+	while (*buf) {
+		if (!isxdigit(*buf++))
+			return 0;
+	}
+	return 1;
+}
+
+void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
+{
+	sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
+}
+
+int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
+{
+	int count = 0, rc = 0;
+	int in[4];
+	char c;
+
+	rc = sscanf(buf, "%u.%u.%u.%u%c",
+		    &in[0], &in[1], &in[2], &in[3], &c);
+	if (rc != 4 && (rc != 5 || c != '\n'))
+		return -EINVAL;
+	for (count = 0; count < 4; count++) {
+		if (in[count] > 255)
+			return -EINVAL;
+		addr[count] = in[count];
+	}
+	return 0;
+}
+
+void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
+{
+	sprintf(buf, "%pI6", addr);
+}
+
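+/*
+ * Parse a textual IPv6 address into 16 bytes.  Groups are read left to
+ * right; once the "::" abbreviation is seen, the remaining groups are
+ * collected separately and written right-aligned into the address, so
+ * the elided groups stay zero.
+ */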
+int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
+{
+	const char *end, *end_tmp, *start;
+	__u16 *in;
+	char num[5];
+	int num2, cnt, out, found, save_cnt;
+	unsigned short in_tmp[8] = {0, };
+
+	cnt = out = found = save_cnt = num2 = 0;
+	end = start = buf;
+	in = (__u16 *) addr;
+	memset(in, 0, 16);
+	while (*end) {
+		end = strchr(start, ':');
+		if (end == NULL) {
+			end = buf + strlen(buf);
+			end_tmp = strchr(start, '\n');
+			if (end_tmp != NULL)
+				end = end_tmp;
+			out = 1;
+		}
+		if ((end - start)) {
+			memset(num, 0, 5);
+			if ((end - start) > 4)
+				return -EINVAL;
+			memcpy(num, start, end - start);
+			if (!qeth_l3_isxdigit(num))
+				return -EINVAL;
+			sscanf(start, "%x", &num2);
+			if (found)
+				in_tmp[save_cnt++] = num2;
+			else
+				in[cnt++] = num2;
+			if (out)
+				break;
+		} else {
+			if (found)
+				return -EINVAL;
+			found = 1;
+		}
+		start = ++end;
+	}
+	if (cnt + save_cnt > 8)
+		return -EINVAL;
+	cnt = 7;
+	while (save_cnt)
+		in[cnt--] = in_tmp[--save_cnt];
+	return 0;
+}
+
+void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
+				char *buf)
+{
+	if (proto == QETH_PROT_IPV4)
+		qeth_l3_ipaddr4_to_string(addr, buf);
+	else if (proto == QETH_PROT_IPV6)
+		qeth_l3_ipaddr6_to_string(addr, buf);
+}
+
+int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
+				__u8 *addr)
+{
+	if (proto == QETH_PROT_IPV4)
+		return qeth_l3_string_to_ipaddr4(buf, addr);
+	else if (proto == QETH_PROT_IPV6)
+		return qeth_l3_string_to_ipaddr6(buf, addr);
+	else
+		return -EINVAL;
+}
+
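+/*
+ * Expand an address into one bit per array element, most significant
+ * bit of each byte first, so prefixes can be compared with memcmp().
+ */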
+static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
+{
+	int i, j;
+	u8 octet;
+
+	for (i = 0; i < len; ++i) {
+		octet = addr[i];
+		for (j = 7; j >= 0; --j) {
+			bits[i*8 + j] = octet & 1;
+			octet >>= 1;
+		}
+	}
+}
+
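+/*
+ * Check whether an address falls into one of the configured IP address
+ * takeover (IPATO) ranges: compare the leading mask_bits bits against
+ * each entry of the same protocol; the per-protocol invert flags flip
+ * the result.
+ */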
+int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
+						struct qeth_ipaddr *addr)
+{
+	struct qeth_ipato_entry *ipatoe;
+	u8 addr_bits[128] = {0, };
+	u8 ipatoe_bits[128] = {0, };
+	int rc = 0;
+
+	if (!card->ipato.enabled)
+		return 0;
+
+	qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
+				  (addr->proto == QETH_PROT_IPV4) ? 4 : 16);
+	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
+		if (addr->proto != ipatoe->proto)
+			continue;
+		qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
+					  (ipatoe->proto == QETH_PROT_IPV4) ?
+					  4 : 16);
+		if (addr->proto == QETH_PROT_IPV4)
+			rc = !memcmp(addr_bits, ipatoe_bits,
+				     min(32, ipatoe->mask_bits));
+		else
+			rc = !memcmp(addr_bits, ipatoe_bits,
+				     min(128, ipatoe->mask_bits));
+		if (rc)
+			break;
+	}
+	/* invert? */
+	if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
+		rc = !rc;
+	else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
+		rc = !rc;
+
+	return rc;
+}
+
+/*
+ * Queue an "add"/"delete" request for an IP address on the todo list.
+ * If a matching todo entry already exists we only adjust its reference
+ * count.  Returns 1 if a new entry was queued, 0 if only a reference
+ * count was adjusted.
+ */
+static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
+					struct qeth_ipaddr *addr, int add)
+{
+	struct qeth_ipaddr *tmp, *t;
+	int found = 0;
+
+	if (card->options.sniffer)
+		return 0;
+	list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
+		if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
+		    (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
+			return 0;
+		if ((tmp->proto        == QETH_PROT_IPV4)     &&
+		    (addr->proto       == QETH_PROT_IPV4)     &&
+		    (tmp->type         == addr->type)         &&
+		    (tmp->is_multicast == addr->is_multicast) &&
+		    (tmp->u.a4.addr    == addr->u.a4.addr)    &&
+		    (tmp->u.a4.mask    == addr->u.a4.mask)) {
+			found = 1;
+			break;
+		}
+		if ((tmp->proto        == QETH_PROT_IPV6)      &&
+		    (addr->proto       == QETH_PROT_IPV6)      &&
+		    (tmp->type         == addr->type)          &&
+		    (tmp->is_multicast == addr->is_multicast)  &&
+		    (tmp->u.a6.pfxlen  == addr->u.a6.pfxlen)   &&
+		    (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
+			    sizeof(struct in6_addr)) == 0)) {
+			found = 1;
+			break;
+		}
+	}
+	if (found) {
+		if (addr->users != 0)
+			tmp->users += addr->users;
+		else
+			tmp->users += add ? 1 : -1;
+		if (tmp->users == 0) {
+			list_del(&tmp->entry);
+			kfree(tmp);
+		}
+		return 0;
+	} else {
+		if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
+			list_add(&addr->entry, card->ip_tbd_list);
+		else {
+			if (addr->users == 0)
+				addr->users += add ? 1 : -1;
+			if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
+			    qeth_l3_is_addr_covered_by_ipato(card, addr)) {
+				QETH_CARD_TEXT(card, 2, "tkovaddr");
+				addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
+			}
+			list_add_tail(&addr->entry, card->ip_tbd_list);
+		}
+		return 1;
+	}
+}
+
+int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 4, "delip");
+
+	if (addr->proto == QETH_PROT_IPV4)
+		QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
+	else {
+		QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
+		QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
+	}
+	spin_lock_irqsave(&card->ip_lock, flags);
+	rc = __qeth_l3_insert_ip_todo(card, addr, 0);
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+	return rc;
+}
+
+int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 4, "addip");
+	if (addr->proto == QETH_PROT_IPV4)
+		QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
+	else {
+		QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
+		QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
+	}
+	spin_lock_irqsave(&card->ip_lock, flags);
+	rc = __qeth_l3_insert_ip_todo(card, addr, 1);
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+	return rc;
+}
+
+
+struct qeth_ipaddr *qeth_l3_get_addr_buffer(
+				enum qeth_prot_versions prot)
+{
+	struct qeth_ipaddr *addr;
+
+	addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
+	if (addr == NULL) {
+		return NULL;
+	}
+	addr->type = QETH_IP_TYPE_NORMAL;
+	addr->proto = prot;
+	return addr;
+}
+
+static void qeth_l3_delete_mc_addresses(struct qeth_card *card)
+{
+	struct qeth_ipaddr *iptodo;
+	unsigned long flags;
+
+	QETH_CARD_TEXT(card, 4, "delmc");
+	iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
+	if (!iptodo) {
+		QETH_CARD_TEXT(card, 2, "dmcnomem");
+		return;
+	}
+	iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
+	spin_lock_irqsave(&card->ip_lock, flags);
+	if (!__qeth_l3_insert_ip_todo(card, iptodo, 0))
+		kfree(iptodo);
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+}
+
+/*
+ * Add/remove address to/from card's ip list, i.e. try to add or remove
+ * reference to/from an IP address that is already registered on the card.
+ * Returns:
+ *	0  address was on card and its reference count has been adjusted,
+ *	   but is still > 0, so nothing has to be done
+ *	   also returns 0 if the address was not on the card and the todo
+ *	   was to delete it -> there is also nothing to be done
+ *	1  address was not on card and the todo is to add it to the card's ip
+ *	   list
+ *	-1 address was on card and its reference count has been decremented
+ *	   to <= 0 by the todo -> address must be removed from card
+ */
+static int __qeth_l3_ref_ip_on_card(struct qeth_card *card,
+		struct qeth_ipaddr *todo, struct qeth_ipaddr **__addr)
+{
+	struct qeth_ipaddr *addr;
+	int found = 0;
+
+	list_for_each_entry(addr, &card->ip_list, entry) {
+		if ((addr->proto == QETH_PROT_IPV4) &&
+		    (todo->proto == QETH_PROT_IPV4) &&
+		    (addr->type == todo->type) &&
+		    (addr->u.a4.addr == todo->u.a4.addr) &&
+		    (addr->u.a4.mask == todo->u.a4.mask)) {
+			found = 1;
+			break;
+		}
+		if ((addr->proto == QETH_PROT_IPV6) &&
+		    (todo->proto == QETH_PROT_IPV6) &&
+		    (addr->type == todo->type) &&
+		    (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
+		    (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
+			    sizeof(struct in6_addr)) == 0)) {
+			found = 1;
+			break;
+		}
+	}
+	if (found) {
+		addr->users += todo->users;
+		if (addr->users <= 0) {
+			*__addr = addr;
+			return -1;
+		} else {
+			/* for VIPA and RXIP limit refcount to 1 */
+			if (addr->type != QETH_IP_TYPE_NORMAL)
+				addr->users = 1;
+			return 0;
+		}
+	}
+	if (todo->users > 0) {
+		/* for VIPA and RXIP limit refcount to 1 */
+		if (todo->type != QETH_IP_TYPE_NORMAL)
+			todo->users = 1;
+		return 1;
+	} else
+		return 0;
+}
+
+static void __qeth_l3_delete_all_mc(struct qeth_card *card,
+					unsigned long *flags)
+{
+	struct list_head fail_list;
+	struct qeth_ipaddr *addr, *tmp;
+	int rc;
+
+	INIT_LIST_HEAD(&fail_list);
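+	/*
+	 * ip_lock is dropped around each deregister call, so restart the
+	 * list walk from the beginning after every removal.
+	 */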
+again:
+	list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
+		if (addr->is_multicast) {
+			list_del(&addr->entry);
+			spin_unlock_irqrestore(&card->ip_lock, *flags);
+			rc = qeth_l3_deregister_addr_entry(card, addr);
+			spin_lock_irqsave(&card->ip_lock, *flags);
+			if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND))
+				kfree(addr);
+			else
+				list_add_tail(&addr->entry, &fail_list);
+			goto again;
+		}
+	}
+	list_splice(&fail_list, &card->ip_list);
+}
+
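+/*
+ * Work through the todo list: depending on the outcome of
+ * __qeth_l3_ref_ip_on_card() an entry only adjusts a reference count,
+ * gets registered with the card, or causes an on-card address to be
+ * deregistered.  ip_lock is dropped around the IPA commands.
+ */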
+void qeth_l3_set_ip_addr_list(struct qeth_card *card)
+{
+	struct list_head *tbd_list;
+	struct qeth_ipaddr *todo, *addr;
+	unsigned long flags;
+	int rc;
+
+	QETH_CARD_TEXT(card, 2, "sdiplist");
+	QETH_CARD_HEX(card, 2, &card, sizeof(void *));
+
+	if ((card->state != CARD_STATE_UP &&
+	     card->state != CARD_STATE_SOFTSETUP) || card->options.sniffer) {
+		return;
+	}
+
+	spin_lock_irqsave(&card->ip_lock, flags);
+	tbd_list = card->ip_tbd_list;
+	card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
+	if (!card->ip_tbd_list) {
+		QETH_CARD_TEXT(card, 0, "silnomem");
+		card->ip_tbd_list = tbd_list;
+		spin_unlock_irqrestore(&card->ip_lock, flags);
+		return;
+	} else
+		INIT_LIST_HEAD(card->ip_tbd_list);
+
+	while (!list_empty(tbd_list)) {
+		todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
+		list_del(&todo->entry);
+		if (todo->type == QETH_IP_TYPE_DEL_ALL_MC) {
+			__qeth_l3_delete_all_mc(card, &flags);
+			kfree(todo);
+			continue;
+		}
+		rc = __qeth_l3_ref_ip_on_card(card, todo, &addr);
+		if (rc == 0) {
+			/* nothing to be done; only adjusted refcount */
+			kfree(todo);
+		} else if (rc == 1) {
+			/* new entry to be added to on-card list */
+			spin_unlock_irqrestore(&card->ip_lock, flags);
+			rc = qeth_l3_register_addr_entry(card, todo);
+			spin_lock_irqsave(&card->ip_lock, flags);
+			if (!rc || (rc == IPA_RC_LAN_OFFLINE))
+				list_add_tail(&todo->entry, &card->ip_list);
+			else
+				kfree(todo);
+		} else if (rc == -1) {
+			/* on-card entry to be removed */
+			list_del_init(&addr->entry);
+			spin_unlock_irqrestore(&card->ip_lock, flags);
+			rc = qeth_l3_deregister_addr_entry(card, addr);
+			spin_lock_irqsave(&card->ip_lock, flags);
+			if (!rc || (rc == IPA_RC_IP_ADDRESS_NOT_DEFINED))
+				kfree(addr);
+			else
+				list_add_tail(&addr->entry, &card->ip_list);
+			kfree(todo);
+		}
+	}
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+	kfree(tbd_list);
+}
+
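+/*
+ * Drop the whole todo list.  Entries on the ip_list are freed as well,
+ * except that during recovery non-multicast addresses are re-queued on
+ * the todo list so they get registered again once the card is back up.
+ */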
+static void qeth_l3_clear_ip_list(struct qeth_card *card, int recover)
+{
+	struct qeth_ipaddr *addr, *tmp;
+	unsigned long flags;
+
+	QETH_CARD_TEXT(card, 4, "clearip");
+	if (recover && card->options.sniffer)
+		return;
+	spin_lock_irqsave(&card->ip_lock, flags);
+	/* clear todo list */
+	list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
+		list_del(&addr->entry);
+		kfree(addr);
+	}
+
+	while (!list_empty(&card->ip_list)) {
+		addr = list_entry(card->ip_list.next,
+				  struct qeth_ipaddr, entry);
+		list_del_init(&addr->entry);
+		if (!recover || addr->is_multicast) {
+			kfree(addr);
+			continue;
+		}
+		list_add_tail(&addr->entry, card->ip_tbd_list);
+	}
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+}
+
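+/*
+ * Look up an address in the given list; same_type selects whether a hit
+ * requires the same or a different address type.
+ */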
+static int qeth_l3_address_exists_in_list(struct list_head *list,
+		struct qeth_ipaddr *addr, int same_type)
+{
+	struct qeth_ipaddr *tmp;
+
+	list_for_each_entry(tmp, list, entry) {
+		if ((tmp->proto == QETH_PROT_IPV4) &&
+		    (addr->proto == QETH_PROT_IPV4) &&
+		    ((same_type && (tmp->type == addr->type)) ||
+		    (!same_type && (tmp->type != addr->type))) &&
+		    (tmp->u.a4.addr == addr->u.a4.addr))
+			return 1;
+
+		if ((tmp->proto == QETH_PROT_IPV6) &&
+		    (addr->proto == QETH_PROT_IPV6) &&
+		    ((same_type && (tmp->type == addr->type)) ||
+		    (!same_type && (tmp->type != addr->type))) &&
+		    (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
+			    sizeof(struct in6_addr)) == 0))
+			return 1;
+
+	}
+	return 0;
+}
+
+static int qeth_l3_send_setdelmc(struct qeth_card *card,
+			struct qeth_ipaddr *addr, int ipacmd)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "setdelmc");
+
+	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
+	if (addr->proto == QETH_PROT_IPV6)
+		memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
+		       sizeof(struct in6_addr));
+	else
+		memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
+
+	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
+
+	return rc;
+}
+
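+/*
+ * Build a 16 byte netmask from an IPv6 prefix length: whole bytes are
+ * set to 0xff, a partially covered byte gets its top bits set.
+ */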
+static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
+{
+	int i, j;
+	for (i = 0; i < 16; i++) {
+		j = (len) - (i * 8);
+		if (j >= 8)
+			netmask[i] = 0xff;
+		else if (j > 0)
+			netmask[i] = (u8)(0xFF00 >> j);
+		else
+			netmask[i] = 0;
+	}
+}
+
+static int qeth_l3_send_setdelip(struct qeth_card *card,
+		struct qeth_ipaddr *addr, int ipacmd, unsigned int flags)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+	__u8 netmask[16];
+
+	QETH_CARD_TEXT(card, 4, "setdelip");
+	QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
+
+	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	if (addr->proto == QETH_PROT_IPV6) {
+		memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
+		       sizeof(struct in6_addr));
+		qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen);
+		memcpy(cmd->data.setdelip6.mask, netmask,
+		       sizeof(struct in6_addr));
+		cmd->data.setdelip6.flags = flags;
+	} else {
+		memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
+		memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
+		cmd->data.setdelip4.flags = flags;
+	}
+
+	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
+
+	return rc;
+}
+
+static int qeth_l3_send_setrouting(struct qeth_card *card,
+	enum qeth_routing_types type, enum qeth_prot_versions prot)
+{
+	int rc;
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_CARD_TEXT(card, 4, "setroutg");
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.setrtg.type = (type);
+	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
+
+	return rc;
+}
+
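+/*
+ * Clamp the requested routing type to what the card type supports:
+ * HiperSockets (IQD) accepts the connector and multicast router roles,
+ * OSA accepts the primary/secondary router roles (multicast router only
+ * if the IPA assist reports it); anything else falls back to NO_ROUTER.
+ */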
+static void qeth_l3_correct_routing_type(struct qeth_card *card,
+		enum qeth_routing_types *type, enum qeth_prot_versions prot)
+{
+	if (card->info.type == QETH_CARD_TYPE_IQD) {
+		switch (*type) {
+		case NO_ROUTER:
+		case PRIMARY_CONNECTOR:
+		case SECONDARY_CONNECTOR:
+		case MULTICAST_ROUTER:
+			return;
+		default:
+			goto out_inval;
+		}
+	} else {
+		switch (*type) {
+		case NO_ROUTER:
+		case PRIMARY_ROUTER:
+		case SECONDARY_ROUTER:
+			return;
+		case MULTICAST_ROUTER:
+			if (qeth_is_ipafunc_supported(card, prot,
+						      IPA_OSA_MC_ROUTER))
+				return;
+		default:
+			goto out_inval;
+		}
+	}
+out_inval:
+	*type = NO_ROUTER;
+}
+
+int qeth_l3_setrouting_v4(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "setrtg4");
+
+	qeth_l3_correct_routing_type(card, &card->options.route4.type,
+				  QETH_PROT_IPV4);
+
+	rc = qeth_l3_send_setrouting(card, card->options.route4.type,
+				  QETH_PROT_IPV4);
+	if (rc) {
+		card->options.route4.type = NO_ROUTER;
+		QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
+			" on %s. Type set to 'no router'.\n", rc,
+			QETH_CARD_IFNAME(card));
+	}
+	return rc;
+}
+
+int qeth_l3_setrouting_v6(struct qeth_card *card)
+{
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 3, "setrtg6");
+#ifdef CONFIG_QETH_IPV6
+
+	if (!qeth_is_supported(card, IPA_IPV6))
+		return 0;
+	qeth_l3_correct_routing_type(card, &card->options.route6.type,
+				  QETH_PROT_IPV6);
+
+	rc = qeth_l3_send_setrouting(card, card->options.route6.type,
+				  QETH_PROT_IPV6);
+	if (rc) {
+		card->options.route6.type = NO_ROUTER;
+		QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
+			" on %s. Type set to 'no router'.\n", rc,
+			QETH_CARD_IFNAME(card));
+	}
+#endif
+	return rc;
+}
+
+/*
+ * IP address takeover related functions
+ */
+static void qeth_l3_clear_ipato_list(struct qeth_card *card)
+{
+
+	struct qeth_ipato_entry *ipatoe, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->ip_lock, flags);
+	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
+		list_del(&ipatoe->entry);
+		kfree(ipatoe);
+	}
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+}
+
+int qeth_l3_add_ipato_entry(struct qeth_card *card,
+				struct qeth_ipato_entry *new)
+{
+	struct qeth_ipato_entry *ipatoe;
+	unsigned long flags;
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 2, "addipato");
+	spin_lock_irqsave(&card->ip_lock, flags);
+	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
+		if (ipatoe->proto != new->proto)
+			continue;
+		if (!memcmp(ipatoe->addr, new->addr,
+			    (ipatoe->proto == QETH_PROT_IPV4) ? 4 : 16) &&
+		    (ipatoe->mask_bits == new->mask_bits)) {
+			rc = -EEXIST;
+			break;
+		}
+	}
+	if (!rc)
+		list_add_tail(&new->entry, &card->ipato.entries);
+
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+	return rc;
+}
+
+void qeth_l3_del_ipato_entry(struct qeth_card *card,
+		enum qeth_prot_versions proto, u8 *addr, int mask_bits)
+{
+	struct qeth_ipato_entry *ipatoe, *tmp;
+	unsigned long flags;
+
+	QETH_CARD_TEXT(card, 2, "delipato");
+	spin_lock_irqsave(&card->ip_lock, flags);
+	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
+		if (ipatoe->proto != proto)
+			continue;
+		if (!memcmp(ipatoe->addr, addr,
+			    (proto == QETH_PROT_IPV4) ? 4 : 16) &&
+		    (ipatoe->mask_bits == mask_bits)) {
+			list_del(&ipatoe->entry);
+			kfree(ipatoe);
+		}
+	}
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+}
+
+/*
+ * VIPA related functions
+ */
+int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
+	      const u8 *addr)
+{
+	struct qeth_ipaddr *ipaddr;
+	unsigned long flags;
+	int rc = 0;
+
+	ipaddr = qeth_l3_get_addr_buffer(proto);
+	if (ipaddr) {
+		if (proto == QETH_PROT_IPV4) {
+			QETH_CARD_TEXT(card, 2, "addvipa4");
+			memcpy(&ipaddr->u.a4.addr, addr, 4);
+			ipaddr->u.a4.mask = 0;
+		} else if (proto == QETH_PROT_IPV6) {
+			QETH_CARD_TEXT(card, 2, "addvipa6");
+			memcpy(&ipaddr->u.a6.addr, addr, 16);
+			ipaddr->u.a6.pfxlen = 0;
+		}
+		ipaddr->type = QETH_IP_TYPE_VIPA;
+		ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG;
+		ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
+	} else
+		return -ENOMEM;
+	spin_lock_irqsave(&card->ip_lock, flags);
+	if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
+	    qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
+		rc = -EEXIST;
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+	if (rc) {
+		return rc;
+	}
+	if (!qeth_l3_add_ip(card, ipaddr))
+		kfree(ipaddr);
+	qeth_l3_set_ip_addr_list(card);
+	return rc;
+}
+
+void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
+	      const u8 *addr)
+{
+	struct qeth_ipaddr *ipaddr;
+
+	ipaddr = qeth_l3_get_addr_buffer(proto);
+	if (ipaddr) {
+		if (proto == QETH_PROT_IPV4) {
+			QETH_CARD_TEXT(card, 2, "delvipa4");
+			memcpy(&ipaddr->u.a4.addr, addr, 4);
+			ipaddr->u.a4.mask = 0;
+		} else if (proto == QETH_PROT_IPV6) {
+			QETH_CARD_TEXT(card, 2, "delvipa6");
+			memcpy(&ipaddr->u.a6.addr, addr, 16);
+			ipaddr->u.a6.pfxlen = 0;
+		}
+		ipaddr->type = QETH_IP_TYPE_VIPA;
+	} else
+		return;
+	if (!qeth_l3_delete_ip(card, ipaddr))
+		kfree(ipaddr);
+	qeth_l3_set_ip_addr_list(card);
+}
+
+/*
+ * proxy ARP related functions
+ */
+int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
+	      const u8 *addr)
+{
+	struct qeth_ipaddr *ipaddr;
+	unsigned long flags;
+	int rc = 0;
+
+	ipaddr = qeth_l3_get_addr_buffer(proto);
+	if (ipaddr) {
+		if (proto == QETH_PROT_IPV4) {
+			QETH_CARD_TEXT(card, 2, "addrxip4");
+			memcpy(&ipaddr->u.a4.addr, addr, 4);
+			ipaddr->u.a4.mask = 0;
+		} else if (proto == QETH_PROT_IPV6) {
+			QETH_CARD_TEXT(card, 2, "addrxip6");
+			memcpy(&ipaddr->u.a6.addr, addr, 16);
+			ipaddr->u.a6.pfxlen = 0;
+		}
+		ipaddr->type = QETH_IP_TYPE_RXIP;
+		ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
+		ipaddr->del_flags = 0;
+	} else
+		return -ENOMEM;
+	spin_lock_irqsave(&card->ip_lock, flags);
+	if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
+	    qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
+		rc = -EEXIST;
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+	if (rc) {
+		return rc;
+	}
+	if (!qeth_l3_add_ip(card, ipaddr))
+		kfree(ipaddr);
+	qeth_l3_set_ip_addr_list(card);
+	return 0;
+}
+
+void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
+			const u8 *addr)
+{
+	struct qeth_ipaddr *ipaddr;
+
+	ipaddr = qeth_l3_get_addr_buffer(proto);
+	if (ipaddr) {
+		if (proto == QETH_PROT_IPV4) {
+			QETH_CARD_TEXT(card, 2, "delrxip4");
+			memcpy(&ipaddr->u.a4.addr, addr, 4);
+			ipaddr->u.a4.mask = 0;
+		} else if (proto == QETH_PROT_IPV6) {
+			QETH_CARD_TEXT(card, 2, "delrxip6");
+			memcpy(&ipaddr->u.a6.addr, addr, 16);
+			ipaddr->u.a6.pfxlen = 0;
+		}
+		ipaddr->type = QETH_IP_TYPE_RXIP;
+	} else
+		return;
+	if (!qeth_l3_delete_ip(card, ipaddr))
+		kfree(ipaddr);
+	qeth_l3_set_ip_addr_list(card);
+}
+
+static int qeth_l3_register_addr_entry(struct qeth_card *card,
+				struct qeth_ipaddr *addr)
+{
+	char buf[50];
+	int rc = 0;
+	int cnt = 3;
+
+	if (addr->proto == QETH_PROT_IPV4) {
+		QETH_CARD_TEXT(card, 2, "setaddr4");
+		QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
+	} else if (addr->proto == QETH_PROT_IPV6) {
+		QETH_CARD_TEXT(card, 2, "setaddr6");
+		QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
+		QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
+	} else {
+		QETH_CARD_TEXT(card, 2, "setaddr?");
+		QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
+	}
+	do {
+		if (addr->is_multicast)
+			rc =  qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM);
+		else
+			rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP,
+					addr->set_flags);
+		if (rc)
+			QETH_CARD_TEXT(card, 2, "failed");
+	} while ((--cnt > 0) && rc);
+	if (rc) {
+		QETH_CARD_TEXT(card, 2, "FAILED");
+		qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
+		dev_warn(&card->gdev->dev,
+			"Registering IP address %s failed\n", buf);
+	}
+	return rc;
+}
+
+static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
+						struct qeth_ipaddr *addr)
+{
+	int rc = 0;
+
+	if (addr->proto == QETH_PROT_IPV4) {
+		QETH_CARD_TEXT(card, 2, "deladdr4");
+		QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
+	} else if (addr->proto == QETH_PROT_IPV6) {
+		QETH_CARD_TEXT(card, 2, "deladdr6");
+		QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
+		QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
+	} else {
+		QETH_CARD_TEXT(card, 2, "deladdr?");
+		QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
+	}
+	if (addr->is_multicast)
+		rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
+	else
+		rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP,
+					addr->del_flags);
+	if (rc)
+		QETH_CARD_TEXT(card, 2, "failed");
+
+	return rc;
+}
+
+static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
+{
+	if (cast_type == RTN_MULTICAST)
+		return QETH_CAST_MULTICAST;
+	if (cast_type == RTN_BROADCAST)
+		return QETH_CAST_BROADCAST;
+	return QETH_CAST_UNICAST;
+}
+
+static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
+{
+	u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
+	if (cast_type == RTN_MULTICAST)
+		return ct | QETH_CAST_MULTICAST;
+	if (cast_type == RTN_ANYCAST)
+		return ct | QETH_CAST_ANYCAST;
+	if (cast_type == RTN_BROADCAST)
+		return ct | QETH_CAST_BROADCAST;
+	return ct | QETH_CAST_UNICAST;
+}
+
+static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command,
+					__u32 mode)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "adpmode");
+
+	iob = qeth_get_adapter_cmd(card, command,
+				   sizeof(struct qeth_ipacmd_setadpparms));
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.setadapterparms.data.mode = mode;
+	rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
+			       NULL);
+	return rc;
+}
+
+static int qeth_l3_setadapter_hstr(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 4, "adphstr");
+
+	if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) {
+		rc = qeth_l3_send_setadp_mode(card,
+					IPA_SETADP_SET_BROADCAST_MODE,
+					card->options.broadcast_mode);
+		if (rc)
+			QETH_DBF_MESSAGE(2, "couldn't set broadcast mode on "
+				   "device %s: x%x\n",
+				   CARD_BUS_ID(card), rc);
+		rc = qeth_l3_send_setadp_mode(card,
+					IPA_SETADP_ALTER_MAC_ADDRESS,
+					card->options.macaddr_mode);
+		if (rc)
+			QETH_DBF_MESSAGE(2, "couldn't set macaddr mode on "
+				   "device %s: x%x\n", CARD_BUS_ID(card), rc);
+		return rc;
+	}
+	if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
+		QETH_DBF_MESSAGE(2, "set adapter parameters not available "
+			   "to set broadcast mode, using ALLRINGS "
+			   "on device %s:\n", CARD_BUS_ID(card));
+	if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
+		QETH_DBF_MESSAGE(2, "set adapter parameters not available "
+			   "to set macaddr mode, using NONCANONICAL "
+			   "on device %s:\n", CARD_BUS_ID(card));
+	return 0;
+}
+
+static int qeth_l3_setadapter_parms(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_DBF_TEXT(SETUP, 2, "setadprm");
+
+	if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
+		dev_info(&card->gdev->dev,
+			"set adapter parameters not supported.\n");
+		QETH_DBF_TEXT(SETUP, 2, " notsupp");
+		return 0;
+	}
+	rc = qeth_query_setadapterparms(card);
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: "
+			"0x%x\n", dev_name(&card->gdev->dev), rc);
+		return rc;
+	}
+	if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
+		rc = qeth_setadpparms_change_macaddr(card);
+		if (rc)
+			dev_warn(&card->gdev->dev, "Reading the adapter MAC"
+				" address failed\n");
+	}
+
+	if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
+	    (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
+		rc = qeth_l3_setadapter_hstr(card);
+
+	return rc;
+}
+
+static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
+			struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "defadpcb");
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->hdr.return_code == 0) {
+		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+			card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+		if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+			card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
+	}
+	if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
+	    cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
+		card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
+		QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask);
+	}
+	if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
+	    cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
+		card->info.tx_csum_mask =
+			cmd->data.setassparms.data.flags_32bit;
+		QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask);
+	}
+
+	return 0;
+}
+
+static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
+	struct qeth_card *card, enum qeth_ipa_funcs ipa_func, __u16 cmd_code,
+	__u16 len, enum qeth_prot_versions prot)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "getasscm");
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
+
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.setassparms.hdr.assist_no = ipa_func;
+	cmd->data.setassparms.hdr.length = 8 + len;
+	cmd->data.setassparms.hdr.command_code = cmd_code;
+	cmd->data.setassparms.hdr.return_code = 0;
+	cmd->data.setassparms.hdr.seq_no = 0;
+
+	return iob;
+}
+
+static int qeth_l3_send_setassparms(struct qeth_card *card,
+	struct qeth_cmd_buffer *iob, __u16 len, long data,
+	int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
+		unsigned long),
+	void *reply_param)
+{
+	int rc;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "sendassp");
+
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	if (len <= sizeof(__u32))
+		cmd->data.setassparms.data.flags_32bit = (__u32) data;
+	else   /* (len > sizeof(__u32)) */
+		memcpy(&cmd->data.setassparms.data, (void *) data, len);
+
+	rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
+	return rc;
+}
+
+#ifdef CONFIG_QETH_IPV6
+static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
+		enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_CARD_TEXT(card, 4, "simassp6");
+	iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
+				       0, QETH_PROT_IPV6);
+	rc = qeth_l3_send_setassparms(card, iob, 0, 0,
+				   qeth_l3_default_setassparms_cb, NULL);
+	return rc;
+}
+#endif
+
+static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
+		enum qeth_ipa_funcs ipa_func, __u16 cmd_code, long data)
+{
+	int rc;
+	int length = 0;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_CARD_TEXT(card, 4, "simassp4");
+	if (data)
+		length = sizeof(__u32);
+	iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
+				       length, QETH_PROT_IPV4);
+	rc = qeth_l3_send_setassparms(card, iob, length, data,
+				   qeth_l3_default_setassparms_cb, NULL);
+	return rc;
+}
+
+static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "ipaarp");
+
+	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
+		dev_info(&card->gdev->dev,
+			"ARP processing not supported on %s!\n",
+			QETH_CARD_IFNAME(card));
+		return 0;
+	}
+	rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
+					IPA_CMD_ASS_START, 0);
+	if (rc) {
+		dev_warn(&card->gdev->dev,
+			"Starting ARP processing support for %s failed\n",
+			QETH_CARD_IFNAME(card));
+	}
+	return rc;
+}
+
+static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "ipaipfrg");
+
+	if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
+		dev_info(&card->gdev->dev,
+			"Hardware IP fragmentation not supported on %s\n",
+			QETH_CARD_IFNAME(card));
+		return  -EOPNOTSUPP;
+	}
+
+	rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
+					  IPA_CMD_ASS_START, 0);
+	if (rc) {
+		dev_warn(&card->gdev->dev,
+			"Starting IP fragmentation support for %s failed\n",
+			QETH_CARD_IFNAME(card));
+	} else
+		dev_info(&card->gdev->dev,
+			"Hardware IP fragmentation enabled\n");
+	return rc;
+}
+
+static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "stsrcmac");
+
+	if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
+		dev_info(&card->gdev->dev,
+			"Inbound source MAC-address not supported on %s\n",
+			QETH_CARD_IFNAME(card));
+		return -EOPNOTSUPP;
+	}
+
+	rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC,
+					  IPA_CMD_ASS_START, 0);
+	if (rc)
+		dev_warn(&card->gdev->dev,
+			"Starting source MAC-address support for %s failed\n",
+			QETH_CARD_IFNAME(card));
+	return rc;
+}
+
+static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
+{
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 3, "strtvlan");
+
+	if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
+		dev_info(&card->gdev->dev,
+			"VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
+		return -EOPNOTSUPP;
+	}
+
+	rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO,
+					  IPA_CMD_ASS_START, 0);
+	if (rc) {
+		dev_warn(&card->gdev->dev,
+			"Starting VLAN support for %s failed\n",
+			QETH_CARD_IFNAME(card));
+	} else {
+		dev_info(&card->gdev->dev, "VLAN enabled\n");
+	}
+	return rc;
+}
+
+static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "stmcast");
+
+	if (!qeth_is_supported(card, IPA_MULTICASTING)) {
+		dev_info(&card->gdev->dev,
+			"Multicast not supported on %s\n",
+			QETH_CARD_IFNAME(card));
+		return -EOPNOTSUPP;
+	}
+
+	rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING,
+					  IPA_CMD_ASS_START, 0);
+	if (rc) {
+		dev_warn(&card->gdev->dev,
+			"Starting multicast support for %s failed\n",
+			QETH_CARD_IFNAME(card));
+	} else {
+		dev_info(&card->gdev->dev, "Multicast enabled\n");
+		card->dev->flags |= IFF_MULTICAST;
+	}
+	return rc;
+}
+
+#ifdef CONFIG_QETH_IPV6
+static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "softipv6");
+
+	if (card->info.type == QETH_CARD_TYPE_IQD)
+		goto out;
+
+	rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
+	if (rc) {
+		dev_err(&card->gdev->dev,
+			"Activating IPv6 support for %s failed\n",
+			QETH_CARD_IFNAME(card));
+		return rc;
+	}
+	rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6,
+					  IPA_CMD_ASS_START, 3);
+	if (rc) {
+		dev_err(&card->gdev->dev,
+			"Activating IPv6 support for %s failed\n",
+			QETH_CARD_IFNAME(card));
+		return rc;
+	}
+	rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6,
+					       IPA_CMD_ASS_START);
+	if (rc) {
+		dev_err(&card->gdev->dev,
+			"Activating IPv6 support for %s failed\n",
+			 QETH_CARD_IFNAME(card));
+		return rc;
+	}
+	rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
+					       IPA_CMD_ASS_START);
+	if (rc) {
+		dev_warn(&card->gdev->dev,
+			"Enabling the passthrough mode for %s failed\n",
+			QETH_CARD_IFNAME(card));
+		return rc;
+	}
+out:
+	dev_info(&card->gdev->dev, "IPV6 enabled\n");
+	return 0;
+}
+#endif
+
+static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
+{
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 3, "strtipv6");
+
+	if (!qeth_is_supported(card, IPA_IPV6)) {
+		dev_info(&card->gdev->dev,
+			"IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
+		return 0;
+	}
+#ifdef CONFIG_QETH_IPV6
+	rc = qeth_l3_softsetup_ipv6(card);
+#endif
+	return rc;
+}
+
+static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "stbrdcst");
+	card->info.broadcast_capable = 0;
+	if (!qeth_is_supported(card, IPA_FILTERING)) {
+		dev_info(&card->gdev->dev,
+			"Broadcast not supported on %s\n",
+			QETH_CARD_IFNAME(card));
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+	rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
+					  IPA_CMD_ASS_START, 0);
+	if (rc) {
+		dev_warn(&card->gdev->dev, "Enabling broadcast filtering for "
+			"%s failed\n", QETH_CARD_IFNAME(card));
+		goto out;
+	}
+
+	rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
+					  IPA_CMD_ASS_CONFIGURE, 1);
+	if (rc) {
+		dev_warn(&card->gdev->dev,
+			"Setting up broadcast filtering for %s failed\n",
+			QETH_CARD_IFNAME(card));
+		goto out;
+	}
+	card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
+	dev_info(&card->gdev->dev, "Broadcast enabled\n");
+	rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
+					  IPA_CMD_ASS_ENABLE, 1);
+	if (rc) {
+		dev_warn(&card->gdev->dev, "Setting up broadcast echo "
+			"filtering for %s failed\n", QETH_CARD_IFNAME(card));
+		goto out;
+	}
+	card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
+out:
+	if (card->info.broadcast_capable)
+		card->dev->flags |= IFF_BROADCAST;
+	else
+		card->dev->flags &= ~IFF_BROADCAST;
+	return rc;
+}
+
+static int qeth_l3_send_checksum_command(struct qeth_card *card)
+{
+	int rc;
+
+	rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
+					  IPA_CMD_ASS_START, 0);
+	if (rc) {
+		dev_warn(&card->gdev->dev, "Starting HW checksumming for %s "
+			"failed, using SW checksumming\n",
+			QETH_CARD_IFNAME(card));
+		return rc;
+	}
+	rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
+					  IPA_CMD_ASS_ENABLE,
+					  card->info.csum_mask);
+	if (rc) {
+		dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s "
+			"failed, using SW checksumming\n",
+			QETH_CARD_IFNAME(card));
+		return rc;
+	}
+	return 0;
+}
+
+static int qeth_l3_set_rx_csum(struct qeth_card *card, int on)
+{
+	int rc = 0;
+
+	if (on) {
+		rc = qeth_l3_send_checksum_command(card);
+		if (rc)
+			return -EIO;
+		dev_info(&card->gdev->dev,
+			"HW Checksumming (inbound) enabled\n");
+	} else {
+		rc = qeth_l3_send_simple_setassparms(card,
+			IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
+		if (rc)
+			return -EIO;
+	}
+
+	return 0;
+}
+
+static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
+{
+	QETH_CARD_TEXT(card, 3, "strtcsum");
+
+	if (card->dev->features & NETIF_F_RXCSUM) {
+		rtnl_lock();
+		/* force set_features call */
+		card->dev->features &= ~NETIF_F_RXCSUM;
+		netdev_update_features(card->dev);
+		rtnl_unlock();
+	}
+	return 0;
+}
+
+static int qeth_l3_start_ipa_tx_checksum(struct qeth_card *card)
+{
+	int rc = 0;
+
+	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
+		return rc;
+	rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
+			  IPA_CMD_ASS_START, 0);
+	if (rc)
+		goto err_out;
+	rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
+			  IPA_CMD_ASS_ENABLE, card->info.tx_csum_mask);
+	if (rc)
+		goto err_out;
+	dev_info(&card->gdev->dev, "HW TX Checksumming enabled\n");
+	return rc;
+err_out:
+	dev_warn(&card->gdev->dev, "Enabling HW TX checksumming for %s "
+		"failed, using SW TX checksumming\n", QETH_CARD_IFNAME(card));
+	return rc;
+}
+
+static int qeth_l3_start_ipa_tso(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "sttso");
+
+	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
+		dev_info(&card->gdev->dev,
+			"Outbound TSO not supported on %s\n",
+			QETH_CARD_IFNAME(card));
+		rc = -EOPNOTSUPP;
+	} else {
+		rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
+						IPA_CMD_ASS_START, 0);
+		if (rc)
+			dev_warn(&card->gdev->dev, "Starting outbound TCP "
+				"segmentation offload for %s failed\n",
+				QETH_CARD_IFNAME(card));
+		else
+			dev_info(&card->gdev->dev,
+				"Outbound TSO enabled\n");
+	}
+	if (rc)
+		card->dev->features &= ~NETIF_F_TSO;
+	return rc;
+}
+
+static int qeth_l3_start_ipassists(struct qeth_card *card)
+{
+	QETH_CARD_TEXT(card, 3, "strtipas");
+
+	qeth_set_access_ctrl_online(card);	/* go on*/
+	qeth_l3_start_ipa_arp_processing(card);	/* go on*/
+	qeth_l3_start_ipa_ip_fragmentation(card);	/* go on*/
+	qeth_l3_start_ipa_source_mac(card);	/* go on*/
+	qeth_l3_start_ipa_vlan(card);		/* go on*/
+	qeth_l3_start_ipa_multicast(card);		/* go on*/
+	qeth_l3_start_ipa_ipv6(card);		/* go on*/
+	qeth_l3_start_ipa_broadcast(card);		/* go on*/
+	qeth_l3_start_ipa_checksum(card);		/* go on*/
+	qeth_l3_start_ipa_tx_checksum(card);
+	qeth_l3_start_ipa_tso(card);		/* go on*/
+	return 0;
+}
+
+static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->hdr.return_code == 0)
+		memcpy(card->dev->dev_addr,
+			cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
+	else
+		random_ether_addr(card->dev->dev_addr);
+
+	return 0;
+}
+
+static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
+{
+	int rc = 0;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(SETUP, 2, "hsrmac");
+
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
+				     QETH_PROT_IPV6);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
+			card->info.unique_id;
+
+	rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb,
+				NULL);
+	return rc;
+}
+
+static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->hdr.return_code == 0)
+		card->info.unique_id = *((__u16 *)
+				&cmd->data.create_destroy_addr.unique_id[6]);
+	else {
+		card->info.unique_id =  UNIQUE_ID_IF_CREATE_ADDR_FAILED |
+					UNIQUE_ID_NOT_BY_CARD;
+		dev_warn(&card->gdev->dev, "The network adapter failed to "
+			"generate a unique ID\n");
+	}
+	return 0;
+}
+
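+/* Ask the card for a unique id (used for IPv6 address autoconfiguration);
+ * if IPv6 is not supported or the command fails, mark the id as not
+ * provided by the card.
+ */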
+static int qeth_l3_get_unique_id(struct qeth_card *card)
+{
+	int rc = 0;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(SETUP, 2, "guniqeid");
+
+	if (!qeth_is_supported(card, IPA_IPV6)) {
+		card->info.unique_id =  UNIQUE_ID_IF_CREATE_ADDR_FAILED |
+					UNIQUE_ID_NOT_BY_CARD;
+		return 0;
+	}
+
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
+				     QETH_PROT_IPV6);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
+			card->info.unique_id;
+
+	rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL);
+	return rc;
+}
+
+static int
+qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
+			    unsigned long data)
+{
+	struct qeth_ipa_cmd	   *cmd;
+	__u16 rc;
+
+	QETH_DBF_TEXT(SETUP, 2, "diastrcb");
+
+	cmd = (struct qeth_ipa_cmd *)data;
+	rc = cmd->hdr.return_code;
+	if (rc)
+		QETH_CARD_TEXT_(card, 2, "dxter%x", rc);
+	switch (cmd->data.diagass.action) {
+	case QETH_DIAGS_CMD_TRACE_QUERY:
+		break;
+	case QETH_DIAGS_CMD_TRACE_DISABLE:
+		switch (rc) {
+		case 0:
+		case IPA_RC_INVALID_SUBCMD:
+			card->info.promisc_mode = SET_PROMISC_MODE_OFF;
+			dev_info(&card->gdev->dev, "The HiperSockets network "
+				"traffic analyzer is deactivated\n");
+			break;
+		default:
+			break;
+		}
+		break;
+	case QETH_DIAGS_CMD_TRACE_ENABLE:
+		switch (rc) {
+		case 0:
+			card->info.promisc_mode = SET_PROMISC_MODE_ON;
+			dev_info(&card->gdev->dev, "The HiperSockets network "
+				"traffic analyzer is activated\n");
+			break;
+		case IPA_RC_HARDWARE_AUTH_ERROR:
+			dev_warn(&card->gdev->dev, "The device is not "
+				"authorized to run as a HiperSockets network "
+				"traffic analyzer\n");
+			break;
+		case IPA_RC_TRACE_ALREADY_ACTIVE:
+			dev_warn(&card->gdev->dev, "A HiperSockets "
+				"network traffic analyzer is already "
+				"active in the HiperSockets LAN\n");
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
+			cmd->data.diagass.action, QETH_CARD_IFNAME(card));
+	}
+
+	return 0;
+}
+
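+/* Issue a SET_DIAG_ASS command to query, enable or disable the HiperSockets
+ * network traffic analyzer (sniffer) trace.
+ */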
+static int
+qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd    *cmd;
+
+	QETH_DBF_TEXT(SETUP, 2, "diagtrac");
+
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.diagass.subcmd_len = 16;
+	cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
+	cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
+	cmd->data.diagass.action = diags_cmd;
+	return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
+}
+
+static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
+				struct net_device *dev)
+{
+	if (dev->type == ARPHRD_IEEE802_TR)
+		ip_tr_mc_map(ipm, mac);
+	else
+		ip_eth_mc_map(ipm, mac);
+}
+
+static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
+{
+	struct qeth_ipaddr *ipm;
+	struct ip_mc_list *im4;
+	char buf[MAX_ADDR_LEN];
+
+	QETH_CARD_TEXT(card, 4, "addmc");
+	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
+	     im4 = rcu_dereference(im4->next_rcu)) {
+		qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
+		ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
+		if (!ipm)
+			continue;
+		ipm->u.a4.addr = im4->multiaddr;
+		memcpy(ipm->mac, buf, OSA_ADDR_LEN);
+		ipm->is_multicast = 1;
+		if (!qeth_l3_add_ip(card, ipm))
+			kfree(ipm);
+	}
+}
+
+static void qeth_l3_add_vlan_mc(struct qeth_card *card)
+{
+	struct in_device *in_dev;
+	u16 vid;
+
+	QETH_CARD_TEXT(card, 4, "addmcvl");
+	if (!qeth_is_supported(card, IPA_FULL_VLAN))
+		return;
+
+	for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
+		struct net_device *netdev;
+
+		rcu_read_lock();
+		netdev = __vlan_find_dev_deep(card->dev, vid);
+		rcu_read_unlock();
+		if (netdev == NULL ||
+		    !(netdev->flags & IFF_UP))
+			continue;
+		in_dev = in_dev_get(netdev);
+		if (!in_dev)
+			continue;
+		rcu_read_lock();
+		qeth_l3_add_mc(card, in_dev);
+		rcu_read_unlock();
+		in_dev_put(in_dev);
+	}
+}
+
+static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
+{
+	struct in_device *in4_dev;
+
+	QETH_CARD_TEXT(card, 4, "chkmcv4");
+	in4_dev = in_dev_get(card->dev);
+	if (in4_dev == NULL)
+		return;
+	rcu_read_lock();
+	qeth_l3_add_mc(card, in4_dev);
+	qeth_l3_add_vlan_mc(card);
+	rcu_read_unlock();
+	in_dev_put(in4_dev);
+}
+
+#ifdef CONFIG_QETH_IPV6
+static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
+{
+	struct qeth_ipaddr *ipm;
+	struct ifmcaddr6 *im6;
+	char buf[MAX_ADDR_LEN];
+
+	QETH_CARD_TEXT(card, 4, "addmc6");
+	for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
+		ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
+		ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+		if (!ipm)
+			continue;
+		ipm->is_multicast = 1;
+		memcpy(ipm->mac, buf, OSA_ADDR_LEN);
+		memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
+		       sizeof(struct in6_addr));
+		if (!qeth_l3_add_ip(card, ipm))
+			kfree(ipm);
+	}
+}
+
+static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
+{
+	struct inet6_dev *in_dev;
+	u16 vid;
+
+	QETH_CARD_TEXT(card, 4, "admc6vl");
+	if (!qeth_is_supported(card, IPA_FULL_VLAN))
+		return;
+
+	for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
+		struct net_device *netdev;
+
+		netdev = __vlan_find_dev_deep(card->dev, vid);
+		if (netdev == NULL ||
+		    !(netdev->flags & IFF_UP))
+			continue;
+		in_dev = in6_dev_get(netdev);
+		if (!in_dev)
+			continue;
+		read_lock_bh(&in_dev->lock);
+		qeth_l3_add_mc6(card, in_dev);
+		read_unlock_bh(&in_dev->lock);
+		in6_dev_put(in_dev);
+	}
+}
+
+static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
+{
+	struct inet6_dev *in6_dev;
+
+	QETH_CARD_TEXT(card, 4, "chkmcv6");
+	if (!qeth_is_supported(card, IPA_IPV6))
+		return;
+	in6_dev = in6_dev_get(card->dev);
+	if (in6_dev == NULL)
+		return;
+	read_lock_bh(&in6_dev->lock);
+	qeth_l3_add_mc6(card, in6_dev);
+	qeth_l3_add_vlan_mc6(card);
+	read_unlock_bh(&in6_dev->lock);
+	in6_dev_put(in6_dev);
+}
+#endif /* CONFIG_QETH_IPV6 */
+
+static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
+			unsigned short vid)
+{
+	struct in_device *in_dev;
+	struct in_ifaddr *ifa;
+	struct qeth_ipaddr *addr;
+	struct net_device *netdev;
+
+	QETH_CARD_TEXT(card, 4, "frvaddr4");
+
+	netdev = __vlan_find_dev_deep(card->dev, vid);
+	if (!netdev)
+		return;
+	in_dev = in_dev_get(netdev);
+	if (!in_dev)
+		return;
+	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+		addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
+		if (addr) {
+			addr->u.a4.addr = ifa->ifa_address;
+			addr->u.a4.mask = ifa->ifa_mask;
+			addr->type = QETH_IP_TYPE_NORMAL;
+			if (!qeth_l3_delete_ip(card, addr))
+				kfree(addr);
+		}
+	}
+	in_dev_put(in_dev);
+}
+
+static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
+			unsigned short vid)
+{
+#ifdef CONFIG_QETH_IPV6
+	struct inet6_dev *in6_dev;
+	struct inet6_ifaddr *ifa;
+	struct qeth_ipaddr *addr;
+	struct net_device *netdev;
+
+	QETH_CARD_TEXT(card, 4, "frvaddr6");
+
+	netdev = __vlan_find_dev_deep(card->dev, vid);
+	if (!netdev)
+		return;
+	in6_dev = in6_dev_get(netdev);
+	if (!in6_dev)
+		return;
+	list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
+		addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+		if (addr) {
+			memcpy(&addr->u.a6.addr, &ifa->addr,
+			       sizeof(struct in6_addr));
+			addr->u.a6.pfxlen = ifa->prefix_len;
+			addr->type = QETH_IP_TYPE_NORMAL;
+			if (!qeth_l3_delete_ip(card, addr))
+				kfree(addr);
+		}
+	}
+	in6_dev_put(in6_dev);
+#endif /* CONFIG_QETH_IPV6 */
+}
+
+static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
+			unsigned short vid)
+{
+	qeth_l3_free_vlan_addresses4(card, vid);
+	qeth_l3_free_vlan_addresses6(card, vid);
+}
+
+static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	set_bit(vid, card->active_vlans);
+	return 0;
+}
+
+static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+	struct qeth_card *card = dev->ml_priv;
+	unsigned long flags;
+
+	QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "kidREC");
+		return 0;
+	}
+	spin_lock_irqsave(&card->vlanlock, flags);
+	/* unregister IP addresses of vlan device */
+	qeth_l3_free_vlan_addresses(card, vid);
+	clear_bit(vid, card->active_vlans);
+	spin_unlock_irqrestore(&card->vlanlock, flags);
+	qeth_l3_set_multicast_list(card->dev);
+	return 0;
+}
+
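+/* Rebuild the link-level information for an inbound layer-3 frame: fake a
+ * hard header, set the packet type from the cast flags, extract a VLAN tag
+ * if present (returned via *vlan_id, function result is 1 in that case) and
+ * set the checksum status according to the header extension flags.
+ */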
+static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
+			struct sk_buff *skb, struct qeth_hdr *hdr,
+			unsigned short *vlan_id)
+{
+	__be16 prot;
+	struct iphdr *ip_hdr;
+	unsigned char tg_addr[MAX_ADDR_LEN];
+	int is_vlan = 0;
+
+	if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
+		prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
+			      ETH_P_IP);
+		switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
+		case QETH_CAST_MULTICAST:
+			switch (prot) {
+#ifdef CONFIG_QETH_IPV6
+			case __constant_htons(ETH_P_IPV6):
+				ndisc_mc_map((struct in6_addr *)
+				     skb->data + 24,
+				     tg_addr, card->dev, 0);
+				break;
+#endif
+			case __constant_htons(ETH_P_IP):
+				ip_hdr = (struct iphdr *)skb->data;
+				(card->dev->type == ARPHRD_IEEE802_TR) ?
+				ip_tr_mc_map(ip_hdr->daddr, tg_addr):
+				ip_eth_mc_map(ip_hdr->daddr, tg_addr);
+				break;
+			default:
+				memcpy(tg_addr, card->dev->broadcast,
+					card->dev->addr_len);
+			}
+			card->stats.multicast++;
+			skb->pkt_type = PACKET_MULTICAST;
+			break;
+		case QETH_CAST_BROADCAST:
+			memcpy(tg_addr, card->dev->broadcast,
+				card->dev->addr_len);
+			card->stats.multicast++;
+			skb->pkt_type = PACKET_BROADCAST;
+			break;
+		case QETH_CAST_UNICAST:
+		case QETH_CAST_ANYCAST:
+		case QETH_CAST_NOCAST:
+		default:
+			if (card->options.sniffer)
+				skb->pkt_type = PACKET_OTHERHOST;
+			else
+				skb->pkt_type = PACKET_HOST;
+			memcpy(tg_addr, card->dev->dev_addr,
+				card->dev->addr_len);
+		}
+		if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
+			card->dev->header_ops->create(skb, card->dev, prot,
+				tg_addr, &hdr->hdr.l3.dest_addr[2],
+				card->dev->addr_len);
+		else
+			card->dev->header_ops->create(skb, card->dev, prot,
+				tg_addr, "FAKELL", card->dev->addr_len);
+	}
+
+#ifdef CONFIG_TR
+	if (card->dev->type == ARPHRD_IEEE802_TR)
+		skb->protocol = tr_type_trans(skb, card->dev);
+	else
+#endif
+		skb->protocol = eth_type_trans(skb, card->dev);
+
+	if (hdr->hdr.l3.ext_flags &
+	    (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
+		*vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
+		 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
+		is_vlan = 1;
+	}
+
+	if (card->dev->features & NETIF_F_RXCSUM) {
+		if ((hdr->hdr.l3.ext_flags &
+		    (QETH_HDR_EXT_CSUM_HDR_REQ |
+		     QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
+		    (QETH_HDR_EXT_CSUM_HDR_REQ |
+		     QETH_HDR_EXT_CSUM_TRANSP_REQ))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+	} else
+		skb->ip_summed = CHECKSUM_NONE;
+
+	return is_vlan;
+}
+
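+/* Pull received skbs out of the current input buffer until the buffer is
+ * exhausted (*done = 1) or the budget is used up; AF_IUCV and sniffer frames
+ * go to netif_receive_skb(), everything else to GRO.
+ */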
+static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
+				int budget, int *done)
+{
+	int work_done = 0;
+	struct sk_buff *skb;
+	struct qeth_hdr *hdr;
+	__u16 vlan_tag = 0;
+	int is_vlan;
+	unsigned int len;
+	__u16 magic;
+
+	*done = 0;
+	BUG_ON(!budget);
+	while (budget) {
+		skb = qeth_core_get_next_skb(card,
+			&card->qdio.in_q->bufs[card->rx.b_index],
+			&card->rx.b_element, &card->rx.e_offset, &hdr);
+		if (!skb) {
+			*done = 1;
+			break;
+		}
+		skb->dev = card->dev;
+		switch (hdr->hdr.l3.id) {
+		case QETH_HEADER_TYPE_LAYER3:
+			magic = *(__u16 *)skb->data;
+			if ((card->info.type == QETH_CARD_TYPE_IQD) &&
+			    (magic == ETH_P_AF_IUCV)) {
+				skb->protocol = ETH_P_AF_IUCV;
+				skb->pkt_type = PACKET_HOST;
+				skb->mac_header = NET_SKB_PAD;
+				skb->dev = card->dev;
+				len = skb->len;
+				card->dev->header_ops->create(skb, card->dev, 0,
+					card->dev->dev_addr, "FAKELL",
+					card->dev->addr_len);
+				netif_receive_skb(skb);
+			} else {
+				is_vlan = qeth_l3_rebuild_skb(card, skb, hdr,
+						      &vlan_tag);
+				len = skb->len;
+				if (is_vlan && !card->options.sniffer)
+					__vlan_hwaccel_put_tag(skb, vlan_tag);
+				napi_gro_receive(&card->napi, skb);
+			}
+			break;
+		case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
+			skb->pkt_type = PACKET_HOST;
+			skb->protocol = eth_type_trans(skb, skb->dev);
+			len = skb->len;
+			netif_receive_skb(skb);
+			break;
+		default:
+			dev_kfree_skb_any(skb);
+			QETH_CARD_TEXT(card, 3, "inbunkno");
+			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+			continue;
+		}
+		work_done++;
+		budget--;
+		card->stats.rx_packets++;
+		card->stats.rx_bytes += len;
+	}
+	return work_done;
+}
+
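+/* NAPI poll function: fetch completed QDIO input buffers, process them up to
+ * the budget, requeue emptied buffers and re-enable the QDIO interrupt once
+ * no more work is pending.
+ */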
+static int qeth_l3_poll(struct napi_struct *napi, int budget)
+{
+	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
+	int work_done = 0;
+	struct qeth_qdio_buffer *buffer;
+	int done;
+	int new_budget = budget;
+
+	if (card->options.performance_stats) {
+		card->perf_stats.inbound_cnt++;
+		card->perf_stats.inbound_start_time = qeth_get_micros();
+	}
+
+	while (1) {
+		if (!card->rx.b_count) {
+			card->rx.qdio_err = 0;
+			card->rx.b_count = qdio_get_next_buffers(
+				card->data.ccwdev, 0, &card->rx.b_index,
+				&card->rx.qdio_err);
+			if (card->rx.b_count <= 0) {
+				card->rx.b_count = 0;
+				break;
+			}
+			card->rx.b_element =
+				&card->qdio.in_q->bufs[card->rx.b_index]
+				.buffer->element[0];
+			card->rx.e_offset = 0;
+		}
+
+		while (card->rx.b_count) {
+			buffer = &card->qdio.in_q->bufs[card->rx.b_index];
+			if (!(card->rx.qdio_err &&
+			    qeth_check_qdio_errors(card, buffer->buffer,
+			    card->rx.qdio_err, "qinerr")))
+				work_done += qeth_l3_process_inbound_buffer(
+					card, new_budget, &done);
+			else
+				done = 1;
+
+			if (done) {
+				if (card->options.performance_stats)
+					card->perf_stats.bufs_rec++;
+				qeth_put_buffer_pool_entry(card,
+					buffer->pool_entry);
+				qeth_queue_input_buffer(card, card->rx.b_index);
+				card->rx.b_count--;
+				if (card->rx.b_count) {
+					card->rx.b_index =
+						(card->rx.b_index + 1) %
+						QDIO_MAX_BUFFERS_PER_Q;
+					card->rx.b_element =
+						&card->qdio.in_q
+						->bufs[card->rx.b_index]
+						.buffer->element[0];
+					card->rx.e_offset = 0;
+				}
+			}
+
+			if (work_done >= budget)
+				goto out;
+			else
+				new_budget = budget - work_done;
+		}
+	}
+
+	napi_complete(napi);
+	if (qdio_start_irq(card->data.ccwdev, 0))
+		napi_schedule(&card->napi);
+out:
+	if (card->options.performance_stats)
+		card->perf_stats.inbound_time += qeth_get_micros() -
+			card->perf_stats.inbound_start_time;
+	return work_done;
+}
+
+static int qeth_l3_verify_vlan_dev(struct net_device *dev,
+			struct qeth_card *card)
+{
+	int rc = 0;
+	u16 vid;
+
+	for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
+		struct net_device *netdev;
+
+		rcu_read_lock();
+		netdev = __vlan_find_dev_deep(dev, vid);
+		rcu_read_unlock();
+		if (netdev == dev) {
+			rc = QETH_VLAN_CARD;
+			break;
+		}
+	}
+
+	if (rc && !(vlan_dev_real_dev(dev)->ml_priv == (void *)card))
+		return 0;
+
+	return rc;
+}
+
+static int qeth_l3_verify_dev(struct net_device *dev)
+{
+	struct qeth_card *card;
+	unsigned long flags;
+	int rc = 0;
+
+	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
+	list_for_each_entry(card, &qeth_core_card_list.list, list) {
+		if (card->dev == dev) {
+			rc = QETH_REAL_CARD;
+			break;
+		}
+		rc = qeth_l3_verify_vlan_dev(dev, card);
+		if (rc)
+			break;
+	}
+	read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+
+	return rc;
+}
+
+static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
+{
+	struct qeth_card *card = NULL;
+	int rc;
+
+	rc = qeth_l3_verify_dev(dev);
+	if (rc == QETH_REAL_CARD)
+		card = dev->ml_priv;
+	else if (rc == QETH_VLAN_CARD)
+		card = vlan_dev_real_dev(dev)->ml_priv;
+	if (card && card->options.layer2)
+		card = NULL;
+	if (card)
+		QETH_CARD_TEXT_(card, 4, "%d", rc);
+	return card;
+}
+
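+/* Take the card down through its state machine: stop the interface, clear
+ * IP and pending command lists, clear the QDIO queues and finally release
+ * the command buffers.
+ */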
+static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
+{
+	int rc = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "stopcard");
+	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+
+	qeth_set_allowed_threads(card, 0, 1);
+	if (card->options.sniffer &&
+	    (card->info.promisc_mode == SET_PROMISC_MODE_ON))
+		qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
+	if (card->read.state == CH_STATE_UP &&
+	    card->write.state == CH_STATE_UP &&
+	    (card->state == CARD_STATE_UP)) {
+		if (recovery_mode)
+			qeth_l3_stop(card->dev);
+		else {
+			rtnl_lock();
+			dev_close(card->dev);
+			rtnl_unlock();
+		}
+		card->state = CARD_STATE_SOFTSETUP;
+	}
+	if (card->state == CARD_STATE_SOFTSETUP) {
+		qeth_l3_clear_ip_list(card, 1);
+		qeth_clear_ipacmd_list(card);
+		card->state = CARD_STATE_HARDSETUP;
+	}
+	if (card->state == CARD_STATE_HARDSETUP) {
+		qeth_qdio_clear_card(card, 0);
+		qeth_clear_qdio_buffers(card);
+		qeth_clear_working_pool_list(card);
+		card->state = CARD_STATE_DOWN;
+	}
+	if (card->state == CARD_STATE_DOWN) {
+		qeth_clear_cmd_buffers(&card->read);
+		qeth_clear_cmd_buffers(&card->write);
+	}
+	return rc;
+}
+
+/*
+ * Test for and switch promiscuous mode (on or off),
+ * either for GuestLAN or the HiperSockets sniffer.
+ */
+static void
+qeth_l3_handle_promisc_mode(struct qeth_card *card)
+{
+	struct net_device *dev = card->dev;
+
+	if (((dev->flags & IFF_PROMISC) &&
+	     (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
+	    (!(dev->flags & IFF_PROMISC) &&
+	     (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
+		return;
+
+	if (card->info.guestlan) {		/* Guestlan trace */
+		if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+			qeth_setadp_promisc_mode(card);
+	} else if (card->options.sniffer &&	/* HiperSockets trace */
+		   qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+		if (dev->flags & IFF_PROMISC) {
+			QETH_CARD_TEXT(card, 3, "+promisc");
+			qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
+		} else {
+			QETH_CARD_TEXT(card, 3, "-promisc");
+			qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
+		}
+	}
+}
+
+static void qeth_l3_set_multicast_list(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 3, "setmulti");
+	if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
+	    (card->state != CARD_STATE_UP))
+		return;
+	if (!card->options.sniffer) {
+		qeth_l3_delete_mc_addresses(card);
+		qeth_l3_add_multicast_ipv4(card);
+#ifdef CONFIG_QETH_IPV6
+		qeth_l3_add_multicast_ipv6(card);
+#endif
+		qeth_l3_set_ip_addr_list(card);
+		if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+			return;
+	}
+	qeth_l3_handle_promisc_mode(card);
+}
+
+static const char *qeth_l3_arp_get_error_cause(int *rc)
+{
+	switch (*rc) {
+	case QETH_IPA_ARP_RC_FAILED:
+		*rc = -EIO;
+		return "operation failed";
+	case QETH_IPA_ARP_RC_NOTSUPP:
+		*rc = -EOPNOTSUPP;
+		return "operation not supported";
+	case QETH_IPA_ARP_RC_OUT_OF_RANGE:
+		*rc = -EINVAL;
+		return "argument out of range";
+	case QETH_IPA_ARP_RC_Q_NOTSUPP:
+		*rc = -EOPNOTSUPP;
+		return "query operation not supported";
+	case QETH_IPA_ARP_RC_Q_NO_DATA:
+		*rc = -ENOENT;
+		return "no query data available";
+	default:
+		return "unknown error";
+	}
+}
+
+static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
+{
+	int tmp;
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "arpstnoe");
+
+	/*
+	 * currently GuestLAN only supports the ARP assist function
+	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
+	 * thus we say EOPNOTSUPP for this ARP function
+	 */
+	if (card->info.guestlan)
+		return -EOPNOTSUPP;
+	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
+		return -EOPNOTSUPP;
+	}
+	rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
+					  IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
+					  no_entries);
+	if (rc) {
+		tmp = rc;
+		QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on "
+			"%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
+			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
+	}
+	return rc;
+}
+
+static __u32 get_arp_entry_size(struct qeth_card *card,
+			struct qeth_arp_query_data *qdata,
+			struct qeth_arp_entrytype *type, __u8 strip_entries)
+{
+	__u32 rc;
+	__u8 is_hsi;
+
+	is_hsi = qdata->reply_bits == 5;
+	if (type->ip == QETHARP_IP_ADDR_V4) {
+		QETH_CARD_TEXT(card, 4, "arpev4");
+		if (strip_entries) {
+			rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) :
+				sizeof(struct qeth_arp_qi_entry7_short);
+		} else {
+			rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) :
+				sizeof(struct qeth_arp_qi_entry7);
+		}
+	} else if (type->ip == QETHARP_IP_ADDR_V6) {
+		QETH_CARD_TEXT(card, 4, "arpev6");
+		if (strip_entries) {
+			rc = is_hsi ?
+				sizeof(struct qeth_arp_qi_entry5_short_ipv6) :
+				sizeof(struct qeth_arp_qi_entry7_short_ipv6);
+		} else {
+			rc = is_hsi ?
+				sizeof(struct qeth_arp_qi_entry5_ipv6) :
+				sizeof(struct qeth_arp_qi_entry7_ipv6);
+		}
+	} else {
+		QETH_CARD_TEXT(card, 4, "arpinv");
+		rc = 0;
+	}
+
+	return rc;
+}
+
+static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot)
+{
+	return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) ||
+		(type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6);
+}
+
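+/* Callback for IPA_CMD_ASS_ARP_QUERY_INFO: copy the returned ARP entries
+ * (optionally stripped of media-specific bytes) into the query result
+ * buffer and keep waiting until all replies have been received.
+ */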
+static int qeth_l3_arp_query_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_arp_query_data *qdata;
+	struct qeth_arp_query_info *qinfo;
+	int i;
+	int e;
+	int entrybytes_done;
+	int stripped_bytes;
+	__u8 do_strip_entries;
+
+	QETH_CARD_TEXT(card, 3, "arpquecb");
+
+	qinfo = (struct qeth_arp_query_info *) reply->param;
+	cmd = (struct qeth_ipa_cmd *) data;
+	QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version);
+	if (cmd->hdr.return_code) {
+		QETH_CARD_TEXT(card, 4, "arpcberr");
+		QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
+		return 0;
+	}
+	if (cmd->data.setassparms.hdr.return_code) {
+		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+		QETH_CARD_TEXT(card, 4, "setaperr");
+		QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
+		return 0;
+	}
+	qdata = &cmd->data.setassparms.data.query_arp;
+	QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);
+
+	do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0;
+	stripped_bytes = do_strip_entries ? QETH_QARP_MEDIASPECIFIC_BYTES : 0;
+	entrybytes_done = 0;
+	for (e = 0; e < qdata->no_entries; ++e) {
+		char *cur_entry;
+		__u32 esize;
+		struct qeth_arp_entrytype *etype;
+
+		cur_entry = &qdata->data + entrybytes_done;
+		etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type;
+		if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) {
+			QETH_CARD_TEXT(card, 4, "pmis");
+			QETH_CARD_TEXT_(card, 4, "%i", etype->ip);
+			break;
+		}
+		esize = get_arp_entry_size(card, qdata, etype,
+			do_strip_entries);
+		QETH_CARD_TEXT_(card, 5, "esz%i", esize);
+		if (!esize)
+			break;
+
+		if ((qinfo->udata_len - qinfo->udata_offset) < esize) {
+			QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
+			cmd->hdr.return_code = IPA_RC_ENOMEM;
+			goto out_error;
+		}
+
+		memcpy(qinfo->udata + qinfo->udata_offset,
+			&qdata->data + entrybytes_done + stripped_bytes,
+			esize);
+		entrybytes_done += esize + stripped_bytes;
+		qinfo->udata_offset += esize;
+		++qinfo->no_entries;
+	}
+	/* check if all replies received ... */
+	if (cmd->data.setassparms.hdr.seq_no <
+	    cmd->data.setassparms.hdr.number_of_replies)
+		return 1;
+	QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries);
+	memcpy(qinfo->udata, &qinfo->no_entries, 4);
+	/* keep STRIP_ENTRIES flag so the user program can distinguish
+	 * stripped entries from normal ones */
+	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
+		qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
+	memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
+	QETH_CARD_TEXT_(card, 4, "rc%i", 0);
+	return 0;
+out_error:
+	i = 0;
+	memcpy(qinfo->udata, &i, 4);
+	return 0;
+}
+
+static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
+		struct qeth_cmd_buffer *iob, int len,
+		int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
+			unsigned long),
+		void *reply_param)
+{
+	QETH_CARD_TEXT(card, 4, "sendarp");
+
+	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
+	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
+	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
+				      reply_cb, reply_param);
+}
+
+static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
+	enum qeth_prot_versions prot,
+	struct qeth_arp_query_info *qinfo)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+	int tmp;
+	int rc;
+
+	QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
+
+	iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+			IPA_CMD_ASS_ARP_QUERY_INFO,
+			sizeof(struct qeth_arp_query_data) - sizeof(char),
+			prot);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
+	cmd->data.setassparms.data.query_arp.reply_bits = 0;
+	cmd->data.setassparms.data.query_arp.no_entries = 0;
+	rc = qeth_l3_send_ipa_arp_cmd(card, iob,
+			   QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
+			   qeth_l3_arp_query_cb, (void *)qinfo);
+	if (rc) {
+		tmp = rc;
+		QETH_DBF_MESSAGE(2,
+			"Error while querying ARP cache on %s: %s "
+			"(0x%x/%d)\n", QETH_CARD_IFNAME(card),
+			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
+	}
+
+	return rc;
+}
+
+static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
+{
+	struct qeth_arp_query_info qinfo = {0, };
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "arpquery");
+
+	if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
+			       IPA_ARP_PROCESSING)) {
+		QETH_CARD_TEXT(card, 3, "arpqnsup");
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+	/* get size of userspace buffer and mask_bits -> 6 bytes */
+	if (copy_from_user(&qinfo, udata, 6)) {
+		rc = -EFAULT;
+		goto out;
+	}
+	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
+	if (!qinfo.udata) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
+	rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo);
+	if (rc) {
+		if (copy_to_user(udata, qinfo.udata, 4))
+			rc = -EFAULT;
+		goto free_and_out;
+	} else {
+#ifdef CONFIG_QETH_IPV6
+		if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
+			/* fails in case of GuestLAN QDIO mode */
+			qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6,
+				&qinfo);
+		}
+#endif
+		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
+			QETH_CARD_TEXT(card, 4, "qactf");
+			rc = -EFAULT;
+			goto free_and_out;
+		}
+		QETH_CARD_TEXT_(card, 4, "qacts");
+	}
+free_and_out:
+	kfree(qinfo.udata);
+out:
+	return rc;
+}
+
+static int qeth_l3_arp_add_entry(struct qeth_card *card,
+				struct qeth_arp_cache_entry *entry)
+{
+	struct qeth_cmd_buffer *iob;
+	char buf[16];
+	int tmp;
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "arpadent");
+
+	/*
+	 * currently GuestLAN only supports the ARP assist function
+	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
+	 * thus we say EOPNOTSUPP for this ARP function
+	 */
+	if (card->info.guestlan)
+		return -EOPNOTSUPP;
+	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
+		return -EOPNOTSUPP;
+	}
+
+	iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+				       IPA_CMD_ASS_ARP_ADD_ENTRY,
+				       sizeof(struct qeth_arp_cache_entry),
+				       QETH_PROT_IPV4);
+	rc = qeth_l3_send_setassparms(card, iob,
+				   sizeof(struct qeth_arp_cache_entry),
+				   (unsigned long) entry,
+				   qeth_l3_default_setassparms_cb, NULL);
+	if (rc) {
+		tmp = rc;
+		qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
+		QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s "
+			"on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
+			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
+	}
+	return rc;
+}
+
+static int qeth_l3_arp_remove_entry(struct qeth_card *card,
+				struct qeth_arp_cache_entry *entry)
+{
+	struct qeth_cmd_buffer *iob;
+	char buf[16] = {0, };
+	int tmp;
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "arprment");
+
+	/*
+	 * currently GuestLAN only supports the ARP assist function
+	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
+	 * thus we say EOPNOTSUPP for this ARP function
+	 */
+	if (card->info.guestlan)
+		return -EOPNOTSUPP;
+	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
+		return -EOPNOTSUPP;
+	}
+	memcpy(buf, entry, 12);
+	iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+				       IPA_CMD_ASS_ARP_REMOVE_ENTRY,
+				       12,
+				       QETH_PROT_IPV4);
+	rc = qeth_l3_send_setassparms(card, iob,
+				   12, (unsigned long)buf,
+				   qeth_l3_default_setassparms_cb, NULL);
+	if (rc) {
+		tmp = rc;
+		memset(buf, 0, 16);
+		qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
+		QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s"
+			" on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
+			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
+	}
+	return rc;
+}
+
+static int qeth_l3_arp_flush_cache(struct qeth_card *card)
+{
+	int rc;
+	int tmp;
+
+	QETH_CARD_TEXT(card, 3, "arpflush");
+
+	/*
+	 * currently GuestLAN only supports the ARP assist function
+	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
+	 * thus we say EOPNOTSUPP for this ARP function
+	 */
+	if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
+		return -EOPNOTSUPP;
+	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
+		return -EOPNOTSUPP;
+	}
+	rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
+					  IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
+	if (rc) {
+		tmp = rc;
+		QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s "
+			"(0x%x/%d)\n", QETH_CARD_IFNAME(card),
+			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
+	}
+	return rc;
+}
+
+static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct qeth_card *card = dev->ml_priv;
+	struct qeth_arp_cache_entry arp_entry;
+	struct mii_ioctl_data *mii_data;
+	int rc = 0;
+
+	if (!card)
+		return -ENODEV;
+
+	if ((card->state != CARD_STATE_UP) &&
+		(card->state != CARD_STATE_SOFTSETUP))
+		return -ENODEV;
+
+	switch (cmd) {
+	case SIOC_QETH_ARP_SET_NO_ENTRIES:
+		if (!capable(CAP_NET_ADMIN)) {
+			rc = -EPERM;
+			break;
+		}
+		rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
+		break;
+	case SIOC_QETH_ARP_QUERY_INFO:
+		if (!capable(CAP_NET_ADMIN)) {
+			rc = -EPERM;
+			break;
+		}
+		rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
+		break;
+	case SIOC_QETH_ARP_ADD_ENTRY:
+		if (!capable(CAP_NET_ADMIN)) {
+			rc = -EPERM;
+			break;
+		}
+		if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
+				   sizeof(struct qeth_arp_cache_entry)))
+			rc = -EFAULT;
+		else
+			rc = qeth_l3_arp_add_entry(card, &arp_entry);
+		break;
+	case SIOC_QETH_ARP_REMOVE_ENTRY:
+		if (!capable(CAP_NET_ADMIN)) {
+			rc = -EPERM;
+			break;
+		}
+		if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
+				   sizeof(struct qeth_arp_cache_entry)))
+			rc = -EFAULT;
+		else
+			rc = qeth_l3_arp_remove_entry(card, &arp_entry);
+		break;
+	case SIOC_QETH_ARP_FLUSH_CACHE:
+		if (!capable(CAP_NET_ADMIN)) {
+			rc = -EPERM;
+			break;
+		}
+		rc = qeth_l3_arp_flush_cache(card);
+		break;
+	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
+		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
+		break;
+	case SIOC_QETH_GET_CARD_TYPE:
+		if ((card->info.type == QETH_CARD_TYPE_OSD ||
+		     card->info.type == QETH_CARD_TYPE_OSX) &&
+		    !card->info.guestlan)
+			return 1;
+		return 0;
+	case SIOCGMIIPHY:
+		mii_data = if_mii(rq);
+		mii_data->phy_id = 0;
+		break;
+	case SIOCGMIIREG:
+		mii_data = if_mii(rq);
+		if (mii_data->phy_id != 0)
+			rc = -EINVAL;
+		else
+			mii_data->val_out = qeth_mdio_read(dev,
+							mii_data->phy_id,
+							mii_data->reg_num);
+		break;
+	case SIOC_QETH_QUERY_OAT:
+		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
+		break;
+	default:
+		rc = -EOPNOTSUPP;
+	}
+	if (rc)
+		QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
+	return rc;
+}
+
+inline int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
+{
+	int cast_type = RTN_UNSPEC;
+	struct neighbour *n = NULL;
+	struct dst_entry *dst;
+
+	rcu_read_lock();
+	dst = skb_dst(skb);
+	if (dst)
+		n = dst_get_neighbour_noref(dst);
+	if (n) {
+		cast_type = n->type;
+		rcu_read_unlock();
+		if ((cast_type == RTN_BROADCAST) ||
+		    (cast_type == RTN_MULTICAST) ||
+		    (cast_type == RTN_ANYCAST))
+			return cast_type;
+		else
+			return RTN_UNSPEC;
+	}
+	rcu_read_unlock();
+
+	/* try something else */
+	if (skb->protocol == ETH_P_IPV6)
+		return (skb_network_header(skb)[24] == 0xff) ?
+				RTN_MULTICAST : 0;
+	else if (skb->protocol == ETH_P_IP)
+		return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
+				RTN_MULTICAST : 0;
+	/* ... */
+	if (!memcmp(skb->data, skb->dev->broadcast, 6))
+		return RTN_BROADCAST;
+	else {
+		u16 hdr_mac;
+
+		hdr_mac = *((u16 *)skb->data);
+		/* tr multicast? */
+		switch (card->info.link_type) {
+		case QETH_LINK_TYPE_HSTR:
+		case QETH_LINK_TYPE_LANE_TR:
+			if ((hdr_mac == QETH_TR_MAC_NC) ||
+			    (hdr_mac == QETH_TR_MAC_C))
+				return RTN_MULTICAST;
+			break;
+		/* eth or so multicast? */
+		default:
+			if ((hdr_mac == QETH_ETH_MAC_V4) ||
+			    (hdr_mac == QETH_ETH_MAC_V6))
+				return RTN_MULTICAST;
+		}
+	}
+	return cast_type;
+}
+
+static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
+		struct qeth_hdr *hdr, struct sk_buff *skb)
+{
+	char daddr[16];
+	struct af_iucv_trans_hdr *iucv_hdr;
+
+	skb_pull(skb, 14);
+	card->dev->header_ops->create(skb, card->dev, 0,
+				      card->dev->dev_addr, card->dev->dev_addr,
+				      card->dev->addr_len);
+	skb_pull(skb, 14);
+	iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
+	memset(hdr, 0, sizeof(struct qeth_hdr));
+	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
+	hdr->hdr.l3.ext_flags = 0;
+	hdr->hdr.l3.length = skb->len;
+	hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
+	memset(daddr, 0, sizeof(daddr));
+	daddr[0] = 0xfe;
+	daddr[1] = 0x80;
+	memcpy(&daddr[8], iucv_hdr->destUserID, 8);
+	memcpy(hdr->hdr.l3.dest_addr, daddr, 16);
+}
+
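+/* Build the qeth layer-3 header for an outgoing frame: set VLAN extension
+ * flags, cast-type flags and the destination (next-hop IP for v4/v6,
+ * passthrough with cast flags otherwise).
+ */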
+static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
+		struct sk_buff *skb, int ipv, int cast_type)
+{
+	struct dst_entry *dst;
+
+	memset(hdr, 0, sizeof(struct qeth_hdr));
+	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
+	hdr->hdr.l3.ext_flags = 0;
+
+	/*
+	 * Add the VLAN tag here, before this location is overwritten with
+	 * the next-hop IP. v6 uses passthrough, v4 sets the tag in the
+	 * QDIO header.
+	 */
+	if (vlan_tx_tag_present(skb)) {
+		if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
+			hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
+		else
+			hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
+		hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
+	}
+
+	hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
+
+	rcu_read_lock();
+	dst = skb_dst(skb);
+	if (ipv == 4) {
+		struct rtable *rt = (struct rtable *) dst;
+		__be32 *pkey = &ip_hdr(skb)->daddr;
+
+		if (rt->rt_gateway)
+			pkey = &rt->rt_gateway;
+
+		/* IPv4 */
+		hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
+		memset(hdr->hdr.l3.dest_addr, 0, 12);
+		*((__be32 *) (&hdr->hdr.l3.dest_addr[12])) = *pkey;
+	} else if (ipv == 6) {
+		struct rt6_info *rt = (struct rt6_info *) dst;
+		struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
+
+		if (!ipv6_addr_any(&rt->rt6i_gateway))
+			pkey = &rt->rt6i_gateway;
+
+		/* IPv6 */
+		hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
+		if (card->info.type == QETH_CARD_TYPE_IQD)
+			hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
+		memcpy(hdr->hdr.l3.dest_addr, pkey, 16);
+	} else {
+		/* passthrough */
+		if ((skb->dev->type == ARPHRD_IEEE802_TR) &&
+			!memcmp(skb->data + sizeof(struct qeth_hdr) +
+			sizeof(__u16), skb->dev->broadcast, 6)) {
+			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
+						QETH_HDR_PASSTHRU;
+		} else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
+			    skb->dev->broadcast, 6)) {
+			/* broadcast? */
+			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
+						QETH_HDR_PASSTHRU;
+		} else {
+			hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
+				QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
+				QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
+		}
+	}
+	rcu_read_unlock();
+}
+
+static inline void qeth_l3_hdr_csum(struct qeth_card *card,
+		struct qeth_hdr *hdr, struct sk_buff *skb)
+{
+	struct iphdr *iph = ip_hdr(skb);
+
+	/* tcph->check contains already the pseudo hdr checksum
+	 * so just set the header flags
+	 */
+	if (iph->protocol == IPPROTO_UDP)
+		hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP;
+	hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ |
+		QETH_HDR_EXT_CSUM_HDR_REQ;
+	iph->check = 0;
+	if (card->options.performance_stats)
+		card->perf_stats.tx_csum++;
+}
+
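+/* Turn the already filled layer-3 header into a TSO header: append the TSO
+ * extension with MSS and header lengths and prime the TCP/IP checksum
+ * fields the way the adapter expects them.
+ */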
+static void qeth_tso_fill_header(struct qeth_card *card,
+		struct qeth_hdr *qhdr, struct sk_buff *skb)
+{
+	struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
+	struct tcphdr *tcph = tcp_hdr(skb);
+	struct iphdr *iph = ip_hdr(skb);
+	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+	/* fix header to TSO values ... */
+	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
+	hdr->hdr.hdr.l3.length = skb->len - sizeof(struct qeth_hdr_tso);
+	/* set values which are fixed for the first approach ... */
+	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
+	hdr->ext.imb_hdr_no  = 1;
+	hdr->ext.hdr_type    = 1;
+	hdr->ext.hdr_version = 1;
+	hdr->ext.hdr_len     = 28;
+	/* insert non-fixed values */
+	hdr->ext.mss = skb_shinfo(skb)->gso_size;
+	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
+	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
+				       sizeof(struct qeth_hdr_tso));
+	tcph->check = 0;
+	if (skb->protocol == ETH_P_IPV6) {
+		ip6h->payload_len = 0;
+		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					       0, IPPROTO_TCP, 0);
+	} else {
+		/* OSA wants us to set these values ... */
+		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+					 0, IPPROTO_TCP, 0);
+		iph->tot_len = 0;
+		iph->check = 0;
+	}
+}
+
+static inline int qeth_l3_tso_elements(struct sk_buff *skb)
+{
+	unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
+		tcp_hdr(skb)->doff * 4;
+	int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
+	int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd);
+	elements += skb_shinfo(skb)->nr_frags;
+	return elements;
+}
+
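+/* Main transmit path: pick the output queue, build (and if necessary
+ * linearize) a writable copy of the skb, prepend the qeth or TSO header and
+ * hand the frame to the QDIO send routines; the packet is dropped on any
+ * unrecoverable error.
+ */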
+static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	int rc;
+	u16 *tag;
+	struct qeth_hdr *hdr = NULL;
+	int elements_needed = 0;
+	int elems;
+	struct qeth_card *card = dev->ml_priv;
+	struct sk_buff *new_skb = NULL;
+	int ipv = qeth_get_ip_version(skb);
+	int cast_type = qeth_l3_get_cast_type(card, skb);
+	struct qeth_qdio_out_q *queue = card->qdio.out_qs
+		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
+	int tx_bytes = skb->len;
+	bool large_send;
+	int data_offset = -1;
+	int nr_frags;
+
+	if (((card->info.type == QETH_CARD_TYPE_IQD) &&
+	     (((card->options.cq != QETH_CQ_ENABLED) && !ipv) ||
+	      ((card->options.cq == QETH_CQ_ENABLED) &&
+	       (skb->protocol != ETH_P_AF_IUCV)))) ||
+	    card->options.sniffer)
+			goto tx_drop;
+
+	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
+		card->stats.tx_carrier_errors++;
+		goto tx_drop;
+	}
+
+	if ((cast_type == RTN_BROADCAST) &&
+	    (card->info.broadcast_capable == 0))
+		goto tx_drop;
+
+	if (card->options.performance_stats) {
+		card->perf_stats.outbound_cnt++;
+		card->perf_stats.outbound_start_time = qeth_get_micros();
+	}
+
+	large_send = skb_is_gso(skb);
+
+	if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
+	    (skb_shinfo(skb)->nr_frags == 0)) {
+		new_skb = skb;
+		if (new_skb->protocol == ETH_P_AF_IUCV)
+			data_offset = 0;
+		else
+			data_offset = ETH_HLEN;
+		hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+		if (!hdr)
+			goto tx_drop;
+		elements_needed++;
+	} else {
+		/* create a clone with writeable headroom */
+		new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso)
+					+ VLAN_HLEN);
+		if (!new_skb)
+			goto tx_drop;
+	}
+
+	if (card->info.type == QETH_CARD_TYPE_IQD) {
+		if (data_offset < 0)
+			skb_pull(new_skb, ETH_HLEN);
+	} else {
+		if (ipv == 4) {
+			if (card->dev->type == ARPHRD_IEEE802_TR)
+				skb_pull(new_skb, TR_HLEN);
+			else
+				skb_pull(new_skb, ETH_HLEN);
+		}
+
+		if (ipv != 4 && vlan_tx_tag_present(new_skb)) {
+			skb_push(new_skb, VLAN_HLEN);
+			skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
+			skb_copy_to_linear_data_offset(new_skb, 4,
+				new_skb->data + 8, 4);
+			skb_copy_to_linear_data_offset(new_skb, 8,
+				new_skb->data + 12, 4);
+			tag = (u16 *)(new_skb->data + 12);
+			*tag = __constant_htons(ETH_P_8021Q);
+			*(tag + 1) = htons(vlan_tx_tag_get(new_skb));
+		}
+	}
+
+	netif_stop_queue(dev);
+
+	/* fix hardware limitation: as long as we do not have sbal
+	 * chaining we cannot send long frag lists
+	 */
+	if (large_send) {
+		if (qeth_l3_tso_elements(new_skb) + 1 > 16) {
+			if (skb_linearize(new_skb))
+				goto tx_drop;
+			if (card->options.performance_stats)
+				card->perf_stats.tx_lin++;
+		}
+	}
+
+	if (large_send && (cast_type == RTN_UNSPEC)) {
+		hdr = (struct qeth_hdr *)skb_push(new_skb,
+						sizeof(struct qeth_hdr_tso));
+		memset(hdr, 0, sizeof(struct qeth_hdr_tso));
+		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
+		qeth_tso_fill_header(card, hdr, new_skb);
+		elements_needed++;
+	} else {
+		if (data_offset < 0) {
+			hdr = (struct qeth_hdr *)skb_push(new_skb,
+						sizeof(struct qeth_hdr));
+			qeth_l3_fill_header(card, hdr, new_skb, ipv,
+						cast_type);
+		} else {
+			if (new_skb->protocol == ETH_P_AF_IUCV)
+				qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb);
+			else {
+				qeth_l3_fill_header(card, hdr, new_skb, ipv,
+							cast_type);
+				hdr->hdr.l3.length = new_skb->len - data_offset;
+			}
+		}
+
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			qeth_l3_hdr_csum(card, hdr, new_skb);
+	}
+
+	elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
+						 elements_needed);
+	if (!elems) {
+		if (data_offset >= 0)
+			kmem_cache_free(qeth_core_header_cache, hdr);
+		goto tx_drop;
+	}
+	elements_needed += elems;
+	nr_frags = skb_shinfo(new_skb)->nr_frags;
+
+	if (card->info.type != QETH_CARD_TYPE_IQD) {
+		int len;
+		if (large_send)
+			len = ((unsigned long)tcp_hdr(new_skb) +
+				tcp_hdr(new_skb)->doff * 4) -
+				(unsigned long)new_skb->data;
+		else
+			len = sizeof(struct qeth_hdr_layer3);
+
+		if (qeth_hdr_chk_and_bounce(new_skb, len))
+			goto tx_drop;
+		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
+					 elements_needed);
+	} else
+		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
+					elements_needed, data_offset, 0);
+
+	if (!rc) {
+		card->stats.tx_packets++;
+		card->stats.tx_bytes += tx_bytes;
+		if (new_skb != skb)
+			dev_kfree_skb_any(skb);
+		if (card->options.performance_stats) {
+			if (large_send) {
+				card->perf_stats.large_send_bytes += tx_bytes;
+				card->perf_stats.large_send_cnt++;
+			}
+			if (nr_frags) {
+				card->perf_stats.sg_skbs_sent++;
+				/* nr_frags + skb->data */
+				card->perf_stats.sg_frags_sent += nr_frags + 1;
+			}
+		}
+		rc = NETDEV_TX_OK;
+	} else {
+		if (data_offset >= 0)
+			kmem_cache_free(qeth_core_header_cache, hdr);
+
+		if (rc == -EBUSY) {
+			if (new_skb != skb)
+				dev_kfree_skb_any(new_skb);
+			return NETDEV_TX_BUSY;
+		} else
+			goto tx_drop;
+	}
+
+	netif_wake_queue(dev);
+	if (card->options.performance_stats)
+		card->perf_stats.outbound_time += qeth_get_micros() -
+			card->perf_stats.outbound_start_time;
+	return rc;
+
+tx_drop:
+	card->stats.tx_dropped++;
+	card->stats.tx_errors++;
+	if ((new_skb != skb) && new_skb)
+		dev_kfree_skb_any(new_skb);
+	dev_kfree_skb_any(skb);
+	netif_wake_queue(dev);
+	return NETDEV_TX_OK;
+}
+
+static int __qeth_l3_open(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 4, "qethopen");
+	if (card->state == CARD_STATE_UP)
+		return rc;
+	if (card->state != CARD_STATE_SOFTSETUP)
+		return -ENODEV;
+	card->data.state = CH_STATE_UP;
+	card->state = CARD_STATE_UP;
+	netif_start_queue(dev);
+
+	if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
+		napi_enable(&card->napi);
+		napi_schedule(&card->napi);
+	} else
+		rc = -EIO;
+	return rc;
+}
+
+static int qeth_l3_open(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 5, "qethope_");
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "openREC");
+		return -ERESTARTSYS;
+	}
+	return __qeth_l3_open(dev);
+}
+
+static int qeth_l3_stop(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 4, "qethstop");
+	netif_tx_disable(dev);
+	if (card->state == CARD_STATE_UP) {
+		card->state = CARD_STATE_SOFTSETUP;
+		napi_disable(&card->napi);
+	}
+	return 0;
+}
+
+static netdev_features_t qeth_l3_fix_features(struct net_device *dev,
+	netdev_features_t features)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
+		features &= ~NETIF_F_IP_CSUM;
+	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
+		features &= ~NETIF_F_TSO;
+	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
+		features &= ~NETIF_F_RXCSUM;
+
+	return features;
+}
+
+static int qeth_l3_set_features(struct net_device *dev,
+	netdev_features_t features)
+{
+	struct qeth_card *card = dev->ml_priv;
+	u32 changed = dev->features ^ features;
+	int err;
+
+	if (!(changed & NETIF_F_RXCSUM))
+		return 0;
+
+	if (card->state == CARD_STATE_DOWN ||
+	    card->state == CARD_STATE_RECOVER)
+		return 0;
+
+	err = qeth_l3_set_rx_csum(card, features & NETIF_F_RXCSUM);
+	if (err)
+		dev->features = features ^ NETIF_F_RXCSUM;
+
+	return err;
+}
+
+static const struct ethtool_ops qeth_l3_ethtool_ops = {
+	.get_link = ethtool_op_get_link,
+	.get_strings = qeth_core_get_strings,
+	.get_ethtool_stats = qeth_core_get_ethtool_stats,
+	.get_sset_count = qeth_core_get_sset_count,
+	.get_drvinfo = qeth_core_get_drvinfo,
+	.get_settings = qeth_core_ethtool_get_settings,
+};
+
+/*
+ * We need NOARP for IPv4, but we still want neighbor solicitation for IPv6.
+ * Setting NOARP on the netdevice is not an option because it also turns off
+ * neighbor solicitation. For IPv4 we therefore install a neighbor_setup
+ * function: we do not want ARP resolution, but we do want the hard header so
+ * that packet sockets (e.g. tcpdump) keep working.
+ */
+static int qeth_l3_neigh_setup_noarp(struct neighbour *n)
+{
+	n->nud_state = NUD_NOARP;
+	memcpy(n->ha, "FAKELL", 6);
+	n->output = n->ops->connected_output;
+	return 0;
+}
+
+static int
+qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
+{
+	if (np->tbl->family == AF_INET)
+		np->neigh_setup = qeth_l3_neigh_setup_noarp;
+
+	return 0;
+}
+
+static const struct net_device_ops qeth_l3_netdev_ops = {
+	.ndo_open		= qeth_l3_open,
+	.ndo_stop		= qeth_l3_stop,
+	.ndo_get_stats		= qeth_get_stats,
+	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_rx_mode	= qeth_l3_set_multicast_list,
+	.ndo_do_ioctl		= qeth_l3_do_ioctl,
+	.ndo_change_mtu		= qeth_change_mtu,
+	.ndo_fix_features	= qeth_l3_fix_features,
+	.ndo_set_features	= qeth_l3_set_features,
+	.ndo_vlan_rx_add_vid	= qeth_l3_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid   = qeth_l3_vlan_rx_kill_vid,
+	.ndo_tx_timeout		= qeth_tx_timeout,
+};
+
+static const struct net_device_ops qeth_l3_osa_netdev_ops = {
+	.ndo_open		= qeth_l3_open,
+	.ndo_stop		= qeth_l3_stop,
+	.ndo_get_stats		= qeth_get_stats,
+	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_rx_mode	= qeth_l3_set_multicast_list,
+	.ndo_do_ioctl		= qeth_l3_do_ioctl,
+	.ndo_change_mtu		= qeth_change_mtu,
+	.ndo_fix_features	= qeth_l3_fix_features,
+	.ndo_set_features	= qeth_l3_set_features,
+	.ndo_vlan_rx_add_vid	= qeth_l3_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid   = qeth_l3_vlan_rx_kill_vid,
+	.ndo_tx_timeout		= qeth_tx_timeout,
+	.ndo_neigh_setup	= qeth_l3_neigh_setup,
+};
+
+static int qeth_l3_setup_netdev(struct qeth_card *card)
+{
+	if (card->info.type == QETH_CARD_TYPE_OSD ||
+	    card->info.type == QETH_CARD_TYPE_OSX) {
+		if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
+		    (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
+#ifdef CONFIG_TR
+			card->dev = alloc_trdev(0);
+#endif
+			if (!card->dev)
+				return -ENODEV;
+			card->dev->netdev_ops = &qeth_l3_netdev_ops;
+		} else {
+			card->dev = alloc_etherdev(0);
+			if (!card->dev)
+				return -ENODEV;
+			card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
+
+			/* IPv6 address autoconfiguration stuff */
+			qeth_l3_get_unique_id(card);
+			if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
+				card->dev->dev_id = card->info.unique_id &
+							 0xffff;
+			if (!card->info.guestlan) {
+				card->dev->hw_features = NETIF_F_SG |
+					NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+					NETIF_F_TSO;
+				card->dev->features = NETIF_F_RXCSUM;
+			}
+		}
+	} else if (card->info.type == QETH_CARD_TYPE_IQD) {
+		card->dev = alloc_netdev(0, "hsi%d", ether_setup);
+		if (!card->dev)
+			return -ENODEV;
+		card->dev->flags |= IFF_NOARP;
+		card->dev->netdev_ops = &qeth_l3_netdev_ops;
+		qeth_l3_iqd_read_initial_mac(card);
+		if (card->options.hsuid[0])
+			memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+	} else
+		return -ENODEV;
+
+	card->dev->ml_priv = card;
+	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
+	card->dev->mtu = card->info.initial_mtu;
+	SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
+	card->dev->features |=	NETIF_F_HW_VLAN_TX |
+				NETIF_F_HW_VLAN_RX |
+				NETIF_F_HW_VLAN_FILTER;
+	card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+	card->dev->gso_max_size = 15 * PAGE_SIZE;
+
+	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
+	netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
+	return register_netdev(card->dev);
+}
+
+static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	qeth_l3_create_device_attributes(&gdev->dev);
+	card->options.layer2 = 0;
+	card->info.hwtrap = 0;
+	card->discipline.start_poll = qeth_qdio_start_poll;
+	card->discipline.input_handler = (qdio_handler_t *)
+		qeth_qdio_input_handler;
+	card->discipline.output_handler = (qdio_handler_t *)
+		qeth_qdio_output_handler;
+	card->discipline.recover = qeth_l3_recover;
+	return 0;
+}
+
+static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
+
+	qeth_l3_remove_device_attributes(&cgdev->dev);
+
+	qeth_set_allowed_threads(card, 0, 1);
+	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+
+	if (cgdev->state == CCWGROUP_ONLINE)
+		qeth_l3_set_offline(cgdev);
+
+	if (card->dev) {
+		unregister_netdev(card->dev);
+		card->dev = NULL;
+	}
+
+	qeth_l3_clear_ip_list(card, 0);
+	qeth_l3_clear_ipato_list(card);
+	return;
+}
+
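+/* Bring the card online: hard setup, netdev creation on first use, STARTLAN,
+ * adapter parameters, IPA assists and routing setup, QDIO initialization and
+ * finally (re)opening of the interface when recovering.
+ */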
+static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc = 0;
+	enum qeth_card_states recover_flag;
+
+	BUG_ON(!card);
+	mutex_lock(&card->discipline_mutex);
+	mutex_lock(&card->conf_mutex);
+	QETH_DBF_TEXT(SETUP, 2, "setonlin");
+	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+
+	recover_flag = card->state;
+	rc = qeth_core_hardsetup_card(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+		rc = -ENODEV;
+		goto out_remove;
+	}
+
+	if (!card->dev && qeth_l3_setup_netdev(card)) {
+		rc = -ENODEV;
+		goto out_remove;
+	}
+
+	if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
+		if (card->info.hwtrap &&
+		    qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
+			card->info.hwtrap = 0;
+	} else
+		card->info.hwtrap = 0;
+
+	card->state = CARD_STATE_HARDSETUP;
+	memset(&card->rx, 0, sizeof(struct qeth_rx));
+	qeth_print_status_message(card);
+
+	/* softsetup */
+	QETH_DBF_TEXT(SETUP, 2, "softsetp");
+
+	rc = qeth_send_startlan(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+		if (rc == 0xe080) {
+			dev_warn(&card->gdev->dev,
+				"The LAN is offline\n");
+			card->lan_online = 0;
+			goto contin;
+		}
+		rc = -ENODEV;
+		goto out_remove;
+	} else
+		card->lan_online = 1;
+
+contin:
+	rc = qeth_l3_setadapter_parms(card);
+	if (rc)
+		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+	if (!card->options.sniffer) {
+		rc = qeth_l3_start_ipassists(card);
+		if (rc)
+			QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+		rc = qeth_l3_setrouting_v4(card);
+		if (rc)
+			QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+		rc = qeth_l3_setrouting_v6(card);
+		if (rc)
+			QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+	}
+	netif_tx_disable(card->dev);
+
+	rc = qeth_init_qdio_queues(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+		rc = -ENODEV;
+		goto out_remove;
+	}
+	card->state = CARD_STATE_SOFTSETUP;
+
+	qeth_set_allowed_threads(card, 0xffffffff, 0);
+	qeth_l3_set_ip_addr_list(card);
+	if (card->lan_online)
+		netif_carrier_on(card->dev);
+	else
+		netif_carrier_off(card->dev);
+	if (recover_flag == CARD_STATE_RECOVER) {
+		rtnl_lock();
+		if (recovery_mode)
+			__qeth_l3_open(card->dev);
+		else
+			dev_open(card->dev);
+		qeth_l3_set_multicast_list(card->dev);
+		rtnl_unlock();
+	}
+	/* let user_space know that device is online */
+	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
+	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
+	return 0;
+out_remove:
+	qeth_l3_stop_card(card, 0);
+	ccw_device_set_offline(CARD_DDEV(card));
+	ccw_device_set_offline(CARD_WDEV(card));
+	ccw_device_set_offline(CARD_RDEV(card));
+	if (recover_flag == CARD_STATE_RECOVER)
+		card->state = CARD_STATE_RECOVER;
+	else
+		card->state = CARD_STATE_DOWN;
+	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
+	return rc;
+}
+
+static int qeth_l3_set_online(struct ccwgroup_device *gdev)
+{
+	return __qeth_l3_set_online(gdev, 0);
+}
+
+static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
+			int recovery_mode)
+{
+	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
+	int rc = 0, rc2 = 0, rc3 = 0;
+	enum qeth_card_states recover_flag;
+
+	mutex_lock(&card->discipline_mutex);
+	mutex_lock(&card->conf_mutex);
+	QETH_DBF_TEXT(SETUP, 3, "setoffl");
+	QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
+
+	if (card->dev && netif_carrier_ok(card->dev))
+		netif_carrier_off(card->dev);
+	recover_flag = card->state;
+	if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
+		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+		card->info.hwtrap = 1;
+	}
+	qeth_l3_stop_card(card, recovery_mode);
+	if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) {
+		rtnl_lock();
+		call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
+		rtnl_unlock();
+	}
+	rc  = ccw_device_set_offline(CARD_DDEV(card));
+	rc2 = ccw_device_set_offline(CARD_WDEV(card));
+	rc3 = ccw_device_set_offline(CARD_RDEV(card));
+	if (!rc)
+		rc = (rc2) ? rc2 : rc3;
+	if (rc)
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+	if (recover_flag == CARD_STATE_UP)
+		card->state = CARD_STATE_RECOVER;
+	/* let user_space know that device is offline */
+	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
+	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
+	return 0;
+}
+
+static int qeth_l3_set_offline(struct ccwgroup_device *cgdev)
+{
+	return __qeth_l3_set_offline(cgdev, 0);
+}
+
+static int qeth_l3_recover(void *ptr)
+{
+	struct qeth_card *card;
+	int rc = 0;
+
+	card = (struct qeth_card *) ptr;
+	QETH_CARD_TEXT(card, 2, "recover1");
+	QETH_CARD_HEX(card, 2, &card, sizeof(void *));
+	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
+		return 0;
+	QETH_CARD_TEXT(card, 2, "recover2");
+	dev_warn(&card->gdev->dev,
+		"A recovery process has been started for the device\n");
+	__qeth_l3_set_offline(card->gdev, 1);
+	rc = __qeth_l3_set_online(card->gdev, 1);
+	if (!rc)
+		dev_info(&card->gdev->dev,
+			"Device successfully recovered!\n");
+	else {
+		rtnl_lock();
+		dev_close(card->dev);
+		rtnl_unlock();
+		dev_warn(&card->gdev->dev, "The qeth device driver "
+			"failed to recover an error on the device\n");
+	}
+	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
+	return 0;
+}
+
+static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	qeth_set_allowed_threads(card, 0, 1);
+	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
+		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+	qeth_qdio_clear_card(card, 0);
+	qeth_clear_qdio_buffers(card);
+}
+
+static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	if (card->dev)
+		netif_device_detach(card->dev);
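+	/* block new card threads and wait until the running ones have finished */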
+	qeth_set_allowed_threads(card, 0, 1);
+	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+	if (gdev->state == CCWGROUP_OFFLINE)
+		return 0;
+	if (card->state == CARD_STATE_UP) {
+		if (card->info.hwtrap)
+			qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+		__qeth_l3_set_offline(card->gdev, 1);
+	} else
+		__qeth_l3_set_offline(card->gdev, 0);
+	return 0;
+}
+
+static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc = 0;
+
+	if (gdev->state == CCWGROUP_OFFLINE)
+		goto out;
+
+	if (card->state == CARD_STATE_RECOVER) {
+		rc = __qeth_l3_set_online(card->gdev, 1);
+		if (rc) {
+			rtnl_lock();
+			dev_close(card->dev);
+			rtnl_unlock();
+		}
+	} else
+		rc = __qeth_l3_set_online(card->gdev, 0);
+out:
+	qeth_set_allowed_threads(card, 0xffffffff, 0);
+	if (card->dev)
+		netif_device_attach(card->dev);
+	if (rc)
+		dev_warn(&card->gdev->dev, "The qeth device driver "
+			"failed to recover an error on the device\n");
+	return rc;
+}
+
+struct ccwgroup_driver qeth_l3_ccwgroup_driver = {
+	.probe = qeth_l3_probe_device,
+	.remove = qeth_l3_remove_device,
+	.set_online = qeth_l3_set_online,
+	.set_offline = qeth_l3_set_offline,
+	.shutdown = qeth_l3_shutdown,
+	.freeze = qeth_l3_pm_suspend,
+	.thaw = qeth_l3_pm_resume,
+	.restore = qeth_l3_pm_resume,
+};
+EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver);
+
+static int qeth_l3_ip_event(struct notifier_block *this,
+			    unsigned long event, void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+	struct net_device *dev = (struct net_device *)ifa->ifa_dev->dev;
+	struct qeth_ipaddr *addr;
+	struct qeth_card *card;
+
+	if (dev_net(dev) != &init_net)
+		return NOTIFY_DONE;
+
+	card = qeth_l3_get_card_from_dev(dev);
+	if (!card)
+		return NOTIFY_DONE;
+	QETH_CARD_TEXT(card, 3, "ipevent");
+
+	addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
+	if (addr != NULL) {
+		addr->u.a4.addr = ifa->ifa_address;
+		addr->u.a4.mask = ifa->ifa_mask;
+		addr->type = QETH_IP_TYPE_NORMAL;
+	} else
+		goto out;
+
+	switch (event) {
+	case NETDEV_UP:
+		if (!qeth_l3_add_ip(card, addr))
+			kfree(addr);
+		break;
+	case NETDEV_DOWN:
+		if (!qeth_l3_delete_ip(card, addr))
+			kfree(addr);
+		break;
+	default:
+		break;
+	}
+	qeth_l3_set_ip_addr_list(card);
+out:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block qeth_l3_ip_notifier = {
+	.notifier_call = qeth_l3_ip_event,
+};
+
+#ifdef CONFIG_QETH_IPV6
+/**
+ * IPv6 event handler
+ */
+static int qeth_l3_ip6_event(struct notifier_block *this,
+			     unsigned long event, void *ptr)
+{
+	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
+	struct net_device *dev = (struct net_device *)ifa->idev->dev;
+	struct qeth_ipaddr *addr;
+	struct qeth_card *card;
+
+	card = qeth_l3_get_card_from_dev(dev);
+	if (!card)
+		return NOTIFY_DONE;
+	QETH_CARD_TEXT(card, 3, "ip6event");
+	if (!qeth_is_supported(card, IPA_IPV6))
+		return NOTIFY_DONE;
+
+	addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+	if (addr != NULL) {
+		memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
+		addr->u.a6.pfxlen = ifa->prefix_len;
+		addr->type = QETH_IP_TYPE_NORMAL;
+	} else
+		goto out;
+
+	switch (event) {
+	case NETDEV_UP:
+		if (!qeth_l3_add_ip(card, addr))
+			kfree(addr);
+		break;
+	case NETDEV_DOWN:
+		if (!qeth_l3_delete_ip(card, addr))
+			kfree(addr);
+		break;
+	default:
+		break;
+	}
+	qeth_l3_set_ip_addr_list(card);
+out:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block qeth_l3_ip6_notifier = {
+	.notifier_call = qeth_l3_ip6_event,
+};
+#endif
+
+static int qeth_l3_register_notifiers(void)
+{
+	int rc;
+
+	QETH_DBF_TEXT(SETUP, 5, "regnotif");
+	rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
+	if (rc)
+		return rc;
+#ifdef CONFIG_QETH_IPV6
+	rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
+	if (rc) {
+		unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
+		return rc;
+	}
+#else
+	pr_warning("There is no IPv6 support for the layer 3 discipline\n");
+#endif
+	return 0;
+}
+
+static void qeth_l3_unregister_notifiers(void)
+{
+
+	QETH_DBF_TEXT(SETUP, 5, "unregnot");
+	BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
+#ifdef CONFIG_QETH_IPV6
+	BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
+#endif /* QETH_IPV6 */
+}
+
+static int __init qeth_l3_init(void)
+{
+	int rc = 0;
+
+	pr_info("register layer 3 discipline\n");
+	rc = qeth_l3_register_notifiers();
+	return rc;
+}
+
+static void __exit qeth_l3_exit(void)
+{
+	qeth_l3_unregister_notifiers();
+	pr_info("unregister layer 3 discipline\n");
+}
+
+module_init(qeth_l3_init);
+module_exit(qeth_l3_exit);
+MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
+MODULE_DESCRIPTION("qeth layer 3 discipline");
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_l3_sys.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_l3_sys.c
new file mode 100644
index 0000000..d979bb2
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/qeth_l3_sys.c
@@ -0,0 +1,1172 @@
+/*
+ *  drivers/s390/net/qeth_l3_sys.c
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ *		 Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <asm/ebcdic.h>
+#include "qeth_l3.h"
+
+#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
+struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
+
+static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
+			struct qeth_routing_info *route, char *buf)
+{
+	switch (route->type) {
+	case PRIMARY_ROUTER:
+		return sprintf(buf, "%s\n", "primary router");
+	case SECONDARY_ROUTER:
+		return sprintf(buf, "%s\n", "secondary router");
+	case MULTICAST_ROUTER:
+		if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
+			return sprintf(buf, "%s\n", "multicast router+");
+		else
+			return sprintf(buf, "%s\n", "multicast router");
+	case PRIMARY_CONNECTOR:
+		if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
+			return sprintf(buf, "%s\n", "primary connector+");
+		else
+			return sprintf(buf, "%s\n", "primary connector");
+	case SECONDARY_CONNECTOR:
+		if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
+			return sprintf(buf, "%s\n", "secondary connector+");
+		else
+			return sprintf(buf, "%s\n", "secondary connector");
+	default:
+		return sprintf(buf, "%s\n", "no");
+	}
+}
+
+static ssize_t qeth_l3_dev_route4_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_route_show(card, &card->options.route4, buf);
+}
+
+static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
+		struct qeth_routing_info *route, enum qeth_prot_versions prot,
+		const char *buf, size_t count)
+{
+	enum qeth_routing_types old_route_type = route->type;
+	char *tmp;
+	int rc = 0;
+
+	tmp = strsep((char **) &buf, "\n");
+	mutex_lock(&card->conf_mutex);
+	if (!strcmp(tmp, "no_router")) {
+		route->type = NO_ROUTER;
+	} else if (!strcmp(tmp, "primary_connector")) {
+		route->type = PRIMARY_CONNECTOR;
+	} else if (!strcmp(tmp, "secondary_connector")) {
+		route->type = SECONDARY_CONNECTOR;
+	} else if (!strcmp(tmp, "primary_router")) {
+		route->type = PRIMARY_ROUTER;
+	} else if (!strcmp(tmp, "secondary_router")) {
+		route->type = SECONDARY_ROUTER;
+	} else if (!strcmp(tmp, "multicast_router")) {
+		route->type = MULTICAST_ROUTER;
+	} else {
+		rc = -EINVAL;
+		goto out;
+	}
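+	/* if the card is already initialized and the routing type changed, apply it now */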
+	if (((card->state == CARD_STATE_SOFTSETUP) ||
+	     (card->state == CARD_STATE_UP)) &&
+	    (old_route_type != route->type)) {
+		if (prot == QETH_PROT_IPV4)
+			rc = qeth_l3_setrouting_v4(card);
+		else if (prot == QETH_PROT_IPV6)
+			rc = qeth_l3_setrouting_v6(card);
+	}
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_l3_dev_route4_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_route_store(card, &card->options.route4,
+				QETH_PROT_IPV4, buf, count);
+}
+
+static DEVICE_ATTR(route4, 0644, qeth_l3_dev_route4_show,
+			qeth_l3_dev_route4_store);
+
+static ssize_t qeth_l3_dev_route6_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_route_show(card, &card->options.route6, buf);
+}
+
+static ssize_t qeth_l3_dev_route6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_route_store(card, &card->options.route6,
+				QETH_PROT_IPV6, buf, count);
+}
+
+static DEVICE_ATTR(route6, 0644, qeth_l3_dev_route6_show,
+			qeth_l3_dev_route6_store);
+
+static ssize_t qeth_l3_dev_fake_broadcast_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
+}
+
+static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int i, rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	i = simple_strtoul(buf, &tmp, 16);
+	if ((i == 0) || (i == 1))
+		card->options.fake_broadcast = i;
+	else
+		rc = -EINVAL;
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show,
+		   qeth_l3_dev_fake_broadcast_store);
+
+static ssize_t qeth_l3_dev_broadcast_mode_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
+	      (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
+		return sprintf(buf, "n/a\n");
+
+	return sprintf(buf, "%s\n", (card->options.broadcast_mode ==
+				     QETH_TR_BROADCAST_ALLRINGS)?
+		       "all rings":"local");
+}
+
+static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
+	      (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	tmp = strsep((char **) &buf, "\n");
+
+	if (!strcmp(tmp, "local"))
+		card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL;
+	else if (!strcmp(tmp, "all_rings"))
+		card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
+	else
+		rc = -EINVAL;
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(broadcast_mode, 0644, qeth_l3_dev_broadcast_mode_show,
+		   qeth_l3_dev_broadcast_mode_store);
+
+static ssize_t qeth_l3_dev_canonical_macaddr_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
+	      (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
+		return sprintf(buf, "n/a\n");
+
+	return sprintf(buf, "%i\n", (card->options.macaddr_mode ==
+				     QETH_TR_MACADDR_CANONICAL)? 1:0);
+}
+
+static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int i, rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
+	      (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	i = simple_strtoul(buf, &tmp, 16);
+	if ((i == 0) || (i == 1))
+		card->options.macaddr_mode = i?
+			QETH_TR_MACADDR_CANONICAL :
+			QETH_TR_MACADDR_NONCANONICAL;
+	else
+		rc = -EINVAL;
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show,
+		   qeth_l3_dev_canonical_macaddr_store);
+
+static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
+}
+
+static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	int rc = 0;
+	unsigned long i;
+
+	if (!card)
+		return -EINVAL;
+
+	if (card->info.type != QETH_CARD_TYPE_IQD)
+		return -EPERM;
+	if (card->options.cq == QETH_CQ_ENABLED)
+		return -EPERM;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	rc = strict_strtoul(buf, 16, &i);
+	if (rc) {
+		rc = -EINVAL;
+		goto out;
+	}
+	switch (i) {
+	case 0:
+		card->options.sniffer = i;
+		break;
+	case 1:
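+		/* sniffing needs HW support (QETH_SNIFF_AVAIL) and the maximum buffer pool */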
+		qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
+		if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
+			card->options.sniffer = i;
+			if (card->qdio.init_pool.buf_count !=
+					QETH_IN_BUF_COUNT_MAX)
+				qeth_realloc_buffer_pool(card,
+					QETH_IN_BUF_COUNT_MAX);
+		} else
+			rc = -EPERM;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
+		qeth_l3_dev_sniffer_store);
+
+
+static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char tmp_hsuid[9];
+
+	if (!card)
+		return -EINVAL;
+
+	if (card->info.type != QETH_CARD_TYPE_IQD)
+		return -EPERM;
+
+	if (card->state == CARD_STATE_DOWN)
+		return -EPERM;
+
+	memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid));
+	EBCASC(tmp_hsuid, 8);
+	return sprintf(buf, "%s\n", tmp_hsuid);
+}
+
+static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	struct qeth_ipaddr *addr;
+	char *tmp;
+	int i;
+
+	if (!card)
+		return -EINVAL;
+
+	if (card->info.type != QETH_CARD_TYPE_IQD)
+		return -EPERM;
+	if (card->state != CARD_STATE_DOWN &&
+	    card->state != CARD_STATE_RECOVER)
+		return -EPERM;
+	if (card->options.sniffer)
+		return -EPERM;
+	if (card->options.cq == QETH_CQ_NOTAVAILABLE)
+		return -EPERM;
+
+	tmp = strsep((char **)&buf, "\n");
+	if (strlen(tmp) > 8)
+		return -EINVAL;
+
+	if (card->options.hsuid[0]) {
+		/* delete old ip address */
+		addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+		if (addr != NULL) {
+			addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
+			addr->u.a6.addr.s6_addr32[1] = 0x00000000;
+			for (i = 8; i < 16; i++)
+				addr->u.a6.addr.s6_addr[i] =
+					card->options.hsuid[i - 8];
+			addr->u.a6.pfxlen = 0;
+			addr->type = QETH_IP_TYPE_NORMAL;
+		} else
+			return -ENOMEM;
+		if (!qeth_l3_delete_ip(card, addr))
+			kfree(addr);
+		qeth_l3_set_ip_addr_list(card);
+	}
+
+	if (strlen(tmp) == 0) {
+		/* delete ip address only */
+		card->options.hsuid[0] = '\0';
+		if (card->dev)
+			memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+		qeth_configure_cq(card, QETH_CQ_DISABLED);
+		return count;
+	}
+
+	if (qeth_configure_cq(card, QETH_CQ_ENABLED))
+		return -EPERM;
+
+	for (i = 0; i < 8; i++)
+		card->options.hsuid[i] = ' ';
+	card->options.hsuid[8] = '\0';
+	strncpy(card->options.hsuid, tmp, strlen(tmp));
+	ASCEBC(card->options.hsuid, 8);
+	if (card->dev)
+		memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+
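+	/* register an IPv6 link-local address (fe80::) derived from the new hsuid */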
+	addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+	if (addr != NULL) {
+		addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
+		addr->u.a6.addr.s6_addr32[1] = 0x00000000;
+		for (i = 8; i < 16; i++)
+			addr->u.a6.addr.s6_addr[i] = card->options.hsuid[i - 8];
+		addr->u.a6.pfxlen = 0;
+		addr->type = QETH_IP_TYPE_NORMAL;
+	} else
+		return -ENOMEM;
+	if (!qeth_l3_add_ip(card, addr))
+		kfree(addr);
+	qeth_l3_set_ip_addr_list(card);
+
+	return count;
+}
+
+static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show,
+		   qeth_l3_dev_hsuid_store);
+
+
+static struct attribute *qeth_l3_device_attrs[] = {
+	&dev_attr_route4.attr,
+	&dev_attr_route6.attr,
+	&dev_attr_fake_broadcast.attr,
+	&dev_attr_broadcast_mode.attr,
+	&dev_attr_canonical_macaddr.attr,
+	&dev_attr_sniffer.attr,
+	&dev_attr_hsuid.attr,
+	NULL,
+};
+
+static struct attribute_group qeth_l3_device_attr_group = {
+	.attrs = qeth_l3_device_attrs,
+};
+
+static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
+}
+
+static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	struct qeth_ipaddr *tmpipa, *t;
+	char *tmp;
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	tmp = strsep((char **) &buf, "\n");
+	if (!strcmp(tmp, "toggle")) {
+		card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
+	} else if (!strcmp(tmp, "1")) {
+		card->ipato.enabled = 1;
+		list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
+			if ((tmpipa->type == QETH_IP_TYPE_NORMAL) &&
+				qeth_l3_is_addr_covered_by_ipato(card, tmpipa))
+				tmpipa->set_flags |=
+					QETH_IPA_SETIP_TAKEOVER_FLAG;
+		}
+
+	} else if (!strcmp(tmp, "0")) {
+		card->ipato.enabled = 0;
+		list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
+			if (tmpipa->set_flags &
+				QETH_IPA_SETIP_TAKEOVER_FLAG)
+				tmpipa->set_flags &=
+					~QETH_IPA_SETIP_TAKEOVER_FLAG;
+		}
+	} else
+		rc = -EINVAL;
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
+			qeth_l3_dev_ipato_enable_show,
+			qeth_l3_dev_ipato_enable_store);
+
+static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->ipato.invert4? 1:0);
+}
+
+static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	tmp = strsep((char **) &buf, "\n");
+	if (!strcmp(tmp, "toggle")) {
+		card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
+	} else if (!strcmp(tmp, "1")) {
+		card->ipato.invert4 = 1;
+	} else if (!strcmp(tmp, "0")) {
+		card->ipato.invert4 = 0;
+	} else
+		rc = -EINVAL;
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
+			qeth_l3_dev_ipato_invert4_show,
+			qeth_l3_dev_ipato_invert4_store);
+
+static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
+			enum qeth_prot_versions proto)
+{
+	struct qeth_ipato_entry *ipatoe;
+	unsigned long flags;
+	char addr_str[40];
+	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
+	int i = 0;
+
+	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
+	/* add strlen for "/<mask>\n" */
+	entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
+	spin_lock_irqsave(&card->ip_lock, flags);
+	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
+		if (ipatoe->proto != proto)
+			continue;
+		/* String must not be longer than PAGE_SIZE. So we check if
+		 * string length gets near PAGE_SIZE. Then we can safely display
+		 * the next IPv6 address (worst case, compared to IPv4) */
+		if ((PAGE_SIZE - i) <= entry_len)
+			break;
+		qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str);
+		i += snprintf(buf + i, PAGE_SIZE - i,
+			      "%s/%i\n", addr_str, ipatoe->mask_bits);
+	}
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+	i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+
+	return i;
+}
+
+static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
+}
+
+static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto,
+		  u8 *addr, int *mask_bits)
+{
+	const char *start, *end;
+	char *tmp;
+	char buffer[40] = {0, };
+
+	start = buf;
+	/* get address string */
+	end = strchr(start, '/');
+	if (!end || (end - start >= 40)) {
+		return -EINVAL;
+	}
+	strncpy(buffer, start, end - start);
+	if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) {
+		return -EINVAL;
+	}
+	start = end + 1;
+	*mask_bits = simple_strtoul(start, &tmp, 10);
+	if (!strlen(start) ||
+	    (tmp == start) ||
+	    (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) {
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
+			 struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	struct qeth_ipato_entry *ipatoe;
+	u8 addr[16];
+	int mask_bits;
+	int rc = 0;
+
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
+	if (rc)
+		goto out;
+
+	ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL);
+	if (!ipatoe) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	ipatoe->proto = proto;
+	memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
+	ipatoe->mask_bits = mask_bits;
+
+	rc = qeth_l3_add_ipato_entry(card, ipatoe);
+	if (rc)
+		kfree(ipatoe);
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
+			qeth_l3_dev_ipato_add4_show,
+			qeth_l3_dev_ipato_add4_store);
+
+static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
+			 struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	u8 addr[16];
+	int mask_bits;
+	int rc = 0;
+
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
+	if (!rc)
+		qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL,
+			qeth_l3_dev_ipato_del4_store);
+
+static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
+}
+
+static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	tmp = strsep((char **) &buf, "\n");
+	if (!strcmp(tmp, "toggle")) {
+		card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
+	} else if (!strcmp(tmp, "1")) {
+		card->ipato.invert6 = 1;
+	} else if (!strcmp(tmp, "0")) {
+		card->ipato.invert6 = 0;
+	} else
+		rc = -EINVAL;
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644,
+			qeth_l3_dev_ipato_invert6_show,
+			qeth_l3_dev_ipato_invert6_store);
+
+
+static ssize_t qeth_l3_dev_ipato_add6_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV6);
+}
+
+static ssize_t qeth_l3_dev_ipato_add6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(ipato_add6, add6, 0644,
+			qeth_l3_dev_ipato_add6_show,
+			qeth_l3_dev_ipato_add6_store);
+
+static ssize_t qeth_l3_dev_ipato_del6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL,
+			qeth_l3_dev_ipato_del6_store);
+
+static struct attribute *qeth_ipato_device_attrs[] = {
+	&dev_attr_ipato_enable.attr,
+	&dev_attr_ipato_invert4.attr,
+	&dev_attr_ipato_add4.attr,
+	&dev_attr_ipato_del4.attr,
+	&dev_attr_ipato_invert6.attr,
+	&dev_attr_ipato_add6.attr,
+	&dev_attr_ipato_del6.attr,
+	NULL,
+};
+
+static struct attribute_group qeth_device_ipato_group = {
+	.name = "ipa_takeover",
+	.attrs = qeth_ipato_device_attrs,
+};
+
+static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
+			enum qeth_prot_versions proto)
+{
+	struct qeth_ipaddr *ipaddr;
+	char addr_str[40];
+	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
+	unsigned long flags;
+	int i = 0;
+
+	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
+	entry_len += 2; /* \n + terminator */
+	spin_lock_irqsave(&card->ip_lock, flags);
+	list_for_each_entry(ipaddr, &card->ip_list, entry) {
+		if (ipaddr->proto != proto)
+			continue;
+		if (ipaddr->type != QETH_IP_TYPE_VIPA)
+			continue;
+		/* String must not be longer than PAGE_SIZE. So we check if
+		 * string length gets near PAGE_SIZE. Then we can safely display
+		 * the next IPv6 address (worst case, compared to IPv4) */
+		if ((PAGE_SIZE - i) <= entry_len)
+			break;
+		qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
+			addr_str);
+		i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+	}
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+	i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+
+	return i;
+}
+
+static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
+}
+
+static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
+		 u8 *addr)
+{
+	if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count,
+			struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	u8 addr[16] = {0, };
+	int rc;
+
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l3_parse_vipae(buf, proto, addr);
+	if (!rc)
+		rc = qeth_l3_add_vipa(card, proto, addr);
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
+			qeth_l3_dev_vipa_add4_show,
+			qeth_l3_dev_vipa_add4_store);
+
+static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
+			 struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	u8 addr[16];
+	int rc;
+
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l3_parse_vipae(buf, proto, addr);
+	if (!rc)
+		qeth_l3_del_vipa(card, proto, addr);
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
+			qeth_l3_dev_vipa_del4_store);
+
+static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV6);
+}
+
+static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
+			qeth_l3_dev_vipa_add6_show,
+			qeth_l3_dev_vipa_add6_store);
+
+static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
+			qeth_l3_dev_vipa_del6_store);
+
+static struct attribute *qeth_vipa_device_attrs[] = {
+	&dev_attr_vipa_add4.attr,
+	&dev_attr_vipa_del4.attr,
+	&dev_attr_vipa_add6.attr,
+	&dev_attr_vipa_del6.attr,
+	NULL,
+};
+
+static struct attribute_group qeth_device_vipa_group = {
+	.name = "vipa",
+	.attrs = qeth_vipa_device_attrs,
+};
+
+static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
+		       enum qeth_prot_versions proto)
+{
+	struct qeth_ipaddr *ipaddr;
+	char addr_str[40];
+	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
+	unsigned long flags;
+	int i = 0;
+
+	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
+	entry_len += 2; /* \n + terminator */
+	spin_lock_irqsave(&card->ip_lock, flags);
+	list_for_each_entry(ipaddr, &card->ip_list, entry) {
+		if (ipaddr->proto != proto)
+			continue;
+		if (ipaddr->type != QETH_IP_TYPE_RXIP)
+			continue;
+		/* String must not be longer than PAGE_SIZE. So we check if
+		 * string length gets near PAGE_SIZE. Then we can safely display
+		 * the next IPv6 address (worst case, compared to IPv4) */
+		if ((PAGE_SIZE - i) <= entry_len)
+			break;
+		qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
+			addr_str);
+		i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+	}
+	spin_unlock_irqrestore(&card->ip_lock, flags);
+	i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+
+	return i;
+}
+
+static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
+}
+
+static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
+		 u8 *addr)
+{
+	if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count,
+			struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	u8 addr[16] = {0, };
+	int rc;
+
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l3_parse_rxipe(buf, proto, addr);
+	if (!rc)
+		rc = qeth_l3_add_rxip(card, proto, addr);
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
+			qeth_l3_dev_rxip_add4_show,
+			qeth_l3_dev_rxip_add4_store);
+
+static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
+			struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	u8 addr[16];
+	int rc;
+
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l3_parse_rxipe(buf, proto, addr);
+	if (!rc)
+		qeth_l3_del_rxip(card, proto, addr);
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
+			qeth_l3_dev_rxip_del4_store);
+
+static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV6);
+}
+
+static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
+			qeth_l3_dev_rxip_add6_show,
+			qeth_l3_dev_rxip_add6_store);
+
+static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
+			qeth_l3_dev_rxip_del6_store);
+
+static struct attribute *qeth_rxip_device_attrs[] = {
+	&dev_attr_rxip_add4.attr,
+	&dev_attr_rxip_del4.attr,
+	&dev_attr_rxip_add6.attr,
+	&dev_attr_rxip_del6.attr,
+	NULL,
+};
+
+static struct attribute_group qeth_device_rxip_group = {
+	.name = "rxip",
+	.attrs = qeth_rxip_device_attrs,
+};
+
+int qeth_l3_create_device_attributes(struct device *dev)
+{
+	int ret;
+
+	ret = sysfs_create_group(&dev->kobj, &qeth_l3_device_attr_group);
+	if (ret)
+		return ret;
+
+	ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group);
+	if (ret) {
+		sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
+		return ret;
+	}
+
+	ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group);
+	if (ret) {
+		sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
+		sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
+		return ret;
+	}
+
+	ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group);
+	if (ret) {
+		sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
+		sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
+		sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
+		return ret;
+	}
+	return 0;
+}
+
+void qeth_l3_remove_device_attributes(struct device *dev)
+{
+	sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
+	sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
+	sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
+	sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/smsgiucv.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/smsgiucv.c
new file mode 100644
index 0000000..207b7d7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/smsgiucv.c
@@ -0,0 +1,259 @@
+/*
+ * IUCV special message driver
+ *
+ * Copyright IBM Corp. 2003, 2009
+ *
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <net/iucv/iucv.h>
+#include <asm/cpcmd.h>
+#include <asm/ebcdic.h>
+#include "smsgiucv.h"
+
+struct smsg_callback {
+	struct list_head list;
+	const char *prefix;
+	int len;
+	void (*callback)(const char *from, char *str);
+};
+
+MODULE_AUTHOR("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)");
+MODULE_DESCRIPTION("Linux for S/390 IUCV special message driver");
+
+static struct iucv_path *smsg_path;
+/* dummy device used as trigger for PM functions */
+static struct device *smsg_dev;
+
+static DEFINE_SPINLOCK(smsg_list_lock);
+static LIST_HEAD(smsg_list);
+static int iucv_path_connected;
+
+static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
+static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
+
+static struct iucv_handler smsg_handler = {
+	.path_pending	 = smsg_path_pending,
+	.message_pending = smsg_message_pending,
+};
+
+static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8],
+			     u8 ipuser[16])
+{
+	if (strncmp(ipvmid, "*MSG    ", 8) != 0)
+		return -EINVAL;
+	/* Path pending from *MSG. */
+	return iucv_path_accept(path, &smsg_handler, "SMSGIUCV        ", NULL);
+}
+
+static void smsg_message_pending(struct iucv_path *path,
+				 struct iucv_message *msg)
+{
+	struct smsg_callback *cb;
+	unsigned char *buffer;
+	unsigned char sender[9];
+	int rc, i;
+
+	buffer = kmalloc(msg->length + 1, GFP_ATOMIC | GFP_DMA);
+	if (!buffer) {
+		iucv_message_reject(path, msg);
+		return;
+	}
+	rc = iucv_message_receive(path, msg, 0, buffer, msg->length, NULL);
+	if (rc == 0) {
+		buffer[msg->length] = 0;
+		EBCASC(buffer, msg->length);
+		memcpy(sender, buffer, 8);
+		sender[8] = 0;
+		/* Remove trailing whitespace from the sender name. */
+		for (i = 7; i >= 0; i--) {
+			if (sender[i] != ' ' && sender[i] != '\t')
+				break;
+			sender[i] = 0;
+		}
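+		/* hand the message to the first registered callback whose prefix matches */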
+		spin_lock(&smsg_list_lock);
+		list_for_each_entry(cb, &smsg_list, list)
+			if (strncmp(buffer + 8, cb->prefix, cb->len) == 0) {
+				cb->callback(sender, buffer + 8);
+				break;
+			}
+		spin_unlock(&smsg_list_lock);
+	}
+	kfree(buffer);
+}
+
+int smsg_register_callback(const char *prefix,
+			   void (*callback)(const char *from, char *str))
+{
+	struct smsg_callback *cb;
+
+	cb = kmalloc(sizeof(struct smsg_callback), GFP_KERNEL);
+	if (!cb)
+		return -ENOMEM;
+	cb->prefix = prefix;
+	cb->len = strlen(prefix);
+	cb->callback = callback;
+	spin_lock_bh(&smsg_list_lock);
+	list_add_tail(&cb->list, &smsg_list);
+	spin_unlock_bh(&smsg_list_lock);
+	return 0;
+}
+
+void smsg_unregister_callback(const char *prefix,
+			      void (*callback)(const char *from,
+					       char *str))
+{
+	struct smsg_callback *cb, *tmp;
+
+	spin_lock_bh(&smsg_list_lock);
+	cb = NULL;
+	list_for_each_entry(tmp, &smsg_list, list)
+		if (tmp->callback == callback &&
+		    strcmp(tmp->prefix, prefix) == 0) {
+			cb = tmp;
+			list_del(&cb->list);
+			break;
+		}
+	spin_unlock_bh(&smsg_list_lock);
+	kfree(cb);
+}
+
+static int smsg_pm_freeze(struct device *dev)
+{
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "smsg_pm_freeze\n");
+#endif
+	if (smsg_path && iucv_path_connected) {
+		iucv_path_sever(smsg_path, NULL);
+		iucv_path_connected = 0;
+	}
+	return 0;
+}
+
+static int smsg_pm_restore_thaw(struct device *dev)
+{
+	int rc;
+
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "smsg_pm_restore_thaw\n");
+#endif
+	if (smsg_path && iucv_path_connected) {
+		memset(smsg_path, 0, sizeof(*smsg_path));
+		smsg_path->msglim = 255;
+		smsg_path->flags = 0;
+		rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG    ",
+				       NULL, NULL, NULL);
+#ifdef CONFIG_PM_DEBUG
+		if (rc)
+			printk(KERN_ERR
+			       "iucv_path_connect returned with rc %i\n", rc);
+#endif
+		if (!rc)
+			iucv_path_connected = 1;
+		cpcmd("SET SMSG IUCV", NULL, 0, NULL);
+	}
+	return 0;
+}
+
+static const struct dev_pm_ops smsg_pm_ops = {
+	.freeze = smsg_pm_freeze,
+	.thaw = smsg_pm_restore_thaw,
+	.restore = smsg_pm_restore_thaw,
+};
+
+static struct device_driver smsg_driver = {
+	.owner = THIS_MODULE,
+	.name = SMSGIUCV_DRV_NAME,
+	.bus  = &iucv_bus,
+	.pm = &smsg_pm_ops,
+};
+
+static void __exit smsg_exit(void)
+{
+	cpcmd("SET SMSG IUCV", NULL, 0, NULL);
+	device_unregister(smsg_dev);
+	iucv_unregister(&smsg_handler, 1);
+	driver_unregister(&smsg_driver);
+}
+
+static int __init smsg_init(void)
+{
+	int rc;
+
+	if (!MACHINE_IS_VM) {
+		rc = -EPROTONOSUPPORT;
+		goto out;
+	}
+	rc = driver_register(&smsg_driver);
+	if (rc != 0)
+		goto out;
+	rc = iucv_register(&smsg_handler, 1);
+	if (rc)
+		goto out_driver;
+	smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL);
+	if (!smsg_path) {
+		rc = -ENOMEM;
+		goto out_register;
+	}
+	rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG    ",
+			       NULL, NULL, NULL);
+	if (rc)
+		goto out_free_path;
+	else
+		iucv_path_connected = 1;
+	smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!smsg_dev) {
+		rc = -ENOMEM;
+		goto out_free_path;
+	}
+	dev_set_name(smsg_dev, "smsg_iucv");
+	smsg_dev->bus = &iucv_bus;
+	smsg_dev->parent = iucv_root;
+	smsg_dev->release = (void (*)(struct device *))kfree;
+	smsg_dev->driver = &smsg_driver;
+	rc = device_register(smsg_dev);
+	if (rc)
+		goto out_put;
+
+	cpcmd("SET SMSG IUCV", NULL, 0, NULL);
+	return 0;
+
+out_put:
+	put_device(smsg_dev);
+out_free_path:
+	iucv_path_free(smsg_path);
+	smsg_path = NULL;
+out_register:
+	iucv_unregister(&smsg_handler, 1);
+out_driver:
+	driver_unregister(&smsg_driver);
+out:
+	return rc;
+}
+
+module_init(smsg_init);
+module_exit(smsg_exit);
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(smsg_register_callback);
+EXPORT_SYMBOL(smsg_unregister_callback);
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/smsgiucv.h b/ap/os/linux/linux-3.4.x/drivers/s390/net/smsgiucv.h
new file mode 100644
index 0000000..149a115
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/smsgiucv.h
@@ -0,0 +1,14 @@
+/*
+ * IUCV special message driver
+ *
+ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define SMSGIUCV_DRV_NAME     "SMSGIUCV"
+
+int  smsg_register_callback(const char *,
+			    void (*)(const char *, char *));
+void smsg_unregister_callback(const char *,
+			      void (*)(const char *, char *));
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/net/smsgiucv_app.c b/ap/os/linux/linux-3.4.x/drivers/s390/net/smsgiucv_app.c
new file mode 100644
index 0000000..32515a2
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/net/smsgiucv_app.c
@@ -0,0 +1,218 @@
+/*
+ * Deliver z/VM CP special messages (SMSG) as uevents.
+ *
+ * The driver registers for z/VM CP special messages with the
+ * "APP" prefix. Incoming messages are delivered to user space
+ * as uevents.
+ *
+ * Copyright IBM Corp. 2010
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ *
+ */
+#define KMSG_COMPONENT		"smsgiucv_app"
+#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt
+
+#include <linux/ctype.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <net/iucv/iucv.h>
+#include "smsgiucv.h"
+
+/* prefix used for SMSG registration */
+#define SMSG_PREFIX		"APP"
+
+/* SMSG related uevent environment variables */
+#define ENV_SENDER_STR		"SMSG_SENDER="
+#define ENV_SENDER_LEN		(strlen(ENV_SENDER_STR) + 8 + 1)
+#define ENV_PREFIX_STR		"SMSG_ID="
+#define ENV_PREFIX_LEN		(strlen(ENV_PREFIX_STR) + \
+				 strlen(SMSG_PREFIX) + 1)
+#define ENV_TEXT_STR		"SMSG_TEXT="
+#define ENV_TEXT_LEN(msg)	(strlen(ENV_TEXT_STR) + strlen((msg)) + 1)
+
+/* z/VM user ID which is permitted to send SMSGs.
+ * If the value is undefined or empty (""), special messages are
+ * accepted from any z/VM user ID. */
+static char *sender;
+module_param(sender, charp, 0400);
+MODULE_PARM_DESC(sender, "z/VM user ID from which CP SMSGs are accepted");
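+/* example (illustrative user ID): modprobe smsgiucv_app sender=LNXADM01 */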
+
+/* SMSG device representation */
+static struct device *smsg_app_dev;
+
+/* list element for queuing received messages for delivery */
+struct smsg_app_event {
+	struct list_head list;
+	char *buf;
+	char *envp[4];
+};
+
+/* queue for outgoing uevents */
+static LIST_HEAD(smsg_event_queue);
+static DEFINE_SPINLOCK(smsg_event_queue_lock);
+
+static void smsg_app_event_free(struct smsg_app_event *ev)
+{
+	kfree(ev->buf);
+	kfree(ev);
+}
+
+static struct smsg_app_event *smsg_app_event_alloc(const char *from,
+						   const char *msg)
+{
+	struct smsg_app_event *ev;
+
+	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
+	if (!ev)
+		return NULL;
+
+	ev->buf = kzalloc(ENV_SENDER_LEN + ENV_PREFIX_LEN +
+			  ENV_TEXT_LEN(msg), GFP_ATOMIC);
+	if (!ev->buf) {
+		kfree(ev);
+		return NULL;
+	}
+
+	/* setting up environment pointers into buf */
+	ev->envp[0] = ev->buf;
+	ev->envp[1] = ev->envp[0] + ENV_SENDER_LEN;
+	ev->envp[2] = ev->envp[1] + ENV_PREFIX_LEN;
+	ev->envp[3] = NULL;
+
+	/* setting up environment: sender, prefix name, and message text */
+	snprintf(ev->envp[0], ENV_SENDER_LEN, ENV_SENDER_STR "%s", from);
+	snprintf(ev->envp[1], ENV_PREFIX_LEN, ENV_PREFIX_STR "%s", SMSG_PREFIX);
+	snprintf(ev->envp[2], ENV_TEXT_LEN(msg), ENV_TEXT_STR "%s", msg);
+
+	return ev;
+}
+
+static void smsg_event_work_fn(struct work_struct *work)
+{
+	LIST_HEAD(event_queue);
+	struct smsg_app_event *p, *n;
+	struct device *dev;
+
+	dev = get_device(smsg_app_dev);
+	if (!dev)
+		return;
+
+	spin_lock_bh(&smsg_event_queue_lock);
+	list_splice_init(&smsg_event_queue, &event_queue);
+	spin_unlock_bh(&smsg_event_queue_lock);
+
+	list_for_each_entry_safe(p, n, &event_queue, list) {
+		list_del(&p->list);
+		kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, p->envp);
+		smsg_app_event_free(p);
+	}
+
+	put_device(dev);
+}
+static DECLARE_WORK(smsg_event_work, smsg_event_work_fn);
+
+static void smsg_app_callback(const char *from, char *msg)
+{
+	struct smsg_app_event *se;
+
+	/* check if the originating z/VM user ID matches
+	 * the configured sender. */
+	if (sender && strlen(sender) > 0 && strcmp(from, sender) != 0)
+		return;
+
+	/* get start of message text (skip prefix and leading blanks) */
+	msg += strlen(SMSG_PREFIX);
+	while (*msg && isspace(*msg))
+		msg++;
+	if (*msg == '\0')
+		return;
+
+	/* allocate event list element and its environment */
+	se = smsg_app_event_alloc(from, msg);
+	if (!se)
+		return;
+
+	/* queue event and schedule work function */
+	spin_lock(&smsg_event_queue_lock);
+	list_add_tail(&se->list, &smsg_event_queue);
+	spin_unlock(&smsg_event_queue_lock);
+
+	schedule_work(&smsg_event_work);
+	return;
+}
+
+static int __init smsgiucv_app_init(void)
+{
+	struct device_driver *smsgiucv_drv;
+	int rc;
+
+	if (!MACHINE_IS_VM)
+		return -ENODEV;
+
+	smsg_app_dev = kzalloc(sizeof(*smsg_app_dev), GFP_KERNEL);
+	if (!smsg_app_dev)
+		return -ENOMEM;
+
+	smsgiucv_drv = driver_find(SMSGIUCV_DRV_NAME, &iucv_bus);
+	if (!smsgiucv_drv) {
+		kfree(smsg_app_dev);
+		return -ENODEV;
+	}
+
+	rc = dev_set_name(smsg_app_dev, KMSG_COMPONENT);
+	if (rc) {
+		kfree(smsg_app_dev);
+		goto fail;
+	}
+	smsg_app_dev->bus = &iucv_bus;
+	smsg_app_dev->parent = iucv_root;
+	smsg_app_dev->release = (void (*)(struct device *)) kfree;
+	smsg_app_dev->driver = smsgiucv_drv;
+	rc = device_register(smsg_app_dev);
+	if (rc) {
+		put_device(smsg_app_dev);
+		goto fail;
+	}
+
+	/* convert sender to uppercase characters */
+	if (sender) {
+		int len = strlen(sender);
+		while (len--)
+			sender[len] = toupper(sender[len]);
+	}
+
+	/* register with the smsgiucv device driver */
+	rc = smsg_register_callback(SMSG_PREFIX, smsg_app_callback);
+	if (rc) {
+		device_unregister(smsg_app_dev);
+		goto fail;
+	}
+
+	rc = 0;
+fail:
+	return rc;
+}
+module_init(smsgiucv_app_init);
+
+static void __exit smsgiucv_app_exit(void)
+{
+	/* unregister callback */
+	smsg_unregister_callback(SMSG_PREFIX, smsg_app_callback);
+
+	/* cancel pending work and flush any queued event work */
+	cancel_work_sync(&smsg_event_work);
+	smsg_event_work_fn(&smsg_event_work);
+
+	device_unregister(smsg_app_dev);
+}
+module_exit(smsgiucv_app_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Deliver z/VM CP SMSG as uevents");
+MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/Makefile b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/Makefile
new file mode 100644
index 0000000..c454ffe
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the S/390 specific device drivers
+#
+
+zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_cfdc.o zfcp_dbf.o zfcp_erp.o \
+	     zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
+	     zfcp_unit.o
+
+obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_aux.c b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_aux.c
new file mode 100644
index 0000000..3743ac9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_aux.c
@@ -0,0 +1,608 @@
+/*
+ * zfcp device driver
+ *
+ * Module interface and handling of zfcp data structures.
+ *
+ * Copyright IBM Corp. 2002, 2013
+ */
+
+/*
+ * Driver authors:
+ *            Martin Peschke (originator of the driver)
+ *            Raimund Schroeder
+ *            Aron Zeh
+ *            Wolfgang Taphorn
+ *            Stefan Bader
+ *            Heiko Carstens (kernel 2.6 port of the driver)
+ *            Andreas Herrmann
+ *            Maxim Shchetynin
+ *            Volker Sameske
+ *            Ralph Wuerthner
+ *            Michael Loehr
+ *            Swen Schillig
+ *            Christof Schmitt
+ *            Martin Petermann
+ *            Sven Schuetz
+ *            Steffen Maier
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/miscdevice.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "zfcp_ext.h"
+#include "zfcp_fc.h"
+#include "zfcp_reqlist.h"
+
+#define ZFCP_BUS_ID_SIZE	20
+
+MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com");
+MODULE_DESCRIPTION("FCP HBA driver");
+MODULE_LICENSE("GPL");
+
+static char *init_device;
+module_param_named(device, init_device, charp, 0400);
+MODULE_PARM_DESC(device, "specify initial device");
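+/* example (illustrative values): device=0.0.4711,0x5005076300c671af,0x4010403200000000 */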
+
+static struct kmem_cache * __init zfcp_cache_hw_align(const char *name,
+						      unsigned long size)
+{
+	return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
+}
+
+static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
+{
+	struct ccw_device *cdev;
+	struct zfcp_adapter *adapter;
+	struct zfcp_port *port;
+
+	cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
+	if (!cdev)
+		return;
+
+	if (ccw_device_set_online(cdev))
+		goto out_ccw_device;
+
+	adapter = zfcp_ccw_adapter_by_cdev(cdev);
+	if (!adapter)
+		goto out_ccw_device;
+
+	port = zfcp_get_port_by_wwpn(adapter, wwpn);
+	if (!port)
+		goto out_port;
+	flush_work(&port->rport_work);
+
+	zfcp_unit_add(port, lun);
+	put_device(&port->dev);
+
+out_port:
+	zfcp_ccw_adapter_put(adapter);
+out_ccw_device:
+	put_device(&cdev->dev);
+	return;
+}
+
+static void __init zfcp_init_device_setup(char *devstr)
+{
+	char *token;
+	char *str, *str_saved;
+	char busid[ZFCP_BUS_ID_SIZE];
+	u64 wwpn, lun;
+
+	/* duplicate devstr and keep the original for sysfs presentation */
+	str_saved = kstrdup(devstr, GFP_KERNEL);
+	str = str_saved;
+	if (!str)
+		return;
+
+	token = strsep(&str, ",");
+	if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
+		goto err_out;
+	strncpy(busid, token, ZFCP_BUS_ID_SIZE);
+
+	token = strsep(&str, ",");
+	if (!token || strict_strtoull(token, 0, (unsigned long long *) &wwpn))
+		goto err_out;
+
+	token = strsep(&str, ",");
+	if (!token || strict_strtoull(token, 0, (unsigned long long *) &lun))
+		goto err_out;
+
+	kfree(str_saved);
+	zfcp_init_device_configure(busid, wwpn, lun);
+	return;
+
+err_out:
+	kfree(str_saved);
+	pr_err("%s is not a valid SCSI device\n", devstr);
+}
+
+static int __init zfcp_module_init(void)
+{
+	int retval = -ENOMEM;
+
+	zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
+						  sizeof(struct fsf_qtcb));
+	if (!zfcp_fsf_qtcb_cache)
+		goto out_qtcb_cache;
+
+	zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req",
+						sizeof(struct zfcp_fc_req));
+	if (!zfcp_fc_req_cache)
+		goto out_fc_cache;
+
+	zfcp_scsi_transport_template =
+		fc_attach_transport(&zfcp_transport_functions);
+	if (!zfcp_scsi_transport_template)
+		goto out_transport;
+	scsi_transport_reserve_device(zfcp_scsi_transport_template,
+				      sizeof(struct zfcp_scsi_dev));
+
+
+	retval = misc_register(&zfcp_cfdc_misc);
+	if (retval) {
+		pr_err("Registering the misc device zfcp_cfdc failed\n");
+		goto out_misc;
+	}
+
+	retval = ccw_driver_register(&zfcp_ccw_driver);
+	if (retval) {
+		pr_err("The zfcp device driver could not register with "
+		       "the common I/O layer\n");
+		goto out_ccw_register;
+	}
+
+	if (init_device)
+		zfcp_init_device_setup(init_device);
+	return 0;
+
+out_ccw_register:
+	misc_deregister(&zfcp_cfdc_misc);
+out_misc:
+	fc_release_transport(zfcp_scsi_transport_template);
+out_transport:
+	kmem_cache_destroy(zfcp_fc_req_cache);
+out_fc_cache:
+	kmem_cache_destroy(zfcp_fsf_qtcb_cache);
+out_qtcb_cache:
+	return retval;
+}
+
+module_init(zfcp_module_init);
+
+static void __exit zfcp_module_exit(void)
+{
+	ccw_driver_unregister(&zfcp_ccw_driver);
+	misc_deregister(&zfcp_cfdc_misc);
+	fc_release_transport(zfcp_scsi_transport_template);
+	kmem_cache_destroy(zfcp_fc_req_cache);
+	kmem_cache_destroy(zfcp_fsf_qtcb_cache);
+}
+
+module_exit(zfcp_module_exit);
+
+/**
+ * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
+ * @adapter: pointer to adapter to search for port
+ * @wwpn: wwpn to search for
+ *
+ * Returns: pointer to zfcp_port or NULL
+ */
+struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
+					u64 wwpn)
+{
+	unsigned long flags;
+	struct zfcp_port *port;
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
+		if (port->wwpn == wwpn) {
+			if (!get_device(&port->dev))
+				port = NULL;
+			read_unlock_irqrestore(&adapter->port_list_lock, flags);
+			return port;
+		}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+	return NULL;
+}
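+
+/*
+ * Usage sketch (illustrative only): the lookup above returns with a
+ * reference held on the port's embedded device, so callers follow the
+ * pattern used in zfcp_init_device_configure() and zfcp_port_enqueue()
+ * and drop the reference once they are done:
+ *
+ *	port = zfcp_get_port_by_wwpn(adapter, wwpn);
+ *	if (port) {
+ *		... use the port ...
+ *		put_device(&port->dev);
+ *	}
+ */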
+
+static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
+{
+	adapter->pool.erp_req =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.erp_req)
+		return -ENOMEM;
+
+	adapter->pool.gid_pn_req =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.gid_pn_req)
+		return -ENOMEM;
+
+	adapter->pool.scsi_req =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.scsi_req)
+		return -ENOMEM;
+
+	adapter->pool.scsi_abort =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.scsi_abort)
+		return -ENOMEM;
+
+	adapter->pool.status_read_req =
+		mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM,
+					    sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.status_read_req)
+		return -ENOMEM;
+
+	adapter->pool.qtcb_pool =
+		mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache);
+	if (!adapter->pool.qtcb_pool)
+		return -ENOMEM;
+
+	BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
+	adapter->pool.sr_data =
+		mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
+	if (!adapter->pool.sr_data)
+		return -ENOMEM;
+
+	adapter->pool.gid_pn =
+		mempool_create_slab_pool(1, zfcp_fc_req_cache);
+	if (!adapter->pool.gid_pn)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
+{
+	if (adapter->pool.erp_req)
+		mempool_destroy(adapter->pool.erp_req);
+	if (adapter->pool.gid_pn_req)
+		mempool_destroy(adapter->pool.gid_pn_req);
+	if (adapter->pool.scsi_req)
+		mempool_destroy(adapter->pool.scsi_req);
+	if (adapter->pool.scsi_abort)
+		mempool_destroy(adapter->pool.scsi_abort);
+	if (adapter->pool.qtcb_pool)
+		mempool_destroy(adapter->pool.qtcb_pool);
+	if (adapter->pool.status_read_req)
+		mempool_destroy(adapter->pool.status_read_req);
+	if (adapter->pool.sr_data)
+		mempool_destroy(adapter->pool.sr_data);
+	if (adapter->pool.gid_pn)
+		mempool_destroy(adapter->pool.gid_pn);
+}
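+
+/*
+ * Note (illustrative only): every pool pointer is checked before
+ * mempool_destroy(), so this teardown is safe even when
+ * zfcp_allocate_low_mem_buffers() failed part way through and left the
+ * remaining pools unallocated.
+ */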
+
+/**
+ * zfcp_status_read_refill - refill the long running status_read_requests
+ * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
+ *
+ * Returns: 0 on success, 1 otherwise
+ *
+ * If there are 16 or more status_read requests missing, an adapter reopen
+ * is triggered.
+ */
+int zfcp_status_read_refill(struct zfcp_adapter *adapter)
+{
+	while (atomic_read(&adapter->stat_miss) > 0)
+		if (zfcp_fsf_status_read(adapter->qdio)) {
+			if (atomic_read(&adapter->stat_miss) >=
+			    adapter->stat_read_buf_num) {
+				zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
+				return 1;
+			}
+			break;
+		} else
+			atomic_dec(&adapter->stat_miss);
+	return 0;
+}
+
+static void _zfcp_status_read_scheduler(struct work_struct *work)
+{
+	zfcp_status_read_refill(container_of(work, struct zfcp_adapter,
+					     stat_work));
+}
+
+static void zfcp_print_sl(struct seq_file *m, struct service_level *sl)
+{
+	struct zfcp_adapter *adapter =
+		container_of(sl, struct zfcp_adapter, service_level);
+
+	seq_printf(m, "zfcp: %s microcode level %x\n",
+		   dev_name(&adapter->ccw_device->dev),
+		   adapter->fsf_lic_version);
+}
+
+static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter)
+{
+	char name[TASK_COMM_LEN];
+
+	snprintf(name, sizeof(name), "zfcp_q_%s",
+		 dev_name(&adapter->ccw_device->dev));
+	adapter->work_queue = create_singlethread_workqueue(name);
+
+	if (adapter->work_queue)
+		return 0;
+	return -ENOMEM;
+}
+
+static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
+{
+	if (adapter->work_queue)
+		destroy_workqueue(adapter->work_queue);
+	adapter->work_queue = NULL;
+}
+
+/**
+ * zfcp_adapter_enqueue - enqueue a new adapter to the list
+ * @ccw_device: pointer to the struct ccw_device
+ *
+ * Returns:	struct zfcp_adapter*
+ * Enqueues an adapter at the end of the adapter list in the driver data.
+ * All adapter internal structures are set up.
+ * Sysfs entries are also created.
+ */
+struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
+{
+	struct zfcp_adapter *adapter;
+
+	if (!get_device(&ccw_device->dev))
+		return ERR_PTR(-ENODEV);
+
+	adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
+	if (!adapter) {
+		put_device(&ccw_device->dev);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	kref_init(&adapter->ref);
+
+	ccw_device->handler = NULL;
+	adapter->ccw_device = ccw_device;
+
+	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
+	INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
+	INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
+
+	if (zfcp_qdio_setup(adapter))
+		goto failed;
+
+	if (zfcp_allocate_low_mem_buffers(adapter))
+		goto failed;
+
+	adapter->req_list = zfcp_reqlist_alloc();
+	if (!adapter->req_list)
+		goto failed;
+
+	if (zfcp_dbf_adapter_register(adapter))
+		goto failed;
+
+	if (zfcp_setup_adapter_work_queue(adapter))
+		goto failed;
+
+	if (zfcp_fc_gs_setup(adapter))
+		goto failed;
+
+	rwlock_init(&adapter->port_list_lock);
+	INIT_LIST_HEAD(&adapter->port_list);
+
+	INIT_LIST_HEAD(&adapter->events.list);
+	INIT_WORK(&adapter->events.work, zfcp_fc_post_event);
+	spin_lock_init(&adapter->events.list_lock);
+
+	init_waitqueue_head(&adapter->erp_ready_wq);
+	init_waitqueue_head(&adapter->erp_done_wqh);
+
+	INIT_LIST_HEAD(&adapter->erp_ready_head);
+	INIT_LIST_HEAD(&adapter->erp_running_head);
+
+	rwlock_init(&adapter->erp_lock);
+	rwlock_init(&adapter->abort_lock);
+
+	if (zfcp_erp_thread_setup(adapter))
+		goto failed;
+
+	adapter->service_level.seq_print = zfcp_print_sl;
+
+	dev_set_drvdata(&ccw_device->dev, adapter);
+
+	if (sysfs_create_group(&ccw_device->dev.kobj,
+			       &zfcp_sysfs_adapter_attrs))
+		goto failed;
+
+	/* report size limit per scatter-gather segment */
+	adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
+	adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
+
+	adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
+
+	if (!zfcp_scsi_adapter_register(adapter))
+		return adapter;
+
+failed:
+	zfcp_adapter_unregister(adapter);
+	return ERR_PTR(-ENOMEM);
+}
+
+void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
+{
+	struct ccw_device *cdev = adapter->ccw_device;
+
+	cancel_work_sync(&adapter->scan_work);
+	cancel_work_sync(&adapter->stat_work);
+	cancel_work_sync(&adapter->ns_up_work);
+	zfcp_destroy_adapter_work_queue(adapter);
+
+	zfcp_fc_wka_ports_force_offline(adapter->gs);
+	zfcp_scsi_adapter_unregister(adapter);
+	sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
+
+	zfcp_erp_thread_kill(adapter);
+	zfcp_dbf_adapter_unregister(adapter);
+	zfcp_qdio_destroy(adapter->qdio);
+
+	zfcp_ccw_adapter_put(adapter); /* final put to release */
+}
+
+/**
+ * zfcp_adapter_release - remove the adapter from the resource list
+ * @ref: pointer to struct kref
+ * locks:	adapter list write lock is assumed to be held by caller
+ */
+void zfcp_adapter_release(struct kref *ref)
+{
+	struct zfcp_adapter *adapter = container_of(ref, struct zfcp_adapter,
+						    ref);
+	struct ccw_device *cdev = adapter->ccw_device;
+
+	dev_set_drvdata(&adapter->ccw_device->dev, NULL);
+	zfcp_fc_gs_destroy(adapter);
+	zfcp_free_low_mem_buffers(adapter);
+	kfree(adapter->req_list);
+	kfree(adapter->fc_stats);
+	kfree(adapter->stats_reset_data);
+	kfree(adapter);
+	put_device(&cdev->dev);
+}
+
+/**
+ * zfcp_device_unregister - remove port, unit from system
+ * @dev: reference to device which is to be removed
+ * @grp: related reference to attribute group
+ *
+ * Helper function to unregister port, unit from system
+ */
+void zfcp_device_unregister(struct device *dev,
+			    const struct attribute_group *grp)
+{
+	sysfs_remove_group(&dev->kobj, grp);
+	device_unregister(dev);
+}
+
+static void zfcp_port_release(struct device *dev)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+
+	zfcp_ccw_adapter_put(port->adapter);
+	kfree(port);
+}
+
+/**
+ * zfcp_port_enqueue - enqueue port to port list of adapter
+ * @adapter: adapter where remote port is added
+ * @wwpn: WWPN of the remote port to be enqueued
+ * @status: initial status for the port
+ * @d_id: destination id of the remote port to be enqueued
+ * Returns: pointer to enqueued port on success, ERR_PTR on error
+ *
+ * All port internal structures are set up and the sysfs entry is generated.
+ * d_id is used to enqueue ports with a well known address like the Directory
+ * Service for nameserver lookup.
+ */
+struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
+				     u32 status, u32 d_id)
+{
+	struct zfcp_port *port;
+	int retval = -ENOMEM;
+
+	kref_get(&adapter->ref);
+
+	port = zfcp_get_port_by_wwpn(adapter, wwpn);
+	if (port) {
+		put_device(&port->dev);
+		retval = -EEXIST;
+		goto err_out;
+	}
+
+	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
+	if (!port)
+		goto err_out;
+
+	rwlock_init(&port->unit_list_lock);
+	INIT_LIST_HEAD(&port->unit_list);
+	atomic_set(&port->units, 0);
+
+	INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
+	INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
+	INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
+
+	port->adapter = adapter;
+	port->d_id = d_id;
+	port->wwpn = wwpn;
+	port->rport_task = RPORT_NONE;
+	port->dev.parent = &adapter->ccw_device->dev;
+	port->dev.release = zfcp_port_release;
+
+	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
+		kfree(port);
+		goto err_out;
+	}
+	retval = -EINVAL;
+
+	if (device_register(&port->dev)) {
+		put_device(&port->dev);
+		goto err_out;
+	}
+
+	if (sysfs_create_group(&port->dev.kobj,
+			       &zfcp_sysfs_port_attrs))
+		goto err_out_put;
+
+	write_lock_irq(&adapter->port_list_lock);
+	list_add_tail(&port->list, &adapter->port_list);
+	write_unlock_irq(&adapter->port_list_lock);
+
+	atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
+
+	return port;
+
+err_out_put:
+	device_unregister(&port->dev);
+err_out:
+	zfcp_ccw_adapter_put(adapter);
+	return ERR_PTR(retval);
+}
+
+/**
+ * zfcp_sg_free_table - free memory used by scatterlists
+ * @sg: pointer to scatterlist
+ * @count: number of scatterlist entries which are to be freed
+ *
+ * The scatterlist entries are expected to always reference pages.
+ */
+void zfcp_sg_free_table(struct scatterlist *sg, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++, sg++)
+		if (sg)
+			free_page((unsigned long) sg_virt(sg));
+		else
+			break;
+}
+
+/**
+ * zfcp_sg_setup_table - init scatterlist and allocate, assign buffers
+ * @sg: pointer to struct scatterlist
+ * @count: number of scatterlist entries which should be assigned buffers
+ * of page size
+ *
+ * Returns: 0 on success, -ENOMEM otherwise
+ */
+int zfcp_sg_setup_table(struct scatterlist *sg, int count)
+{
+	void *addr;
+	int i;
+
+	sg_init_table(sg, count);
+	for (i = 0; i < count; i++, sg++) {
+		addr = (void *) get_zeroed_page(GFP_KERNEL);
+		if (!addr) {
+			zfcp_sg_free_table(sg, i);
+			return -ENOMEM;
+		}
+		sg_set_buf(sg, addr, PAGE_SIZE);
+	}
+	return 0;
+}
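+
+/*
+ * Usage sketch (illustrative only): the two scatterlist helpers above are
+ * used as a pair, e.g. by the CFDC ioctl path, which sets up
+ * ZFCP_CFDC_PAGES entries and releases them again on error:
+ *
+ *	if (zfcp_sg_setup_table(sg, ZFCP_CFDC_PAGES))
+ *		return -ENOMEM;
+ *	...
+ *	zfcp_sg_free_table(sg, ZFCP_CFDC_PAGES);
+ */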
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_ccw.c b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_ccw.c
new file mode 100644
index 0000000..79a6afe
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_ccw.c
@@ -0,0 +1,322 @@
+/*
+ * zfcp device driver
+ *
+ * Registration and callback for the s390 common I/O layer.
+ *
+ * Copyright IBM Corporation 2002, 2010
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include "zfcp_ext.h"
+#include "zfcp_reqlist.h"
+
+#define ZFCP_MODEL_PRIV 0x4
+
+static DEFINE_SPINLOCK(zfcp_ccw_adapter_ref_lock);
+
+struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *cdev)
+{
+	struct zfcp_adapter *adapter;
+	unsigned long flags;
+
+	spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
+	adapter = dev_get_drvdata(&cdev->dev);
+	if (adapter)
+		kref_get(&adapter->ref);
+	spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
+	return adapter;
+}
+
+void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
+	kref_put(&adapter->ref, zfcp_adapter_release);
+	spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
+}
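+
+/*
+ * Usage sketch (illustrative only): the two helpers above form a get/put
+ * pair; every successful zfcp_ccw_adapter_by_cdev() is balanced by a
+ * zfcp_ccw_adapter_put(), as in the callbacks below:
+ *
+ *	adapter = zfcp_ccw_adapter_by_cdev(cdev);
+ *	if (!adapter)
+ *		return 0;
+ *	... work with the adapter ...
+ *	zfcp_ccw_adapter_put(adapter);
+ */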
+
+/**
+ * zfcp_ccw_activate - activate adapter and wait for it to finish
+ * @cdev: pointer to belonging ccw device
+ * @clear: Status flags to clear.
+ * @tag: s390dbf trace record tag
+ */
+static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	if (!adapter)
+		return 0;
+
+	zfcp_erp_clear_adapter_status(adapter, clear);
+	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+				tag);
+	zfcp_erp_wait(adapter);
+	flush_work(&adapter->scan_work);
+
+	zfcp_ccw_adapter_put(adapter);
+
+	return 0;
+}
+
+static struct ccw_device_id zfcp_ccw_device_id[] = {
+	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
+	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) },
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
+
+/**
+ * zfcp_ccw_priv_sch - check if subchannel is privileged
+ * @adapter: Adapter/Subchannel to check
+ */
+int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
+{
+	return adapter->ccw_device->id.dev_model == ZFCP_MODEL_PRIV;
+}
+
+/**
+ * zfcp_ccw_probe - probe function of zfcp driver
+ * @cdev: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer for each FCP
+ * device found on the current system. This is only a stub to make cio
+ * work: To only allocate adapter resources for devices actually used,
+ * the allocation is deferred to the first call to ccw_set_online.
+ */
+static int zfcp_ccw_probe(struct ccw_device *cdev)
+{
+	return 0;
+}
+
+/**
+ * zfcp_ccw_remove - remove function of zfcp driver
+ * @cdev: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer and removes an adapter
+ * from the system. Its task is to remove all units and ports that belong
+ * to this adapter, and to free all of the adapter's resources as well.
+ */
+static void zfcp_ccw_remove(struct ccw_device *cdev)
+{
+	struct zfcp_adapter *adapter;
+	struct zfcp_port *port, *p;
+	struct zfcp_unit *unit, *u;
+	LIST_HEAD(unit_remove_lh);
+	LIST_HEAD(port_remove_lh);
+
+	ccw_device_set_offline(cdev);
+
+	adapter = zfcp_ccw_adapter_by_cdev(cdev);
+	if (!adapter)
+		return;
+
+	write_lock_irq(&adapter->port_list_lock);
+	list_for_each_entry_safe(port, p, &adapter->port_list, list) {
+		write_lock(&port->unit_list_lock);
+		list_for_each_entry_safe(unit, u, &port->unit_list, list)
+			list_move(&unit->list, &unit_remove_lh);
+		write_unlock(&port->unit_list_lock);
+		list_move(&port->list, &port_remove_lh);
+	}
+	write_unlock_irq(&adapter->port_list_lock);
+	zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
+
+	list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
+		zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
+
+	list_for_each_entry_safe(port, p, &port_remove_lh, list)
+		zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
+
+	zfcp_adapter_unregister(adapter);
+}
+
+/**
+ * zfcp_ccw_set_online - set_online function of zfcp driver
+ * @cdev: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer and sets an
+ * adapter into state online.  The first call will allocate all
+ * adapter resources that will be retained until the device is removed
+ * via zfcp_ccw_remove.
+ *
+ * Setting an fcp device online means that it will be registered with
+ * the SCSI stack, that the QDIO queues will be set up and that the
+ * adapter will be opened.
+ */
+static int zfcp_ccw_set_online(struct ccw_device *cdev)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	if (!adapter) {
+		adapter = zfcp_adapter_enqueue(cdev);
+
+		if (IS_ERR(adapter)) {
+			dev_err(&cdev->dev,
+				"Setting up data structures for the "
+				"FCP adapter failed\n");
+			return PTR_ERR(adapter);
+		}
+		kref_get(&adapter->ref);
+	}
+
+	/* initialize request counter */
+	BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
+	adapter->req_no = 0;
+
+	zfcp_ccw_activate(cdev, 0, "ccsonl1");
+	zfcp_ccw_adapter_put(adapter);
+	return 0;
+}
+
+/**
+ * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish
+ * @cdev: pointer to belonging ccw device
+ * @set: Status flags to set.
+ * @tag: s390dbf trace record tag
+ *
+ * This function gets called by the common i/o layer and sets an adapter
+ * into state offline.
+ */
+static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	if (!adapter)
+		return 0;
+
+	zfcp_erp_set_adapter_status(adapter, set);
+	zfcp_erp_adapter_shutdown(adapter, 0, tag);
+	zfcp_erp_wait(adapter);
+
+	zfcp_ccw_adapter_put(adapter);
+	return 0;
+}
+
+/**
+ * zfcp_ccw_set_offline - set_offline function of zfcp driver
+ * @cdev: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer and sets an adapter
+ * into state offline.
+ */
+static int zfcp_ccw_set_offline(struct ccw_device *cdev)
+{
+	return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
+}
+
+/**
+ * zfcp_ccw_notify - ccw notify function
+ * @cdev: pointer to belonging ccw device
+ * @event: indicates if adapter was detached or attached
+ *
+ * This function gets called by the common i/o layer if an adapter has gone
+ * or reappeared.
+ */
+static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	if (!adapter)
+		return 1;
+
+	switch (event) {
+	case CIO_GONE:
+		if (atomic_read(&adapter->status) &
+		    ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
+			zfcp_dbf_hba_basic("ccnigo1", adapter);
+			break;
+		}
+		dev_warn(&cdev->dev, "The FCP device has been detached\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
+		break;
+	case CIO_NO_PATH:
+		dev_warn(&cdev->dev,
+			 "The CHPID for the FCP device is offline\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
+		break;
+	case CIO_OPER:
+		if (atomic_read(&adapter->status) &
+		    ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
+			zfcp_dbf_hba_basic("ccniop1", adapter);
+			break;
+		}
+		dev_info(&cdev->dev, "The FCP device is operational again\n");
+		zfcp_erp_set_adapter_status(adapter,
+					    ZFCP_STATUS_COMMON_RUNNING);
+		zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+					"ccnoti4");
+		break;
+	case CIO_BOXED:
+		dev_warn(&cdev->dev, "The FCP device did not respond within "
+				     "the specified time\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5");
+		break;
+	}
+
+	zfcp_ccw_adapter_put(adapter);
+	return 1;
+}
+
+/**
+ * zfcp_ccw_shutdown - handle shutdown from cio
+ * @cdev: device for adapter to shutdown.
+ */
+static void zfcp_ccw_shutdown(struct ccw_device *cdev)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	if (!adapter)
+		return;
+
+	zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1");
+	zfcp_erp_wait(adapter);
+	zfcp_erp_thread_kill(adapter);
+
+	zfcp_ccw_adapter_put(adapter);
+}
+
+static int zfcp_ccw_suspend(struct ccw_device *cdev)
+{
+	zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
+	return 0;
+}
+
+static int zfcp_ccw_thaw(struct ccw_device *cdev)
+{
+	/* trace records for thaw and final shutdown during suspend
+	   can only be found in system dump until the end of suspend
+	   but not after resume because it's based on the memory image
+	   right after the very first suspend (freeze) callback */
+	zfcp_ccw_activate(cdev, 0, "ccthaw1");
+	return 0;
+}
+
+static int zfcp_ccw_resume(struct ccw_device *cdev)
+{
+	zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
+	return 0;
+}
+
+struct ccw_driver zfcp_ccw_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "zfcp",
+	},
+	.ids         = zfcp_ccw_device_id,
+	.probe       = zfcp_ccw_probe,
+	.remove      = zfcp_ccw_remove,
+	.set_online  = zfcp_ccw_set_online,
+	.set_offline = zfcp_ccw_set_offline,
+	.notify      = zfcp_ccw_notify,
+	.shutdown    = zfcp_ccw_shutdown,
+	.freeze      = zfcp_ccw_suspend,
+	.thaw	     = zfcp_ccw_thaw,
+	.restore     = zfcp_ccw_resume,
+};
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_cfdc.c b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_cfdc.c
new file mode 100644
index 0000000..8ed63aa
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_cfdc.c
@@ -0,0 +1,446 @@
+/*
+ * zfcp device driver
+ *
+ * Userspace interface for accessing the
+ * Access Control Lists / Control File Data Channel;
+ * handling of response code and states for ports and LUNs.
+ *
+ * Copyright IBM Corporation 2008, 2010
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <asm/compat.h>
+#include <asm/ccwdev.h>
+#include "zfcp_def.h"
+#include "zfcp_ext.h"
+#include "zfcp_fsf.h"
+
+#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL		0x00010001
+#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE		0x00010101
+#define ZFCP_CFDC_CMND_FULL_ACCESS		0x00000201
+#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS	0x00000401
+#define ZFCP_CFDC_CMND_UPLOAD			0x00010002
+
+#define ZFCP_CFDC_DOWNLOAD			0x00000001
+#define ZFCP_CFDC_UPLOAD			0x00000002
+#define ZFCP_CFDC_WITH_CONTROL_FILE		0x00010000
+
+#define ZFCP_CFDC_IOC_MAGIC                     0xDD
+#define ZFCP_CFDC_IOC \
+	_IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_data)
+
+/**
+ * struct zfcp_cfdc_data - data for ioctl cfdc interface
+ * @signature: request signature
+ * @devno: FCP adapter device number
+ * @command: command code
+ * @fsf_status: returns status of FSF command to userspace
+ * @fsf_status_qual: returned to userspace
+ * @payloads: access conflicts list
+ * @control_file: access control table
+ */
+struct zfcp_cfdc_data {
+	u32 signature;
+	u32 devno;
+	u32 command;
+	u32 fsf_status;
+	u8  fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+	u8  payloads[256];
+	u8  control_file[0];
+};
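+
+/*
+ * Usage sketch (illustrative only): a user space caller of the ioctl
+ * defined above would, roughly, fill in the header fields and issue
+ * ZFCP_CFDC_IOC on the misc device node; the device path, devno and
+ * buffer sizing below are assumed placeholders and error handling is
+ * omitted:
+ *
+ *	int fd = open("/dev/zfcp_cfdc", O_RDWR);
+ *	struct zfcp_cfdc_data *data = calloc(1, sizeof(*data) + cf_size);
+ *
+ *	data->signature = 0xCFDCACDF;
+ *	data->devno = 0x4711;
+ *	data->command = ZFCP_CFDC_CMND_UPLOAD;
+ *	if (ioctl(fd, ZFCP_CFDC_IOC, data) < 0)
+ *		perror("ZFCP_CFDC_IOC");
+ */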
+
+static int zfcp_cfdc_copy_from_user(struct scatterlist *sg,
+				    void __user *user_buffer)
+{
+	unsigned int length;
+	unsigned int size = ZFCP_CFDC_MAX_SIZE;
+
+	while (size) {
+		length = min((unsigned int)size, sg->length);
+		if (copy_from_user(sg_virt(sg++), user_buffer, length))
+			return -EFAULT;
+		user_buffer += length;
+		size -= length;
+	}
+	return 0;
+}
+
+static int zfcp_cfdc_copy_to_user(void __user  *user_buffer,
+				  struct scatterlist *sg)
+{
+	unsigned int length;
+	unsigned int size = ZFCP_CFDC_MAX_SIZE;
+
+	while (size) {
+		length = min((unsigned int) size, sg->length);
+		if (copy_to_user(user_buffer, sg_virt(sg++), length))
+			return -EFAULT;
+		user_buffer += length;
+		size -= length;
+	}
+	return 0;
+}
+
+static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
+{
+	char busid[9];
+	struct ccw_device *cdev;
+	struct zfcp_adapter *adapter;
+
+	snprintf(busid, sizeof(busid), "0.0.%04x", devno);
+	cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
+	if (!cdev)
+		return NULL;
+
+	adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	put_device(&cdev->dev);
+	return adapter;
+}
+
+static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command)
+{
+	switch (command) {
+	case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL:
+		fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
+		fsf_cfdc->option = FSF_CFDC_OPTION_NORMAL_MODE;
+		break;
+	case ZFCP_CFDC_CMND_DOWNLOAD_FORCE:
+		fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
+		fsf_cfdc->option = FSF_CFDC_OPTION_FORCE;
+		break;
+	case ZFCP_CFDC_CMND_FULL_ACCESS:
+		fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
+		fsf_cfdc->option = FSF_CFDC_OPTION_FULL_ACCESS;
+		break;
+	case ZFCP_CFDC_CMND_RESTRICTED_ACCESS:
+		fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
+		fsf_cfdc->option = FSF_CFDC_OPTION_RESTRICTED_ACCESS;
+		break;
+	case ZFCP_CFDC_CMND_UPLOAD:
+		fsf_cfdc->command = FSF_QTCB_UPLOAD_CONTROL_FILE;
+		fsf_cfdc->option = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int zfcp_cfdc_sg_setup(int command, struct scatterlist *sg,
+			      u8 __user *control_file)
+{
+	int retval;
+	retval = zfcp_sg_setup_table(sg, ZFCP_CFDC_PAGES);
+	if (retval)
+		return retval;
+
+	sg[ZFCP_CFDC_PAGES - 1].length = ZFCP_CFDC_MAX_SIZE % PAGE_SIZE;
+
+	if (command & ZFCP_CFDC_WITH_CONTROL_FILE &&
+	    command & ZFCP_CFDC_DOWNLOAD) {
+		retval = zfcp_cfdc_copy_from_user(sg, control_file);
+		if (retval) {
+			zfcp_sg_free_table(sg, ZFCP_CFDC_PAGES);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data,
+				   struct zfcp_fsf_req *req)
+{
+	data->fsf_status = req->qtcb->header.fsf_status;
+	memcpy(&data->fsf_status_qual, &req->qtcb->header.fsf_status_qual,
+	       sizeof(union fsf_status_qual));
+	memcpy(&data->payloads, &req->qtcb->bottom.support.els,
+	       sizeof(req->qtcb->bottom.support.els));
+}
+
+static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
+				unsigned long arg)
+{
+	struct zfcp_cfdc_data *data;
+	struct zfcp_cfdc_data __user *data_user;
+	struct zfcp_adapter *adapter;
+	struct zfcp_fsf_req *req;
+	struct zfcp_fsf_cfdc *fsf_cfdc;
+	int retval;
+
+	if (command != ZFCP_CFDC_IOC)
+		return -ENOTTY;
+
+	if (is_compat_task())
+		data_user = compat_ptr(arg);
+	else
+		data_user = (void __user *)arg;
+
+	if (!data_user)
+		return -EINVAL;
+
+	fsf_cfdc = kmalloc(sizeof(struct zfcp_fsf_cfdc), GFP_KERNEL);
+	if (!fsf_cfdc)
+		return -ENOMEM;
+
+	data = memdup_user(data_user, sizeof(*data_user));
+	if (IS_ERR(data)) {
+		retval = PTR_ERR(data);
+		goto no_mem_sense;
+	}
+
+	if (data->signature != 0xCFDCACDF) {
+		retval = -EINVAL;
+		goto free_buffer;
+	}
+
+	retval = zfcp_cfdc_set_fsf(fsf_cfdc, data->command);
+
+	adapter = zfcp_cfdc_get_adapter(data->devno);
+	if (!adapter) {
+		retval = -ENXIO;
+		goto free_buffer;
+	}
+
+	retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg,
+				    data_user->control_file);
+	if (retval)
+		goto adapter_put;
+	req = zfcp_fsf_control_file(adapter, fsf_cfdc);
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto free_sg;
+	}
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
+		retval = -ENXIO;
+		goto free_fsf;
+	}
+
+	zfcp_cfdc_req_to_sense(data, req);
+	retval = copy_to_user(data_user, data, sizeof(*data_user));
+	if (retval) {
+		retval = -EFAULT;
+		goto free_fsf;
+	}
+
+	if (data->command & ZFCP_CFDC_UPLOAD)
+		retval = zfcp_cfdc_copy_to_user(&data_user->control_file,
+						fsf_cfdc->sg);
+
+ free_fsf:
+	zfcp_fsf_req_free(req);
+ free_sg:
+	zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES);
+ adapter_put:
+	zfcp_ccw_adapter_put(adapter);
+ free_buffer:
+	kfree(data);
+ no_mem_sense:
+	kfree(fsf_cfdc);
+	return retval;
+}
+
+static const struct file_operations zfcp_cfdc_fops = {
+	.open = nonseekable_open,
+	.unlocked_ioctl = zfcp_cfdc_dev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = zfcp_cfdc_dev_ioctl,
+#endif
+	.llseek = no_llseek,
+};
+
+struct miscdevice zfcp_cfdc_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "zfcp_cfdc",
+	.fops = &zfcp_cfdc_fops,
+};
+
+/**
+ * zfcp_cfdc_adapter_access_changed - Process change in adapter ACT
+ * @adapter: Adapter where the Access Control Table (ACT) changed
+ *
+ * After a change in the adapter ACT, check if access to any
+ * previously denied resources is now possible.
+ */
+void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
+{
+	unsigned long flags;
+	struct zfcp_port *port;
+	struct scsi_device *sdev;
+	struct zfcp_scsi_dev *zfcp_sdev;
+	int status;
+
+	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
+		return;
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list) {
+		status = atomic_read(&port->status);
+		if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
+		    (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
+			zfcp_erp_port_reopen(port,
+					     ZFCP_STATUS_COMMON_ERP_FAILED,
+					     "cfaac_1");
+	}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+	shost_for_each_device(sdev, adapter->scsi_host) {
+		zfcp_sdev = sdev_to_zfcp(sdev);
+		status = atomic_read(&zfcp_sdev->status);
+		if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
+		    (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
+			zfcp_erp_lun_reopen(sdev,
+					    ZFCP_STATUS_COMMON_ERP_FAILED,
+					    "cfaac_2");
+	}
+}
+
+static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
+{
+	u16 subtable = table >> 16;
+	u16 rule = table & 0xffff;
+	const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
+
+	if (subtable && subtable < ARRAY_SIZE(act_type))
+		dev_warn(&adapter->ccw_device->dev,
+			 "Access denied according to ACT rule type %s, "
+			 "rule %d\n", act_type[subtable], rule);
+}
+
+/**
+ * zfcp_cfdc_port_denied - Process "access denied" for port
+ * @port: The port where the access has been denied
+ * @qual: The FSF status qualifier for the access denied FSF status
+ */
+void zfcp_cfdc_port_denied(struct zfcp_port *port,
+			   union fsf_status_qual *qual)
+{
+	dev_warn(&port->adapter->ccw_device->dev,
+		 "Access denied to port 0x%016Lx\n",
+		 (unsigned long long)port->wwpn);
+
+	zfcp_act_eval_err(port->adapter, qual->halfword[0]);
+	zfcp_act_eval_err(port->adapter, qual->halfword[1]);
+	zfcp_erp_set_port_status(port,
+				 ZFCP_STATUS_COMMON_ERP_FAILED |
+				 ZFCP_STATUS_COMMON_ACCESS_DENIED);
+}
+
+/**
+ * zfcp_cfdc_lun_denied - Process "access denied" for LUN
+ * @sdev: The SCSI device / LUN where the access has been denied
+ * @qual: The FSF status qualifier for the access denied FSF status
+ */
+void zfcp_cfdc_lun_denied(struct scsi_device *sdev,
+			  union fsf_status_qual *qual)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
+		 "Access denied to LUN 0x%016Lx on port 0x%016Lx\n",
+		 zfcp_scsi_dev_lun(sdev),
+		 (unsigned long long)zfcp_sdev->port->wwpn);
+	zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[0]);
+	zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[1]);
+	zfcp_erp_set_lun_status(sdev,
+				ZFCP_STATUS_COMMON_ERP_FAILED |
+				ZFCP_STATUS_COMMON_ACCESS_DENIED);
+
+	atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
+	atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
+}
+
+/**
+ * zfcp_cfdc_lun_shrng_vltn - Evaluate LUN sharing violation status
+ * @sdev: The LUN / SCSI device where sharing violation occurred
+ * @qual: The FSF status qualifier from the LUN sharing violation
+ */
+void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *sdev,
+			      union fsf_status_qual *qual)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	if (qual->word[0])
+		dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
+			 "LUN 0x%Lx on port 0x%Lx is already in "
+			 "use by CSS%d, MIF Image ID %x\n",
+			 zfcp_scsi_dev_lun(sdev),
+			 (unsigned long long)zfcp_sdev->port->wwpn,
+			 qual->fsf_queue_designator.cssid,
+			 qual->fsf_queue_designator.hla);
+	else
+		zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->word[2]);
+
+	zfcp_erp_set_lun_status(sdev,
+				ZFCP_STATUS_COMMON_ERP_FAILED |
+				ZFCP_STATUS_COMMON_ACCESS_DENIED);
+	atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
+	atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
+}
+
+/**
+ * zfcp_cfdc_open_lun_eval - Eval access ctrl. status for successful "open lun"
+ * @sdev: The SCSI device / LUN where to evaluate the status
+ * @bottom: The qtcb bottom with the status from the "open lun"
+ *
+ * Returns: 0 if LUN is usable, -EACCES if the access control table
+ *          reports an unsupported configuration.
+ */
+int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
+			    struct fsf_qtcb_bottom_support *bottom)
+{
+	int shared, rw;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+
+	if ((adapter->connection_features & FSF_FEATURE_NPIV_MODE) ||
+	    !(adapter->adapter_features & FSF_FEATURE_LUN_SHARING) ||
+	    zfcp_ccw_priv_sch(adapter))
+		return 0;
+
+	shared = !(bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE);
+	rw = (bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
+
+	if (shared)
+		atomic_set_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
+
+	if (!rw) {
+		atomic_set_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
+		dev_info(&adapter->ccw_device->dev, "SCSI device at LUN "
+			 "0x%016Lx on port 0x%016Lx opened read-only\n",
+			 zfcp_scsi_dev_lun(sdev),
+			 (unsigned long long)zfcp_sdev->port->wwpn);
+	}
+
+	if (!shared && !rw) {
+		dev_err(&adapter->ccw_device->dev, "Exclusive read-only access "
+			"not supported (LUN 0x%016Lx, port 0x%016Lx)\n",
+			zfcp_scsi_dev_lun(sdev),
+			(unsigned long long)zfcp_sdev->port->wwpn);
+		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
+		zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6");
+		return -EACCES;
+	}
+
+	if (shared && rw) {
+		dev_err(&adapter->ccw_device->dev,
+			"Shared read-write access not supported "
+			"(LUN 0x%016Lx, port 0x%016Lx)\n",
+			zfcp_scsi_dev_lun(sdev),
+			(unsigned long long)zfcp_sdev->port->wwpn);
+		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
+		zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8");
+		return -EACCES;
+	}
+
+	return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_dbf.c b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_dbf.c
new file mode 100644
index 0000000..79b9848
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_dbf.c
@@ -0,0 +1,537 @@
+/*
+ * zfcp device driver
+ *
+ * Debug traces for zfcp.
+ *
+ * Copyright IBM Corporation 2002, 2010
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <asm/debug.h>
+#include "zfcp_dbf.h"
+#include "zfcp_ext.h"
+#include "zfcp_fc.h"
+
+static u32 dbfsize = 4;
+
+module_param(dbfsize, uint, 0400);
+MODULE_PARM_DESC(dbfsize,
+		 "number of pages for each debug feature area (default 4)");
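+
+/*
+ * Note (illustrative only): the number of pages per trace area can be
+ * raised at load time, e.g.
+ *
+ *	modprobe zfcp dbfsize=8
+ *
+ * or with "zfcp.dbfsize=8" on the kernel command line for a built-in
+ * driver; the value shown is an example only.
+ */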
+
+static inline unsigned int zfcp_dbf_plen(unsigned int offset)
+{
+	return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
+}
+
+static inline
+void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
+		       u64 req_id)
+{
+	struct zfcp_dbf_pay *pl = &dbf->pay_buf;
+	u16 offset = 0, rec_length;
+
+	spin_lock(&dbf->pay_lock);
+	memset(pl, 0, sizeof(*pl));
+	pl->fsf_req_id = req_id;
+	memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);
+
+	while (offset < length) {
+		rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
+				 (u16) (length - offset));
+		memcpy(pl->data, data + offset, rec_length);
+		debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));
+
+		offset += rec_length;
+		pl->counter++;
+	}
+
+	spin_unlock(&dbf->pay_lock);
+}
+
+/**
+ * zfcp_dbf_hba_fsf_res - trace event for fsf responses
+ * @tag: tag indicating the kind of FSF response that has been received
+ * @req: request for which a response was received
+ */
+void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
+{
+	struct zfcp_dbf *dbf = req->adapter->dbf;
+	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
+	struct fsf_qtcb_header *q_head = &req->qtcb->header;
+	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dbf->hba_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_HBA_RES;
+	rec->fsf_req_id = req->req_id;
+	rec->fsf_req_status = req->status;
+	rec->fsf_cmd = req->fsf_command;
+	rec->fsf_seq_no = req->seq_no;
+	rec->u.res.req_issued = req->issued;
+	rec->u.res.prot_status = q_pref->prot_status;
+	rec->u.res.fsf_status = q_head->fsf_status;
+
+	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
+	       FSF_PROT_STATUS_QUAL_SIZE);
+	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
+	       FSF_STATUS_QUALIFIER_SIZE);
+
+	if (req->fsf_command != FSF_QTCB_FCP_CMND) {
+		rec->pl_len = q_head->log_length;
+		zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
+				  rec->pl_len, "fsf_res", req->req_id);
+	}
+
+	debug_event(dbf->hba, 1, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
+/**
+ * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
+ * @tag: tag indicating which kind of unsolicited status has been received
+ * @req: request providing the unsolicited status
+ */
+void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
+{
+	struct zfcp_dbf *dbf = req->adapter->dbf;
+	struct fsf_status_read_buffer *srb = req->data;
+	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dbf->hba_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_HBA_USS;
+	rec->fsf_req_id = req->req_id;
+	rec->fsf_req_status = req->status;
+	rec->fsf_cmd = req->fsf_command;
+
+	if (!srb)
+		goto log;
+
+	rec->u.uss.status_type = srb->status_type;
+	rec->u.uss.status_subtype = srb->status_subtype;
+	rec->u.uss.d_id = ntoh24(srb->d_id);
+	rec->u.uss.lun = srb->fcp_lun;
+	memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
+	       sizeof(rec->u.uss.queue_designator));
+
+	/* status read buffer payload length */
+	rec->pl_len = (!srb->length) ? 0 : srb->length -
+			offsetof(struct fsf_status_read_buffer, payload);
+
+	if (rec->pl_len)
+		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
+				  "fsf_uss", req->req_id);
+log:
+	debug_event(dbf->hba, 2, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
+/**
+ * zfcp_dbf_hba_bit_err - trace event for bit error conditions
+ * @tag: identifier for event
+ * @req: request which caused the bit_error condition
+ */
+void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
+{
+	struct zfcp_dbf *dbf = req->adapter->dbf;
+	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+	struct fsf_status_read_buffer *sr_buf = req->data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dbf->hba_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_HBA_BIT;
+	rec->fsf_req_id = req->req_id;
+	rec->fsf_req_status = req->status;
+	rec->fsf_cmd = req->fsf_command;
+	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
+	       sizeof(struct fsf_bit_error_payload));
+
+	debug_event(dbf->hba, 1, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
+/**
+ * zfcp_dbf_hba_def_err - trace event for deferred error messages
+ * @adapter: pointer to struct zfcp_adapter
+ * @req_id: request id which caused the deferred error message
+ * @scount: number of sbals incl. the signaling sbal
+ * @pl: array of all involved sbals
+ */
+void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
+			  void **pl)
+{
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
+	unsigned long flags;
+	u16 length;
+
+	if (!pl)
+		return;
+
+	spin_lock_irqsave(&dbf->pay_lock, flags);
+	memset(payload, 0, sizeof(*payload));
+
+	memcpy(payload->area, "def_err", 7);
+	payload->fsf_req_id = req_id;
+	payload->counter = 0;
+	length = min((u16)sizeof(struct qdio_buffer),
+		     (u16)ZFCP_DBF_PAY_MAX_REC);
+
+	while (payload->counter < scount && (char *)pl[payload->counter]) {
+		memcpy(payload->data, (char *)pl[payload->counter], length);
+		debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
+		payload->counter++;
+	}
+
+	spin_unlock_irqrestore(&dbf->pay_lock, flags);
+}
+
+/**
+ * zfcp_dbf_hba_basic - trace event for basic adapter events
+ * @tag: identifier for event
+ * @adapter: pointer to struct zfcp_adapter
+ */
+void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
+{
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dbf->hba_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_HBA_BASIC;
+
+	debug_event(dbf->hba, 1, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
+static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
+				struct zfcp_adapter *adapter,
+				struct zfcp_port *port,
+				struct scsi_device *sdev)
+{
+	rec->adapter_status = atomic_read(&adapter->status);
+	if (port) {
+		rec->port_status = atomic_read(&port->status);
+		rec->wwpn = port->wwpn;
+		rec->d_id = port->d_id;
+	}
+	if (sdev) {
+		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
+		rec->lun = zfcp_scsi_dev_lun(sdev);
+	}
+}
+
+/**
+ * zfcp_dbf_rec_trig - trace event related to triggered recovery
+ * @tag: identifier for event
+ * @adapter: adapter on which the erp_action should run
+ * @port: remote port involved in the erp_action
+ * @sdev: scsi device involved in the erp_action
+ * @want: wanted erp_action
+ * @need: required erp_action
+ *
+ * The adapter->erp_lock has to be held.
+ */
+void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
+		       struct zfcp_port *port, struct scsi_device *sdev,
+		       u8 want, u8 need)
+{
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+	struct list_head *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dbf->rec_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	rec->id = ZFCP_DBF_REC_TRIG;
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	zfcp_dbf_set_common(rec, adapter, port, sdev);
+
+	list_for_each(entry, &adapter->erp_ready_head)
+		rec->u.trig.ready++;
+
+	list_for_each(entry, &adapter->erp_running_head)
+		rec->u.trig.running++;
+
+	rec->u.trig.want = want;
+	rec->u.trig.need = need;
+
+	debug_event(dbf->rec, 1, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->rec_lock, flags);
+}
+
+/**
+ * zfcp_dbf_rec_run - trace event related to running recovery
+ * @tag: identifier for event
+ * @erp: erp_action running
+ */
+void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+{
+	struct zfcp_dbf *dbf = erp->adapter->dbf;
+	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dbf->rec_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	rec->id = ZFCP_DBF_REC_RUN;
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);
+
+	rec->u.run.fsf_req_id = erp->fsf_req_id;
+	rec->u.run.rec_status = erp->status;
+	rec->u.run.rec_step = erp->step;
+	rec->u.run.rec_action = erp->action;
+
+	if (erp->sdev)
+		rec->u.run.rec_count =
+			atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
+	else if (erp->port)
+		rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
+	else
+		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
+
+	debug_event(dbf->rec, 1, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->rec_lock, flags);
+}
+
+static inline
+void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
+		  u64 req_id, u32 d_id)
+{
+	struct zfcp_dbf_san *rec = &dbf->san_buf;
+	u16 rec_len;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dbf->san_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	rec->id = id;
+	rec->fsf_req_id = req_id;
+	rec->d_id = d_id;
+	rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
+	memcpy(rec->payload, data, rec_len);
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+
+	debug_event(dbf->san, 1, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->san_lock, flags);
+}
+
+/**
+ * zfcp_dbf_san_req - trace event for issued SAN request
+ * @tag: identifier for event
+ * @fsf: request containing issued CT data
+ * @d_id: destination ID
+ */
+void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
+{
+	struct zfcp_dbf *dbf = fsf->adapter->dbf;
+	struct zfcp_fsf_ct_els *ct_els = fsf->data;
+	u16 length;
+
+	length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
+	zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
+		     fsf->req_id, d_id);
+}
+
+/**
+ * zfcp_dbf_san_res - trace event for received SAN response
+ * @tag: identifier for event
+ * @fsf: request containing the received CT response
+ */
+void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
+{
+	struct zfcp_dbf *dbf = fsf->adapter->dbf;
+	struct zfcp_fsf_ct_els *ct_els = fsf->data;
+	u16 length;
+
+	length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
+	zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
+		     fsf->req_id, 0);
+}
+
+/**
+ * zfcp_dbf_san_in_els - trace event for incoming ELS
+ * @tag: identifier for event
+ * @fsf: request carrying the incoming unsolicited ELS payload
+ */
+void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
+{
+	struct zfcp_dbf *dbf = fsf->adapter->dbf;
+	struct fsf_status_read_buffer *srb =
+		(struct fsf_status_read_buffer *) fsf->data;
+	u16 length;
+
+	length = (u16)(srb->length -
+			offsetof(struct fsf_status_read_buffer, payload));
+	zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
+		     fsf->req_id, ntoh24(srb->d_id));
+}
+
+/**
+ * zfcp_dbf_scsi - trace event for scsi commands
+ * @tag: identifier for event
+ * @sc: pointer to struct scsi_cmnd
+ * @fsf: pointer to struct zfcp_fsf_req
+ */
+void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
+{
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *) sc->device->host->hostdata[0];
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
+	struct fcp_resp_with_ext *fcp_rsp;
+	struct fcp_resp_rsp_info *fcp_rsp_info;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dbf->scsi_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_SCSI_CMND;
+	rec->scsi_result = sc->result;
+	rec->scsi_retries = sc->retries;
+	rec->scsi_allowed = sc->allowed;
+	rec->scsi_id = sc->device->id;
+	rec->scsi_lun = sc->device->lun;
+	rec->host_scribble = (unsigned long)sc->host_scribble;
+
+	memcpy(rec->scsi_opcode, sc->cmnd,
+	       min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
+
+	if (fsf) {
+		rec->fsf_req_id = fsf->req_id;
+		fcp_rsp = (struct fcp_resp_with_ext *)
+				&(fsf->qtcb->bottom.io.fcp_rsp);
+		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
+		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
+			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
+		}
+		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
+			rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
+					  (u16)ZFCP_DBF_PAY_MAX_REC);
+			zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
+					  "fcp_sns", fsf->req_id);
+		}
+	}
+
+	debug_event(dbf->scsi, 1, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
+}
+
+static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
+{
+	struct debug_info *d;
+
+	d = debug_register(name, size, 1, rec_size);
+	if (!d)
+		return NULL;
+
+	debug_register_view(d, &debug_hex_ascii_view);
+	debug_set_level(d, 3);
+
+	return d;
+}
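+
+/*
+ * Note (illustrative only): areas registered through the helper above are
+ * exposed by the s390 debug feature, typically under debugfs, e.g.
+ *
+ *	/sys/kernel/debug/s390dbf/zfcp_0.0.4711_hba/hex_ascii
+ *
+ * The bus ID in the path is a placeholder; the trace level can be changed
+ * at run time via the "level" file in the same directory.
+ */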
+
+static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
+{
+	if (!dbf)
+		return;
+
+	debug_unregister(dbf->scsi);
+	debug_unregister(dbf->san);
+	debug_unregister(dbf->hba);
+	debug_unregister(dbf->pay);
+	debug_unregister(dbf->rec);
+	kfree(dbf);
+}
+
+/**
+ * zfcp_dbf_adapter_register - registers debug feature for an adapter
+ * @adapter: pointer to adapter for which debug features should be registered
+ *
+ * Returns: -ENOMEM on error, 0 otherwise
+ */
+int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
+{
+	char name[DEBUG_MAX_NAME_LEN];
+	struct zfcp_dbf *dbf;
+
+	dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
+	if (!dbf)
+		return -ENOMEM;
+
+	spin_lock_init(&dbf->pay_lock);
+	spin_lock_init(&dbf->hba_lock);
+	spin_lock_init(&dbf->san_lock);
+	spin_lock_init(&dbf->scsi_lock);
+	spin_lock_init(&dbf->rec_lock);
+
+	/* debug feature area which records recovery activity */
+	sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
+	dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
+	if (!dbf->rec)
+		goto err_out;
+
+	/* debug feature area which records HBA (FSF and QDIO) conditions */
+	sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
+	dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
+	if (!dbf->hba)
+		goto err_out;
+
+	/* debug feature area which records payload info */
+	sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
+	dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
+	if (!dbf->pay)
+		goto err_out;
+
+	/* debug feature area which records SAN command failures and recovery */
+	sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
+	dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
+	if (!dbf->san)
+		goto err_out;
+
+	/* debug feature area which records SCSI command failures and recovery */
+	sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
+	dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
+	if (!dbf->scsi)
+		goto err_out;
+
+	adapter->dbf = dbf;
+
+	return 0;
+err_out:
+	zfcp_dbf_unregister(dbf);
+	return -ENOMEM;
+}
+
+/**
+ * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
+ * @adapter: pointer to adapter for which debug features should be unregistered
+ */
+void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
+{
+	struct zfcp_dbf *dbf = adapter->dbf;
+
+	adapter->dbf = NULL;
+	zfcp_dbf_unregister(dbf);
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_dbf.h b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_dbf.h
new file mode 100644
index 0000000..3ac7a4b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_dbf.h
@@ -0,0 +1,383 @@
+/*
+ * zfcp device driver
+ * debug feature declarations
+ *
+ * Copyright IBM Corp. 2008, 2010
+ */
+
+#ifndef ZFCP_DBF_H
+#define ZFCP_DBF_H
+
+#include <scsi/fc/fc_fcp.h>
+#include "zfcp_ext.h"
+#include "zfcp_fsf.h"
+#include "zfcp_def.h"
+
+#define ZFCP_DBF_TAG_LEN       7
+
+#define ZFCP_DBF_INVALID_LUN	0xFFFFFFFFFFFFFFFFull
+
+/**
+ * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
+ * @ready: number of ready recovery actions
+ * @running: number of running recovery actions
+ * @want: wanted recovery action
+ * @need: needed recovery action
+ */
+struct zfcp_dbf_rec_trigger {
+	u32 ready;
+	u32 running;
+	u8 want;
+	u8 need;
+} __packed;
+
+/**
+ * struct zfcp_dbf_rec_running - trace record for running recovery
+ * @fsf_req_id: request id for fsf requests
+ * @rec_status: status of the fsf request
+ * @rec_step: current step of the recovery action
+ * @rec_action: recovery action the record refers to
+ * @rec_count: recovery counter
+ */
+struct zfcp_dbf_rec_running {
+	u64 fsf_req_id;
+	u32 rec_status;
+	u16 rec_step;
+	u8 rec_action;
+	u8 rec_count;
+} __packed;
+
+/**
+ * enum zfcp_dbf_rec_id - recovery trace record id
+ * @ZFCP_DBF_REC_TRIG: triggered recovery identifier
+ * @ZFCP_DBF_REC_RUN: running recovery identifier
+ */
+enum zfcp_dbf_rec_id {
+	ZFCP_DBF_REC_TRIG	= 1,
+	ZFCP_DBF_REC_RUN	= 2,
+};
+
+/**
+ * struct zfcp_dbf_rec - trace record for error recovery actions
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @lun: logical unit number
+ * @wwpn: world wide port number
+ * @d_id: destination ID
+ * @adapter_status: current status of the adapter
+ * @port_status: current status of the port
+ * @lun_status: current status of the lun
+ * @u.trig: structure zfcp_dbf_rec_trigger
+ * @u.run: structure zfcp_dbf_rec_running
+ */
+struct zfcp_dbf_rec {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
+	u64 lun;
+	u64 wwpn;
+	u32 d_id;
+	u32 adapter_status;
+	u32 port_status;
+	u32 lun_status;
+	union {
+		struct zfcp_dbf_rec_trigger trig;
+		struct zfcp_dbf_rec_running run;
+	} u;
+} __packed;
+
+/**
+ * enum zfcp_dbf_san_id - SAN trace record identifier
+ * @ZFCP_DBF_SAN_REQ: request trace record id
+ * @ZFCP_DBF_SAN_RES: response trace record id
+ * @ZFCP_DBF_SAN_ELS: extended link service record id
+ */
+enum zfcp_dbf_san_id {
+	ZFCP_DBF_SAN_REQ	= 1,
+	ZFCP_DBF_SAN_RES	= 2,
+	ZFCP_DBF_SAN_ELS	= 3,
+};
+
+/**
+ * struct zfcp_dbf_san - trace record for SAN requests and responses
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @fsf_req_id: request id for fsf requests
+ * @payload: unformatted information related to request/response
+ * @d_id: destination id
+ */
+struct zfcp_dbf_san {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
+	u64 fsf_req_id;
+	u32 d_id;
+#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
+	char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
+} __packed;
+
+/**
+ * struct zfcp_dbf_hba_res - trace record for hba responses
+ * @req_issued: timestamp when request was issued
+ * @prot_status: protocol status
+ * @prot_status_qual: protocol status qualifier
+ * @fsf_status: fsf status
+ * @fsf_status_qual: fsf status qualifier
+ */
+struct zfcp_dbf_hba_res {
+	u64 req_issued;
+	u32 prot_status;
+	u8  prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
+	u32 fsf_status;
+	u8  fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+} __packed;
+
+/**
+ * struct zfcp_dbf_hba_uss - trace record for unsolicited status
+ * @status_type: type of unsolicited status
+ * @status_subtype: subtype of unsolicited status
+ * @d_id: destination ID
+ * @lun: logical unit number
+ * @queue_designator: queue designator
+ */
+struct zfcp_dbf_hba_uss {
+	u32 status_type;
+	u32 status_subtype;
+	u32 d_id;
+	u64 lun;
+	u64 queue_designator;
+} __packed;
+
+/**
+ * enum zfcp_dbf_hba_id - HBA trace record identifier
+ * @ZFCP_DBF_HBA_RES: response trace record
+ * @ZFCP_DBF_HBA_USS: unsolicited status trace record
+ * @ZFCP_DBF_HBA_BIT: bit error trace record
+ */
+enum zfcp_dbf_hba_id {
+	ZFCP_DBF_HBA_RES	= 1,
+	ZFCP_DBF_HBA_USS	= 2,
+	ZFCP_DBF_HBA_BIT	= 3,
+	ZFCP_DBF_HBA_BASIC	= 4,
+};
+
+/**
+ * struct zfcp_dbf_hba - common trace record for HBA records
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @fsf_req_id: request id for fsf requests
+ * @fsf_req_status: status of fsf request
+ * @fsf_cmd: fsf command
+ * @fsf_seq_no: fsf sequence number
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @u: record type specific data
+ */
+struct zfcp_dbf_hba {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
+	u64 fsf_req_id;
+	u32 fsf_req_status;
+	u32 fsf_cmd;
+	u32 fsf_seq_no;
+	u16 pl_len;
+	union {
+		struct zfcp_dbf_hba_res res;
+		struct zfcp_dbf_hba_uss uss;
+		struct fsf_bit_error_payload be;
+	} u;
+} __packed;
+
+/**
+ * enum zfcp_dbf_scsi_id - scsi trace record identifier
+ * @ZFCP_DBF_SCSI_CMND: scsi command trace record
+ */
+enum zfcp_dbf_scsi_id {
+	ZFCP_DBF_SCSI_CMND	= 1,
+};
+
+/**
+ * struct zfcp_dbf_scsi - common trace record for SCSI records
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @scsi_id: scsi device id
+ * @scsi_lun: scsi device logical unit number
+ * @scsi_result: scsi result
+ * @scsi_retries: current retry number of scsi request
+ * @scsi_allowed: allowed retries
+ * @fcp_rsp_info: FCP response info
+ * @scsi_opcode: scsi opcode
+ * @fsf_req_id: request id of fsf request
+ * @host_scribble: LLD specific data attached to SCSI request
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @fcp_rsp: FCP response for the FCP command
+ */
+struct zfcp_dbf_scsi {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
+	u32 scsi_id;
+	u32 scsi_lun;
+	u32 scsi_result;
+	u8 scsi_retries;
+	u8 scsi_allowed;
+	u8 fcp_rsp_info;
+#define ZFCP_DBF_SCSI_OPCODE	16
+	u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
+	u64 fsf_req_id;
+	u64 host_scribble;
+	u16 pl_len;
+	struct fcp_resp_with_ext fcp_rsp;
+} __packed;
+
+/**
+ * struct zfcp_dbf_pay - trace record for unformatted payload information
+ * @area: area this record is originated from
+ * @counter: ascending record number
+ * @fsf_req_id: request id of fsf request
+ * @data: unformatted data
+ */
+struct zfcp_dbf_pay {
+	u8 counter;
+	char area[ZFCP_DBF_TAG_LEN];
+	u64 fsf_req_id;
+#define ZFCP_DBF_PAY_MAX_REC 0x100
+	char data[ZFCP_DBF_PAY_MAX_REC];
+} __packed;
+
+/**
+ * struct zfcp_dbf - main dbf trace structure
+ * @pay: reference to payload trace area
+ * @rec: reference to recovery trace area
+ * @hba: reference to hba trace area
+ * @san: reference to san trace area
+ * @scsi: reference to scsi trace area
+ * @pay_lock: lock protecting payload trace buffer
+ * @rec_lock: lock protecting recovery trace buffer
+ * @hba_lock: lock protecting hba trace buffer
+ * @san_lock: lock protecting san trace buffer
+ * @scsi_lock: lock protecting scsi trace buffer
+ * @pay_buf: pre-allocated buffer for payload
+ * @rec_buf: pre-allocated buffer for recovery
+ * @hba_buf: pre-allocated buffer for hba
+ * @san_buf: pre-allocated buffer for san
+ * @scsi_buf: pre-allocated buffer for scsi
+ */
+struct zfcp_dbf {
+	debug_info_t			*pay;
+	debug_info_t			*rec;
+	debug_info_t			*hba;
+	debug_info_t			*san;
+	debug_info_t			*scsi;
+	spinlock_t			pay_lock;
+	spinlock_t			rec_lock;
+	spinlock_t			hba_lock;
+	spinlock_t			san_lock;
+	spinlock_t			scsi_lock;
+	struct zfcp_dbf_pay		pay_buf;
+	struct zfcp_dbf_rec		rec_buf;
+	struct zfcp_dbf_hba		hba_buf;
+	struct zfcp_dbf_san		san_buf;
+	struct zfcp_dbf_scsi		scsi_buf;
+};
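+
+#if 0	/* Illustrative sketch only -- not built.  It shows the usual emit
+	 * pattern for the trace areas above: take the per-area lock, fill
+	 * the preallocated record buffer and pass it to the s390 debug
+	 * feature with debug_event().  The helper name and the trace tag
+	 * are hypothetical.
+	 */
+static inline void zfcp_dbf_hba_example(struct zfcp_dbf *dbf, char *tag)
+{
+	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dbf->hba_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_HBA_BASIC;
+	debug_event(dbf->hba, 1, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+#endif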
+
+static inline
+void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
+{
+	if (level <= req->adapter->dbf->hba->level)
+		zfcp_dbf_hba_fsf_res(tag, req);
+}
+
+/**
+ * zfcp_dbf_hba_fsf_response - trace event for request completion
+ * @req: request that has been completed
+ */
+static inline
+void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
+{
+	struct fsf_qtcb *qtcb = req->qtcb;
+
+	if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+	    (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
+		zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
+
+	} else if (qtcb->header.fsf_status != FSF_GOOD) {
+		zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
+
+	} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
+		   (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
+		zfcp_dbf_hba_fsf_resp("fs_open", 4, req);
+
+	} else if (qtcb->header.log_length) {
+		zfcp_dbf_hba_fsf_resp("fs_qtcb", 5, req);
+
+	} else {
+		zfcp_dbf_hba_fsf_resp("fs_norm", 6, req);
+	}
+}
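+
+#if 0	/* Illustrative sketch only -- not built.  zfcp_dbf_hba_fsf_resp()
+	 * above compares the requested verbosity against the current level
+	 * of the hba trace area, so a caller only picks a level; whether a
+	 * record is written depends on the runtime setting.  The trace tag
+	 * "fs_xmpl" and the helper name are hypothetical.
+	 */
+static inline void zfcp_dbf_hba_fsf_example(struct zfcp_fsf_req *req)
+{
+	/* recorded only while the hba trace level is 3 or higher */
+	zfcp_dbf_hba_fsf_resp("fs_xmpl", 3, req);
+}
+#endif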
+
+static inline
+void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
+		   struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *)
+					scmd->device->host->hostdata[0];
+
+	if (level <= adapter->dbf->scsi->level)
+		zfcp_dbf_scsi(tag, scmd, req);
+}
+
+/**
+ * zfcp_dbf_scsi_result - trace event for SCSI command completion
+ * @scmd: SCSI command pointer
+ * @req: FSF request used to issue SCSI command
+ */
+static inline
+void zfcp_dbf_scsi_result(struct scsi_cmnd *scmd, struct zfcp_fsf_req *req)
+{
+	if (scmd->result != 0)
+		_zfcp_dbf_scsi("rsl_err", 3, scmd, req);
+	else if (scmd->retries > 0)
+		_zfcp_dbf_scsi("rsl_ret", 4, scmd, req);
+	else
+		_zfcp_dbf_scsi("rsl_nor", 6, scmd, req);
+}
+
+/**
+ * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
+ * @scmd: SCSI command pointer
+ */
+static inline
+void zfcp_dbf_scsi_fail_send(struct scsi_cmnd *scmd)
+{
+	_zfcp_dbf_scsi("rsl_fai", 4, scmd, NULL);
+}
+
+/**
+ * zfcp_dbf_scsi_abort - trace event for SCSI command abort
+ * @tag: tag indicating success or failure of abort operation
+ * @scmd: SCSI command to be aborted
+ * @fsf_req: request containing abort (might be NULL)
+ */
+static inline
+void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
+			 struct zfcp_fsf_req *fsf_req)
+{
+	_zfcp_dbf_scsi(tag, 1, scmd, fsf_req);
+}
+
+/**
+ * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset
+ * @tag: tag indicating success or failure of reset operation
+ * @scmnd: SCSI command which caused this error recovery
+ * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
+ */
+static inline
+void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
+{
+	char tmp_tag[ZFCP_DBF_TAG_LEN];
+
+	if (flag == FCP_TMF_TGT_RESET)
+		memcpy(tmp_tag, "tr_", 3);
+	else
+		memcpy(tmp_tag, "lr_", 3);
+
+	memcpy(&tmp_tag[3], tag, 4);
+	_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
+}
+
+#endif /* ZFCP_DBF_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_def.h b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_def.h
new file mode 100644
index 0000000..f172b84
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_def.h
@@ -0,0 +1,326 @@
+/*
+ * zfcp device driver
+ *
+ * Global definitions for the zfcp device driver.
+ *
+ * Copyright IBM Corporation 2002, 2010
+ */
+
+#ifndef ZFCP_DEF_H
+#define ZFCP_DEF_H
+
+/*************************** INCLUDES *****************************************/
+
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/mempool.h>
+#include <linux/syscalls.h>
+#include <linux/scatterlist.h>
+#include <linux/ioctl.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_gs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
+#include <asm/ccwdev.h>
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include <asm/sysinfo.h>
+#include "zfcp_fsf.h"
+#include "zfcp_fc.h"
+#include "zfcp_qdio.h"
+
+struct zfcp_reqlist;
+
+/********************* SCSI SPECIFIC DEFINES *********************************/
+#define ZFCP_SCSI_ER_TIMEOUT                    (10*HZ)
+
+/********************* FSF SPECIFIC DEFINES *********************************/
+
+/* ATTENTION: value must not be used by hardware */
+#define FSF_QTCB_UNSOLICITED_STATUS		0x6305
+
+/* timeout value for "default timer" for fsf requests */
+#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
+
+/*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
+
+/*
+ * Note, the leftmost status byte is common among adapter, port
+ * and unit
+ */
+#define ZFCP_COMMON_FLAGS			0xfff00000
+
+/* common status bits */
+#define ZFCP_STATUS_COMMON_RUNNING		0x40000000
+#define ZFCP_STATUS_COMMON_ERP_FAILED		0x20000000
+#define ZFCP_STATUS_COMMON_UNBLOCKED		0x10000000
+#define ZFCP_STATUS_COMMON_OPEN                 0x04000000
+#define ZFCP_STATUS_COMMON_ERP_INUSE		0x01000000
+#define ZFCP_STATUS_COMMON_ACCESS_DENIED	0x00800000
+#define ZFCP_STATUS_COMMON_ACCESS_BOXED		0x00400000
+#define ZFCP_STATUS_COMMON_NOESC		0x00200000
+
+/* adapter status */
+#define ZFCP_STATUS_ADAPTER_MB_ACT		0x00000001
+#define ZFCP_STATUS_ADAPTER_QDIOUP		0x00000002
+#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED	0x00000004
+#define ZFCP_STATUS_ADAPTER_XCONFIG_OK		0x00000008
+#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT	0x00000010
+#define ZFCP_STATUS_ADAPTER_SUSPENDED		0x00000040
+#define ZFCP_STATUS_ADAPTER_ERP_PENDING		0x00000100
+#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED	0x00000200
+#define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED	0x00000400
+
+/* remote port status */
+#define ZFCP_STATUS_PORT_PHYS_OPEN		0x00000001
+#define ZFCP_STATUS_PORT_LINK_TEST		0x00000002
+
+/* logical unit status */
+#define ZFCP_STATUS_LUN_SHARED			0x00000004
+#define ZFCP_STATUS_LUN_READONLY		0x00000008
+
+/* FSF request status (this does not have a common part) */
+#define ZFCP_STATUS_FSFREQ_ERROR		0x00000008
+#define ZFCP_STATUS_FSFREQ_CLEANUP		0x00000010
+#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED	0x00000040
+#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED       0x00000080
+#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED         0x00000200
+#define ZFCP_STATUS_FSFREQ_DISMISSED            0x00001000
+
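+#if 0	/* Illustrative sketch only -- not built.  The status words defined
+	 * above live in atomic_t fields and are tested with simple bit
+	 * masks, as done throughout the driver.  The helper name is
+	 * hypothetical.
+	 */
+static inline int zfcp_example_status_unblocked(atomic_t *status)
+{
+	/* non-zero while the adapter, port or LUN accepts new requests */
+	return atomic_read(status) & ZFCP_STATUS_COMMON_UNBLOCKED;
+}
+#endif
+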
+/************************* STRUCTURE DEFINITIONS *****************************/
+
+struct zfcp_fsf_req;
+
+/* holds various memory pools of an adapter */
+struct zfcp_adapter_mempool {
+	mempool_t *erp_req;
+	mempool_t *gid_pn_req;
+	mempool_t *scsi_req;
+	mempool_t *scsi_abort;
+	mempool_t *status_read_req;
+	mempool_t *sr_data;
+	mempool_t *gid_pn;
+	mempool_t *qtcb_pool;
+};
+
+struct zfcp_erp_action {
+	struct list_head list;
+	int action;	              /* requested action code */
+	struct zfcp_adapter *adapter; /* device which should be recovered */
+	struct zfcp_port *port;
+	struct scsi_device *sdev;
+	u32		status;	      /* recovery status */
+	u32 step;	              /* active step of this erp action */
+	unsigned long		fsf_req_id;
+	struct timer_list timer;
+};
+
+struct fsf_latency_record {
+	u32 min;
+	u32 max;
+	u64 sum;
+};
+
+struct latency_cont {
+	struct fsf_latency_record channel;
+	struct fsf_latency_record fabric;
+	u64 counter;
+};
+
+struct zfcp_latencies {
+	struct latency_cont read;
+	struct latency_cont write;
+	struct latency_cont cmd;
+	spinlock_t lock;
+};
+
+struct zfcp_adapter {
+	struct kref		ref;
+	u64			peer_wwnn;	   /* P2P peer WWNN */
+	u64			peer_wwpn;	   /* P2P peer WWPN */
+	u32			peer_d_id;	   /* P2P peer D_ID */
+	struct ccw_device       *ccw_device;	   /* S/390 ccw device */
+	struct zfcp_qdio	*qdio;
+	u32			hydra_version;	   /* Hydra version */
+	u32			fsf_lic_version;
+	u32			adapter_features;  /* FCP channel features */
+	u32			connection_features; /* host connection features */
+        u32			hardware_version;  /* of FCP channel */
+	u16			timer_ticks;       /* time int for a tick */
+	struct Scsi_Host	*scsi_host;	   /* Pointer to mid-layer */
+	struct list_head	port_list;	   /* remote port list */
+	rwlock_t		port_list_lock;    /* port list lock */
+	unsigned long		req_no;		   /* unique FSF req number */
+	struct zfcp_reqlist	*req_list;
+	u32			fsf_req_seq_no;	   /* FSF cmnd seq number */
+	rwlock_t		abort_lock;        /* Protects against SCSI
+						      stack abort/command
+						      completion races */
+	atomic_t		stat_miss;	   /* # missing status reads*/
+	unsigned int		stat_read_buf_num;
+	struct work_struct	stat_work;
+	atomic_t		status;	           /* status of this adapter */
+	struct list_head	erp_ready_head;	   /* error recovery for this
+						      adapter/devices */
+	wait_queue_head_t	erp_ready_wq;
+	struct list_head	erp_running_head;
+	rwlock_t		erp_lock;
+	wait_queue_head_t	erp_done_wqh;
+	struct zfcp_erp_action	erp_action;	   /* pending error recovery */
+        atomic_t                erp_counter;
+	u32			erp_total_count;   /* total nr of enqueued erp
+						      actions */
+	u32			erp_low_mem_count; /* nr of erp actions waiting
+						      for memory */
+	struct task_struct	*erp_thread;
+	struct zfcp_fc_wka_ports *gs;		   /* generic services */
+	struct zfcp_dbf		*dbf;		   /* debug traces */
+	struct zfcp_adapter_mempool	pool;      /* Adapter memory pools */
+	struct fc_host_statistics *fc_stats;
+	struct fsf_qtcb_bottom_port *stats_reset_data;
+	unsigned long		stats_reset;
+	struct work_struct	scan_work;
+	struct work_struct	ns_up_work;
+	struct service_level	service_level;
+	struct workqueue_struct	*work_queue;
+	struct device_dma_parameters dma_parms;
+	struct zfcp_fc_events events;
+};
+
+struct zfcp_port {
+	struct device          dev;
+	struct fc_rport        *rport;         /* rport of fc transport class */
+	struct list_head       list;	       /* list of remote ports */
+	struct zfcp_adapter    *adapter;       /* adapter used to access port */
+	struct list_head	unit_list;	/* head of logical unit list */
+	rwlock_t		unit_list_lock; /* unit list lock */
+	atomic_t		units;	       /* zfcp_unit count */
+	atomic_t	       status;	       /* status of this remote port */
+	u64		       wwnn;	       /* WWNN if known */
+	u64		       wwpn;	       /* WWPN */
+	u32		       d_id;	       /* D_ID */
+	u32		       handle;	       /* handle assigned by FSF */
+	struct zfcp_erp_action erp_action;     /* pending error recovery */
+        atomic_t               erp_counter;
+	u32                    maxframe_size;
+	u32                    supported_classes;
+	struct work_struct     gid_pn_work;
+	struct work_struct     test_link_work;
+	struct work_struct     rport_work;
+	enum { RPORT_NONE, RPORT_ADD, RPORT_DEL }  rport_task;
+	unsigned int		starget_id;
+};
+
+/**
+ * struct zfcp_unit - LUN configured via zfcp sysfs
+ * @dev: struct device for sysfs representation and reference counting
+ * @list: entry in LUN/unit list per zfcp_port
+ * @port: reference to zfcp_port where this LUN is configured
+ * @fcp_lun: 64 bit LUN value
+ * @scsi_work: for running scsi_scan_target
+ *
+ * This is the representation of a LUN that has been configured for
+ * usage. The main data here is the 64 bit LUN value; data for
+ * running I/O and recovery is in struct zfcp_scsi_dev.
+ */
+struct zfcp_unit {
+	struct device		dev;
+	struct list_head	list;
+	struct zfcp_port	*port;
+	u64			fcp_lun;
+	struct work_struct	scsi_work;
+};
+
+/**
+ * struct zfcp_scsi_dev - zfcp data per SCSI device
+ * @status: zfcp internal status flags
+ * @lun_handle: handle from "open lun" for issuing FSF requests
+ * @erp_action: zfcp erp data for opening and recovering this LUN
+ * @erp_counter: zfcp erp counter for this LUN
+ * @latencies: FSF channel and fabric latencies
+ * @port: zfcp_port where this LUN belongs to
+ */
+struct zfcp_scsi_dev {
+	atomic_t		status;
+	u32			lun_handle;
+	struct zfcp_erp_action	erp_action;
+	atomic_t		erp_counter;
+	struct zfcp_latencies	latencies;
+	struct zfcp_port	*port;
+};
+
+/**
+ * sdev_to_zfcp - Access zfcp LUN data for SCSI device
+ * @sdev: scsi_device where to get the zfcp_scsi_dev pointer
+ */
+static inline struct zfcp_scsi_dev *sdev_to_zfcp(struct scsi_device *sdev)
+{
+	return scsi_transport_device_data(sdev);
+}
+
+/**
+ * zfcp_scsi_dev_lun - Return SCSI device LUN as 64 bit FCP LUN
+ * @sdev: SCSI device where to get the LUN from
+ */
+static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev)
+{
+	u64 fcp_lun;
+
+	int_to_scsilun(sdev->lun, (struct scsi_lun *)&fcp_lun);
+	return fcp_lun;
+}
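+
+#if 0	/* Illustrative sketch only -- not built.  A typical caller combines
+	 * the two helpers above: sdev_to_zfcp() returns the per-LUN zfcp
+	 * data attached through the FC transport class and
+	 * zfcp_scsi_dev_lun() the 64 bit FCP LUN of the same scsi_device.
+	 * The helper name is hypothetical.
+	 */
+static inline void zfcp_example_report_lun(struct scsi_device *sdev)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	dev_info(&zfcp_sdev->port->dev, "LUN 0x%016llx, handle 0x%x\n",
+		 (unsigned long long)zfcp_scsi_dev_lun(sdev),
+		 zfcp_sdev->lun_handle);
+}
+#endif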
+
+/**
+ * struct zfcp_fsf_req - basic FSF request structure
+ * @list: list of FSF requests
+ * @req_id: unique request ID
+ * @adapter: adapter this request belongs to
+ * @qdio_req: qdio queue related values
+ * @completion: used to signal the completion of the request
+ * @status: status of the request
+ * @fsf_command: FSF command issued
+ * @qtcb: associated QTCB
+ * @seq_no: sequence number of this request
+ * @data: private data
+ * @timer: timer data of this request
+ * @erp_action: reference to erp action if request issued on behalf of ERP
+ * @pool: reference to memory pool if used for this request
+ * @issued: time when request was sent (STCK)
+ * @handler: handler which should be called to process response
+ */
+struct zfcp_fsf_req {
+	struct list_head	list;
+	unsigned long		req_id;
+	struct zfcp_adapter	*adapter;
+	struct zfcp_qdio_req	qdio_req;
+	struct completion	completion;
+	u32			status;
+	u32			fsf_command;
+	struct fsf_qtcb		*qtcb;
+	u32			seq_no;
+	void			*data;
+	struct timer_list	timer;
+	struct zfcp_erp_action	*erp_action;
+	mempool_t		*pool;
+	unsigned long long	issued;
+	void			(*handler)(struct zfcp_fsf_req *);
+};
+
+static inline
+int zfcp_adapter_multi_buffer_active(struct zfcp_adapter *adapter)
+{
+	return atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_MB_ACT;
+}
+
+#endif /* ZFCP_DEF_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_erp.c b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_erp.c
new file mode 100644
index 0000000..5c87270
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_erp.c
@@ -0,0 +1,1574 @@
+/*
+ * zfcp device driver
+ *
+ * Error Recovery Procedures (ERP).
+ *
+ * Copyright IBM Corporation 2002, 2010
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kthread.h>
+#include "zfcp_ext.h"
+#include "zfcp_reqlist.h"
+
+#define ZFCP_MAX_ERPS                   3
+
+enum zfcp_erp_act_flags {
+	ZFCP_STATUS_ERP_TIMEDOUT	= 0x10000000,
+	ZFCP_STATUS_ERP_CLOSE_ONLY	= 0x01000000,
+	ZFCP_STATUS_ERP_DISMISSING	= 0x00100000,
+	ZFCP_STATUS_ERP_DISMISSED	= 0x00200000,
+	ZFCP_STATUS_ERP_LOWMEM		= 0x00400000,
+	ZFCP_STATUS_ERP_NO_REF		= 0x00800000,
+};
+
+enum zfcp_erp_steps {
+	ZFCP_ERP_STEP_UNINITIALIZED	= 0x0000,
+	ZFCP_ERP_STEP_FSF_XCONFIG	= 0x0001,
+	ZFCP_ERP_STEP_PHYS_PORT_CLOSING	= 0x0010,
+	ZFCP_ERP_STEP_PORT_CLOSING	= 0x0100,
+	ZFCP_ERP_STEP_PORT_OPENING	= 0x0800,
+	ZFCP_ERP_STEP_LUN_CLOSING	= 0x1000,
+	ZFCP_ERP_STEP_LUN_OPENING	= 0x2000,
+};
+
+enum zfcp_erp_act_type {
+	ZFCP_ERP_ACTION_REOPEN_LUN         = 1,
+	ZFCP_ERP_ACTION_REOPEN_PORT	   = 2,
+	ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
+	ZFCP_ERP_ACTION_REOPEN_ADAPTER     = 4,
+};
+
+enum zfcp_erp_act_state {
+	ZFCP_ERP_ACTION_RUNNING = 1,
+	ZFCP_ERP_ACTION_READY   = 2,
+};
+
+enum zfcp_erp_act_result {
+	ZFCP_ERP_SUCCEEDED = 0,
+	ZFCP_ERP_FAILED    = 1,
+	ZFCP_ERP_CONTINUES = 2,
+	ZFCP_ERP_EXIT      = 3,
+	ZFCP_ERP_DISMISSED = 4,
+	ZFCP_ERP_NOMEM     = 5,
+};
+
+static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
+{
+	zfcp_erp_clear_adapter_status(adapter,
+				       ZFCP_STATUS_COMMON_UNBLOCKED | mask);
+}
+
+static int zfcp_erp_action_exists(struct zfcp_erp_action *act)
+{
+	struct zfcp_erp_action *curr_act;
+
+	list_for_each_entry(curr_act, &act->adapter->erp_running_head, list)
+		if (act == curr_act)
+			return ZFCP_ERP_ACTION_RUNNING;
+	return 0;
+}
+
+static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+
+	list_move(&act->list, &act->adapter->erp_ready_head);
+	zfcp_dbf_rec_run("erardy1", act);
+	wake_up(&adapter->erp_ready_wq);
+	zfcp_dbf_rec_run("erardy2", act);
+}
+
+static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
+{
+	act->status |= ZFCP_STATUS_ERP_DISMISSED;
+	if (zfcp_erp_action_exists(act) == ZFCP_ERP_ACTION_RUNNING)
+		zfcp_erp_action_ready(act);
+}
+
+static void zfcp_erp_action_dismiss_lun(struct scsi_device *sdev)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+		zfcp_erp_action_dismiss(&zfcp_sdev->erp_action);
+}
+
+static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
+{
+	struct scsi_device *sdev;
+
+	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+		zfcp_erp_action_dismiss(&port->erp_action);
+	else {
+		spin_lock(port->adapter->scsi_host->host_lock);
+		__shost_for_each_device(sdev, port->adapter->scsi_host)
+			if (sdev_to_zfcp(sdev)->port == port)
+				zfcp_erp_action_dismiss_lun(sdev);
+		spin_unlock(port->adapter->scsi_host->host_lock);
+	}
+}
+
+static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
+{
+	struct zfcp_port *port;
+
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+		zfcp_erp_action_dismiss(&adapter->erp_action);
+	else {
+		read_lock(&adapter->port_list_lock);
+		list_for_each_entry(port, &adapter->port_list, list)
+		    zfcp_erp_action_dismiss_port(port);
+		read_unlock(&adapter->port_list_lock);
+	}
+}
+
+static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
+				 struct zfcp_port *port,
+				 struct scsi_device *sdev)
+{
+	int need = want;
+	int l_status, p_status, a_status;
+	struct zfcp_scsi_dev *zfcp_sdev;
+
+	switch (want) {
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		zfcp_sdev = sdev_to_zfcp(sdev);
+		l_status = atomic_read(&zfcp_sdev->status);
+		if (l_status & ZFCP_STATUS_COMMON_ERP_INUSE)
+			return 0;
+		p_status = atomic_read(&port->status);
+		if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
+		      p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+			return 0;
+		if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
+			need = ZFCP_ERP_ACTION_REOPEN_PORT;
+		/* fall through */
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		p_status = atomic_read(&port->status);
+		if (!(p_status & ZFCP_STATUS_COMMON_OPEN))
+			need = ZFCP_ERP_ACTION_REOPEN_PORT;
+		/* fall through */
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		p_status = atomic_read(&port->status);
+		if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
+			return 0;
+		a_status = atomic_read(&adapter->status);
+		if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
+		      a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+			return 0;
+		if (p_status & ZFCP_STATUS_COMMON_NOESC)
+			return need;
+		if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED))
+			need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
+		/* fall through */
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		a_status = atomic_read(&adapter->status);
+		if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
+			return 0;
+		if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) &&
+		    !(a_status & ZFCP_STATUS_COMMON_OPEN))
+			return 0; /* shutdown requested for closed adapter */
+	}
+
+	return need;
+}
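+
+#if 0	/* Illustrative sketch only -- not built.  Worked example for the
+	 * escalation in zfcp_erp_required_act(): under the stated status
+	 * assumptions a LUN reopen request is escalated to the next level.
+	 * The function name is hypothetical.
+	 */
+static void zfcp_erp_required_act_example(struct zfcp_adapter *adapter,
+					  struct zfcp_port *port,
+					  struct scsi_device *sdev)
+{
+	int need;
+
+	/*
+	 * Assuming neither LUN nor port is already in ERP, the port is
+	 * blocked (ZFCP_STATUS_COMMON_UNBLOCKED cleared) but running, and
+	 * the adapter is running and unblocked, the LUN request escalates
+	 * one level: need is ZFCP_ERP_ACTION_REOPEN_PORT here.
+	 */
+	need = zfcp_erp_required_act(ZFCP_ERP_ACTION_REOPEN_LUN,
+				     adapter, port, sdev);
+	(void)need;
+}
+#endif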
+
+static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
+						  struct zfcp_adapter *adapter,
+						  struct zfcp_port *port,
+						  struct scsi_device *sdev)
+{
+	struct zfcp_erp_action *erp_action;
+	struct zfcp_scsi_dev *zfcp_sdev;
+
+	switch (need) {
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		zfcp_sdev = sdev_to_zfcp(sdev);
+		if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
+			if (scsi_device_get(sdev))
+				return NULL;
+		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+				&zfcp_sdev->status);
+		erp_action = &zfcp_sdev->erp_action;
+		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+		erp_action->port = port;
+		erp_action->sdev = sdev;
+		if (!(atomic_read(&zfcp_sdev->status) &
+		      ZFCP_STATUS_COMMON_RUNNING))
+			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		if (!get_device(&port->dev))
+			return NULL;
+		zfcp_erp_action_dismiss_port(port);
+		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
+		erp_action = &port->erp_action;
+		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+		erp_action->port = port;
+		if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
+			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		kref_get(&adapter->ref);
+		zfcp_erp_action_dismiss_adapter(adapter);
+		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
+		erp_action = &adapter->erp_action;
+		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+		if (!(atomic_read(&adapter->status) &
+		      ZFCP_STATUS_COMMON_RUNNING))
+			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
+		break;
+
+	default:
+		return NULL;
+	}
+
+	erp_action->adapter = adapter;
+	erp_action->action = need;
+	erp_action->status = act_status;
+
+	return erp_action;
+}
+
+static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
+				   struct zfcp_port *port,
+				   struct scsi_device *sdev,
+				   char *id, u32 act_status)
+{
+	int retval = 1, need;
+	struct zfcp_erp_action *act;
+
+	if (!adapter->erp_thread)
+		return -EIO;
+
+	need = zfcp_erp_required_act(want, adapter, port, sdev);
+	if (!need)
+		goto out;
+
+	act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
+	if (!act)
+		goto out;
+	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
+	++adapter->erp_total_count;
+	list_add_tail(&act->list, &adapter->erp_ready_head);
+	wake_up(&adapter->erp_ready_wq);
+	retval = 0;
+ out:
+	zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
+	return retval;
+}
+
+static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
+				    int clear_mask, char *id)
+{
+	zfcp_erp_adapter_block(adapter, clear_mask);
+	zfcp_scsi_schedule_rports_block(adapter);
+
+	/* ensure propagation of failed status to new devices */
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+		zfcp_erp_set_adapter_status(adapter,
+					    ZFCP_STATUS_COMMON_ERP_FAILED);
+		return -EIO;
+	}
+	return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
+				       adapter, NULL, NULL, id, 0);
+}
+
+/**
+ * zfcp_erp_adapter_reopen - Reopen adapter.
+ * @adapter: Adapter to reopen.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ */
+void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
+{
+	unsigned long flags;
+
+	zfcp_erp_adapter_block(adapter, clear);
+	zfcp_scsi_schedule_rports_block(adapter);
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+		zfcp_erp_set_adapter_status(adapter,
+					    ZFCP_STATUS_COMMON_ERP_FAILED);
+	else
+		zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
+					NULL, NULL, id, 0);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+/**
+ * zfcp_erp_adapter_shutdown - Shutdown adapter.
+ * @adapter: Adapter to shut down.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ */
+void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
+			       char *id)
+{
+	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
+	zfcp_erp_adapter_reopen(adapter, clear | flags, id);
+}
+
+/**
+ * zfcp_erp_port_shutdown - Shutdown port
+ * @port: Port to shut down.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ */
+void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id)
+{
+	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
+	zfcp_erp_port_reopen(port, clear | flags, id);
+}
+
+static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
+{
+	zfcp_erp_clear_port_status(port,
+				    ZFCP_STATUS_COMMON_UNBLOCKED | clear);
+}
+
+static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
+					 char *id)
+{
+	zfcp_erp_port_block(port, clear);
+	zfcp_scsi_schedule_rport_block(port);
+
+	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+		return;
+
+	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
+				port->adapter, port, NULL, id, 0);
+}
+
+/**
+ * zfcp_erp_port_forced_reopen - Forced close of port and open again
+ * @port: Port to force close and to reopen.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ */
+void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
+{
+	unsigned long flags;
+	struct zfcp_adapter *adapter = port->adapter;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	_zfcp_erp_port_forced_reopen(port, clear, id);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+{
+	zfcp_erp_port_block(port, clear);
+	zfcp_scsi_schedule_rport_block(port);
+
+	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+		/* ensure propagation of failed status to new devices */
+		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
+		return -EIO;
+	}
+
+	return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
+				       port->adapter, port, NULL, id, 0);
+}
+
+/**
+ * zfcp_erp_port_reopen - trigger remote port recovery
+ * @port: port to recover
+ * @clear: flags in port status to be cleared
+ * @id: Id for debug trace event.
+ *
+ * Returns 0 if recovery has been triggered, < 0 if not.
+ */
+int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+{
+	int retval;
+	unsigned long flags;
+	struct zfcp_adapter *adapter = port->adapter;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	retval = _zfcp_erp_port_reopen(port, clear, id);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+
+	return retval;
+}
+
+static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
+{
+	zfcp_erp_clear_lun_status(sdev,
+				  ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask);
+}
+
+static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
+				 u32 act_status)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+
+	zfcp_erp_lun_block(sdev, clear);
+
+	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+		return;
+
+	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
+				zfcp_sdev->port, sdev, id, act_status);
+}
+
+/**
+ * zfcp_erp_lun_reopen - initiate reopen of a LUN
+ * @sdev: SCSI device / LUN to be reopened
+ * @clear: specifies flags in LUN status to be cleared
+ * @id: Id for debug trace event.
+ */
+void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
+{
+	unsigned long flags;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_port *port = zfcp_sdev->port;
+	struct zfcp_adapter *adapter = port->adapter;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	_zfcp_erp_lun_reopen(sdev, clear, id, 0);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+/**
+ * zfcp_erp_lun_shutdown - Shutdown LUN
+ * @sdev: SCSI device / LUN to shut down.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ */
+void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id)
+{
+	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
+	zfcp_erp_lun_reopen(sdev, clear | flags, id);
+}
+
+/**
+ * zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion
+ * @sdev: SCSI device / LUN to shut down.
+ * @id: Id for debug trace event.
+ *
+ * Do not acquire a reference for the LUN when creating the ERP
+ * action. This is safe because the function waits for the ERP to
+ * complete first, so the LUN can be shut down even when the SCSI
+ * device is already in state SDEV_DEL, where scsi_device_get would fail.
+ */
+void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
+{
+	unsigned long flags;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_port *port = zfcp_sdev->port;
+	struct zfcp_adapter *adapter = port->adapter;
+	int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	_zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+
+	zfcp_erp_wait(adapter);
+}
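+
+#if 0	/* Illustrative sketch only -- not built.  A removal path would shut
+	 * the LUN down and block until the resulting ERP action has been
+	 * processed before tearing down its own data structures.  The trace
+	 * id "exmpl_1" and the function name are hypothetical.
+	 */
+static void zfcp_example_remove_lun(struct scsi_device *sdev)
+{
+	zfcp_erp_lun_shutdown_wait(sdev, "exmpl_1");
+	/* from here on no new FSF requests are issued for this LUN */
+}
+#endif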
+
+static int status_change_set(unsigned long mask, atomic_t *status)
+{
+	return (atomic_read(status) ^ mask) & mask;
+}
+
+static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
+{
+	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
+		zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
+	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
+}
+
+static void zfcp_erp_port_unblock(struct zfcp_port *port)
+{
+	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
+		zfcp_dbf_rec_run("erpubl1", &port->erp_action);
+	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
+}
+
+static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
+		zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
+	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
+}
+
+static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
+{
+	list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
+	zfcp_dbf_rec_run("erator1", erp_action);
+}
+
+static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+	struct zfcp_fsf_req *req;
+
+	if (!act->fsf_req_id)
+		return;
+
+	spin_lock(&adapter->req_list->lock);
+	req = _zfcp_reqlist_find(adapter->req_list, act->fsf_req_id);
+	if (req && req->erp_action == act) {
+		if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
+				   ZFCP_STATUS_ERP_TIMEDOUT)) {
+			req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+			zfcp_dbf_rec_run("erscf_1", act);
+			req->erp_action = NULL;
+		}
+		if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
+			zfcp_dbf_rec_run("erscf_2", act);
+		if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
+			act->fsf_req_id = 0;
+	} else
+		act->fsf_req_id = 0;
+	spin_unlock(&adapter->req_list->lock);
+}
+
+/**
+ * zfcp_erp_notify - Trigger ERP action.
+ * @erp_action: ERP action to continue.
+ * @set_mask: ERP action status flags to set.
+ */
+void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	unsigned long flags;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
+		erp_action->status |= set_mask;
+		zfcp_erp_action_ready(erp_action);
+	}
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+/**
+ * zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request
+ * @data: ERP action (from timer data)
+ */
+void zfcp_erp_timeout_handler(unsigned long data)
+{
+	struct zfcp_erp_action *act = (struct zfcp_erp_action *) data;
+	zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
+}
+
+static void zfcp_erp_memwait_handler(unsigned long data)
+{
+	zfcp_erp_notify((struct zfcp_erp_action *)data, 0);
+}
+
+static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
+{
+	init_timer(&erp_action->timer);
+	erp_action->timer.function = zfcp_erp_memwait_handler;
+	erp_action->timer.data = (unsigned long) erp_action;
+	erp_action->timer.expires = jiffies + HZ;
+	add_timer(&erp_action->timer);
+}
+
+static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
+				      int clear, char *id)
+{
+	struct zfcp_port *port;
+
+	read_lock(&adapter->port_list_lock);
+	list_for_each_entry(port, &adapter->port_list, list)
+		_zfcp_erp_port_reopen(port, clear, id);
+	read_unlock(&adapter->port_list_lock);
+}
+
+static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
+				     char *id)
+{
+	struct scsi_device *sdev;
+
+	spin_lock(port->adapter->scsi_host->host_lock);
+	__shost_for_each_device(sdev, port->adapter->scsi_host)
+		if (sdev_to_zfcp(sdev)->port == port)
+			_zfcp_erp_lun_reopen(sdev, clear, id, 0);
+	spin_unlock(port->adapter->scsi_host->host_lock);
+}
+
+static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
+{
+	switch (act->action) {
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		_zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1");
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		_zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2");
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		_zfcp_erp_port_reopen(act->port, 0, "ersff_3");
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		_zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", 0);
+		break;
+	}
+}
+
+static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
+{
+	switch (act->action) {
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		_zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1");
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		_zfcp_erp_port_reopen(act->port, 0, "ersfs_2");
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		_zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3");
+		break;
+	}
+}
+
+static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
+{
+	unsigned long flags;
+
+	read_lock_irqsave(&adapter->erp_lock, flags);
+	if (list_empty(&adapter->erp_ready_head) &&
+	    list_empty(&adapter->erp_running_head)) {
+			atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
+					  &adapter->status);
+			wake_up(&adapter->erp_done_wqh);
+	}
+	read_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
+{
+	struct zfcp_port *port;
+	port = zfcp_port_enqueue(adapter, adapter->peer_wwpn, 0,
+				 adapter->peer_d_id);
+	if (IS_ERR(port)) /* error or port already attached */
+		return;
+	_zfcp_erp_port_reopen(port, 0, "ereptp1");
+}
+
+static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
+{
+	int retries;
+	int sleep = 1;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
+
+	for (retries = 7; retries; retries--) {
+		atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+				  &adapter->status);
+		write_lock_irq(&adapter->erp_lock);
+		zfcp_erp_action_to_running(erp_action);
+		write_unlock_irq(&adapter->erp_lock);
+		if (zfcp_fsf_exchange_config_data(erp_action)) {
+			atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+					  &adapter->status);
+			return ZFCP_ERP_FAILED;
+		}
+
+		wait_event(adapter->erp_ready_wq,
+			   !list_empty(&adapter->erp_ready_head));
+		if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
+			break;
+
+		if (!(atomic_read(&adapter->status) &
+		      ZFCP_STATUS_ADAPTER_HOST_CON_INIT))
+			break;
+
+		ssleep(sleep);
+		sleep *= 2;
+	}
+
+	atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+			  &adapter->status);
+
+	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
+		return ZFCP_ERP_FAILED;
+
+	if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
+		zfcp_erp_enqueue_ptp_port(adapter);
+
+	return ZFCP_ERP_SUCCEEDED;
+}
+
+static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
+{
+	int ret;
+	struct zfcp_adapter *adapter = act->adapter;
+
+	write_lock_irq(&adapter->erp_lock);
+	zfcp_erp_action_to_running(act);
+	write_unlock_irq(&adapter->erp_lock);
+
+	ret = zfcp_fsf_exchange_port_data(act);
+	if (ret == -EOPNOTSUPP)
+		return ZFCP_ERP_SUCCEEDED;
+	if (ret)
+		return ZFCP_ERP_FAILED;
+
+	zfcp_dbf_rec_run("erasox1", act);
+	wait_event(adapter->erp_ready_wq,
+		   !list_empty(&adapter->erp_ready_head));
+	zfcp_dbf_rec_run("erasox2", act);
+	if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
+		return ZFCP_ERP_FAILED;
+
+	return ZFCP_ERP_SUCCEEDED;
+}
+
+static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
+{
+	if (zfcp_erp_adapter_strat_fsf_xconf(act) == ZFCP_ERP_FAILED)
+		return ZFCP_ERP_FAILED;
+
+	if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
+		return ZFCP_ERP_FAILED;
+
+	if (mempool_resize(act->adapter->pool.sr_data,
+			   act->adapter->stat_read_buf_num, GFP_KERNEL))
+		return ZFCP_ERP_FAILED;
+
+	if (mempool_resize(act->adapter->pool.status_read_req,
+			   act->adapter->stat_read_buf_num, GFP_KERNEL))
+		return ZFCP_ERP_FAILED;
+
+	atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
+	if (zfcp_status_read_refill(act->adapter))
+		return ZFCP_ERP_FAILED;
+
+	return ZFCP_ERP_SUCCEEDED;
+}
+
+static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+
+	/* close queues to ensure that buffers are not accessed by adapter */
+	zfcp_qdio_close(adapter->qdio);
+	zfcp_fsf_req_dismiss_all(adapter);
+	adapter->fsf_req_seq_no = 0;
+	zfcp_fc_wka_ports_force_offline(adapter->gs);
+	/* all ports and LUNs are closed */
+	zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);
+
+	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
+}
+
+static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+
+	if (zfcp_qdio_open(adapter->qdio)) {
+		atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+				  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+				  &adapter->status);
+		return ZFCP_ERP_FAILED;
+	}
+
+	if (zfcp_erp_adapter_strategy_open_fsf(act)) {
+		zfcp_erp_adapter_strategy_close(act);
+		return ZFCP_ERP_FAILED;
+	}
+
+	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
+
+	return ZFCP_ERP_SUCCEEDED;
+}
+
+static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN) {
+		zfcp_erp_adapter_strategy_close(act);
+		if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
+			return ZFCP_ERP_EXIT;
+	}
+
+	if (zfcp_erp_adapter_strategy_open(act)) {
+		ssleep(8);
+		return ZFCP_ERP_FAILED;
+	}
+
+	return ZFCP_ERP_SUCCEEDED;
+}
+
+static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
+{
+	int retval;
+
+	retval = zfcp_fsf_close_physical_port(act);
+	if (retval == -ENOMEM)
+		return ZFCP_ERP_NOMEM;
+	act->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING;
+	if (retval)
+		return ZFCP_ERP_FAILED;
+
+	return ZFCP_ERP_CONTINUES;
+}
+
+static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
+{
+	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status);
+}
+
+static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_port *port = erp_action->port;
+	int status = atomic_read(&port->status);
+
+	switch (erp_action->step) {
+	case ZFCP_ERP_STEP_UNINITIALIZED:
+		zfcp_erp_port_strategy_clearstati(port);
+		if ((status & ZFCP_STATUS_PORT_PHYS_OPEN) &&
+		    (status & ZFCP_STATUS_COMMON_OPEN))
+			return zfcp_erp_port_forced_strategy_close(erp_action);
+		else
+			return ZFCP_ERP_FAILED;
+
+	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+		if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN))
+			return ZFCP_ERP_SUCCEEDED;
+	}
+	return ZFCP_ERP_FAILED;
+}
+
+static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
+{
+	int retval;
+
+	retval = zfcp_fsf_close_port(erp_action);
+	if (retval == -ENOMEM)
+		return ZFCP_ERP_NOMEM;
+	erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING;
+	if (retval)
+		return ZFCP_ERP_FAILED;
+	return ZFCP_ERP_CONTINUES;
+}
+
+static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
+{
+	int retval;
+
+	retval = zfcp_fsf_open_port(erp_action);
+	if (retval == -ENOMEM)
+		return ZFCP_ERP_NOMEM;
+	erp_action->step = ZFCP_ERP_STEP_PORT_OPENING;
+	if (retval)
+		return ZFCP_ERP_FAILED;
+	return ZFCP_ERP_CONTINUES;
+}
+
+static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+	struct zfcp_port *port = act->port;
+
+	if (port->wwpn != adapter->peer_wwpn) {
+		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
+		return ZFCP_ERP_FAILED;
+	}
+	port->d_id = adapter->peer_d_id;
+	return zfcp_erp_port_strategy_open_port(act);
+}
+
+static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+	struct zfcp_port *port = act->port;
+	int p_status = atomic_read(&port->status);
+
+	switch (act->step) {
+	case ZFCP_ERP_STEP_UNINITIALIZED:
+	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+	case ZFCP_ERP_STEP_PORT_CLOSING:
+		if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
+			return zfcp_erp_open_ptp_port(act);
+		if (!port->d_id) {
+			zfcp_fc_trigger_did_lookup(port);
+			return ZFCP_ERP_EXIT;
+		}
+		return zfcp_erp_port_strategy_open_port(act);
+
+	case ZFCP_ERP_STEP_PORT_OPENING:
+		/* D_ID might have changed during open */
+		if (p_status & ZFCP_STATUS_COMMON_OPEN) {
+			if (!port->d_id) {
+				zfcp_fc_trigger_did_lookup(port);
+				return ZFCP_ERP_EXIT;
+			}
+			return ZFCP_ERP_SUCCEEDED;
+		}
+		if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
+			port->d_id = 0;
+			return ZFCP_ERP_FAILED;
+		}
+		/* fall through otherwise */
+	}
+	return ZFCP_ERP_FAILED;
+}
+
+static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_port *port = erp_action->port;
+	int p_status = atomic_read(&port->status);
+
+	if ((p_status & ZFCP_STATUS_COMMON_NOESC) &&
+	    !(p_status & ZFCP_STATUS_COMMON_OPEN))
+		goto close_init_done;
+
+	switch (erp_action->step) {
+	case ZFCP_ERP_STEP_UNINITIALIZED:
+		zfcp_erp_port_strategy_clearstati(port);
+		if (p_status & ZFCP_STATUS_COMMON_OPEN)
+			return zfcp_erp_port_strategy_close(erp_action);
+		break;
+
+	case ZFCP_ERP_STEP_PORT_CLOSING:
+		if (p_status & ZFCP_STATUS_COMMON_OPEN)
+			return ZFCP_ERP_FAILED;
+		break;
+	}
+
+close_init_done:
+	if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
+		return ZFCP_ERP_EXIT;
+
+	return zfcp_erp_port_strategy_open_common(erp_action);
+}
+
+static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
+			  ZFCP_STATUS_LUN_SHARED | ZFCP_STATUS_LUN_READONLY,
+			  &zfcp_sdev->status);
+}
+
+static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
+{
+	int retval = zfcp_fsf_close_lun(erp_action);
+	if (retval == -ENOMEM)
+		return ZFCP_ERP_NOMEM;
+	erp_action->step = ZFCP_ERP_STEP_LUN_CLOSING;
+	if (retval)
+		return ZFCP_ERP_FAILED;
+	return ZFCP_ERP_CONTINUES;
+}
+
+static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
+{
+	int retval = zfcp_fsf_open_lun(erp_action);
+	if (retval == -ENOMEM)
+		return ZFCP_ERP_NOMEM;
+	erp_action->step = ZFCP_ERP_STEP_LUN_OPENING;
+	if (retval)
+		return  ZFCP_ERP_FAILED;
+	return ZFCP_ERP_CONTINUES;
+}
+
+static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
+{
+	struct scsi_device *sdev = erp_action->sdev;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	switch (erp_action->step) {
+	case ZFCP_ERP_STEP_UNINITIALIZED:
+		zfcp_erp_lun_strategy_clearstati(sdev);
+		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
+			return zfcp_erp_lun_strategy_close(erp_action);
+		/* already closed, fall through */
+	case ZFCP_ERP_STEP_LUN_CLOSING:
+		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
+			return ZFCP_ERP_FAILED;
+		if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
+			return ZFCP_ERP_EXIT;
+		return zfcp_erp_lun_strategy_open(erp_action);
+
+	case ZFCP_ERP_STEP_LUN_OPENING:
+		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
+			return ZFCP_ERP_SUCCEEDED;
+	}
+	return ZFCP_ERP_FAILED;
+}
+
+static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	switch (result) {
+	case ZFCP_ERP_SUCCEEDED :
+		atomic_set(&zfcp_sdev->erp_counter, 0);
+		zfcp_erp_lun_unblock(sdev);
+		break;
+	case ZFCP_ERP_FAILED :
+		atomic_inc(&zfcp_sdev->erp_counter);
+		if (atomic_read(&zfcp_sdev->erp_counter) > ZFCP_MAX_ERPS) {
+			dev_err(&zfcp_sdev->port->adapter->ccw_device->dev,
+				"ERP failed for LUN 0x%016Lx on "
+				"port 0x%016Lx\n",
+				(unsigned long long)zfcp_scsi_dev_lun(sdev),
+				(unsigned long long)zfcp_sdev->port->wwpn);
+			zfcp_erp_set_lun_status(sdev,
+						ZFCP_STATUS_COMMON_ERP_FAILED);
+		}
+		break;
+	}
+
+	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+		zfcp_erp_lun_block(sdev, 0);
+		result = ZFCP_ERP_EXIT;
+	}
+	return result;
+}
+
+static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
+{
+	switch (result) {
+	case ZFCP_ERP_SUCCEEDED :
+		atomic_set(&port->erp_counter, 0);
+		zfcp_erp_port_unblock(port);
+		break;
+
+	case ZFCP_ERP_FAILED :
+		if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC) {
+			zfcp_erp_port_block(port, 0);
+			result = ZFCP_ERP_EXIT;
+		}
+		atomic_inc(&port->erp_counter);
+		if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS) {
+			dev_err(&port->adapter->ccw_device->dev,
+				"ERP failed for remote port 0x%016Lx\n",
+				(unsigned long long)port->wwpn);
+			zfcp_erp_set_port_status(port,
+					 ZFCP_STATUS_COMMON_ERP_FAILED);
+		}
+		break;
+	}
+
+	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+		zfcp_erp_port_block(port, 0);
+		result = ZFCP_ERP_EXIT;
+	}
+	return result;
+}
+
+static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
+					   int result)
+{
+	switch (result) {
+	case ZFCP_ERP_SUCCEEDED :
+		atomic_set(&adapter->erp_counter, 0);
+		zfcp_erp_adapter_unblock(adapter);
+		break;
+
+	case ZFCP_ERP_FAILED :
+		atomic_inc(&adapter->erp_counter);
+		if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS) {
+			dev_err(&adapter->ccw_device->dev,
+				"ERP cannot recover an error "
+				"on the FCP device\n");
+			zfcp_erp_set_adapter_status(adapter,
+					    ZFCP_STATUS_COMMON_ERP_FAILED);
+		}
+		break;
+	}
+
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+		zfcp_erp_adapter_block(adapter, 0);
+		result = ZFCP_ERP_EXIT;
+	}
+	return result;
+}
+
+static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action,
+					  int result)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_port *port = erp_action->port;
+	struct scsi_device *sdev = erp_action->sdev;
+
+	switch (erp_action->action) {
+
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		result = zfcp_erp_strategy_check_lun(sdev, result);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		result = zfcp_erp_strategy_check_port(port, result);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		result = zfcp_erp_strategy_check_adapter(adapter, result);
+		break;
+	}
+	return result;
+}
+
+static int zfcp_erp_strat_change_det(atomic_t *target_status, u32 erp_status)
+{
+	int status = atomic_read(target_status);
+
+	if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
+	    (erp_status & ZFCP_STATUS_ERP_CLOSE_ONLY))
+		return 1; /* take it online */
+
+	if (!(status & ZFCP_STATUS_COMMON_RUNNING) &&
+	    !(erp_status & ZFCP_STATUS_ERP_CLOSE_ONLY))
+		return 1; /* take it offline */
+
+	return 0;
+}
+
+static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
+{
+	int action = act->action;
+	struct zfcp_adapter *adapter = act->adapter;
+	struct zfcp_port *port = act->port;
+	struct scsi_device *sdev = act->sdev;
+	struct zfcp_scsi_dev *zfcp_sdev;
+	u32 erp_status = act->status;
+
+	switch (action) {
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
+			_zfcp_erp_adapter_reopen(adapter,
+						 ZFCP_STATUS_COMMON_ERP_FAILED,
+						 "ersscg1");
+			return ZFCP_ERP_EXIT;
+		}
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
+			_zfcp_erp_port_reopen(port,
+					      ZFCP_STATUS_COMMON_ERP_FAILED,
+					      "ersscg2");
+			return ZFCP_ERP_EXIT;
+		}
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		zfcp_sdev = sdev_to_zfcp(sdev);
+		if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) {
+			_zfcp_erp_lun_reopen(sdev,
+					     ZFCP_STATUS_COMMON_ERP_FAILED,
+					     "ersscg3", 0);
+			return ZFCP_ERP_EXIT;
+		}
+		break;
+	}
+	return ret;
+}
+
+static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_scsi_dev *zfcp_sdev;
+
+	adapter->erp_total_count--;
+	if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
+		adapter->erp_low_mem_count--;
+		erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
+	}
+
+	list_del(&erp_action->list);
+	zfcp_dbf_rec_run("eractd1", erp_action);
+
+	switch (erp_action->action) {
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
+		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+				  &zfcp_sdev->status);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+				  &erp_action->port->status);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+				  &erp_action->adapter->status);
+		break;
+	}
+}
+
+static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+	struct zfcp_port *port = act->port;
+	struct scsi_device *sdev = act->sdev;
+
+	switch (act->action) {
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
+			scsi_device_put(sdev);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		if (result == ZFCP_ERP_SUCCEEDED)
+			zfcp_scsi_schedule_rport_register(port);
+		/* fall through */
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		put_device(&port->dev);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		if (result == ZFCP_ERP_SUCCEEDED) {
+			register_service_level(&adapter->service_level);
+			queue_work(adapter->work_queue, &adapter->scan_work);
+			queue_work(adapter->work_queue, &adapter->ns_up_work);
+		} else
+			unregister_service_level(&adapter->service_level);
+
+		kref_put(&adapter->ref, zfcp_adapter_release);
+		break;
+	}
+}
+
+static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
+{
+	switch (erp_action->action) {
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		return zfcp_erp_adapter_strategy(erp_action);
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		return zfcp_erp_port_forced_strategy(erp_action);
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		return zfcp_erp_port_strategy(erp_action);
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		return zfcp_erp_lun_strategy(erp_action);
+	}
+	return ZFCP_ERP_FAILED;
+}
+
+static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
+{
+	int retval;
+	unsigned long flags;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	kref_get(&adapter->ref);
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	zfcp_erp_strategy_check_fsfreq(erp_action);
+
+	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
+		zfcp_erp_action_dequeue(erp_action);
+		retval = ZFCP_ERP_DISMISSED;
+		goto unlock;
+	}
+
+	if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
+		retval = ZFCP_ERP_FAILED;
+		goto check_target;
+	}
+
+	zfcp_erp_action_to_running(erp_action);
+
+	/* no lock to allow for blocking operations */
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+	retval = zfcp_erp_strategy_do_action(erp_action);
+	write_lock_irqsave(&adapter->erp_lock, flags);
+
+	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
+		retval = ZFCP_ERP_CONTINUES;
+
+	switch (retval) {
+	case ZFCP_ERP_NOMEM:
+		if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
+			++adapter->erp_low_mem_count;
+			erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
+		}
+		if (adapter->erp_total_count == adapter->erp_low_mem_count)
+			_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
+		else {
+			zfcp_erp_strategy_memwait(erp_action);
+			retval = ZFCP_ERP_CONTINUES;
+		}
+		goto unlock;
+
+	case ZFCP_ERP_CONTINUES:
+		if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
+			--adapter->erp_low_mem_count;
+			erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
+		}
+		goto unlock;
+	}
+
+check_target:
+	retval = zfcp_erp_strategy_check_target(erp_action, retval);
+	zfcp_erp_action_dequeue(erp_action);
+	retval = zfcp_erp_strategy_statechange(erp_action, retval);
+	if (retval == ZFCP_ERP_EXIT)
+		goto unlock;
+	if (retval == ZFCP_ERP_SUCCEEDED)
+		zfcp_erp_strategy_followup_success(erp_action);
+	if (retval == ZFCP_ERP_FAILED)
+		zfcp_erp_strategy_followup_failed(erp_action);
+
+ unlock:
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+
+	if (retval != ZFCP_ERP_CONTINUES)
+		zfcp_erp_action_cleanup(erp_action, retval);
+
+	kref_put(&adapter->ref, zfcp_adapter_release);
+	return retval;
+}
+
+static int zfcp_erp_thread(void *data)
+{
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
+	struct list_head *next;
+	struct zfcp_erp_action *act;
+	unsigned long flags;
+
+	for (;;) {
+		wait_event_interruptible(adapter->erp_ready_wq,
+			   !list_empty(&adapter->erp_ready_head) ||
+			   kthread_should_stop());
+
+		if (kthread_should_stop())
+			break;
+
+		write_lock_irqsave(&adapter->erp_lock, flags);
+		next = adapter->erp_ready_head.next;
+		write_unlock_irqrestore(&adapter->erp_lock, flags);
+
+		if (next != &adapter->erp_ready_head) {
+			act = list_entry(next, struct zfcp_erp_action, list);
+
+			/* there is more to come after dismissal, no notify */
+			if (zfcp_erp_strategy(act) != ZFCP_ERP_DISMISSED)
+				zfcp_erp_wakeup(adapter);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * zfcp_erp_thread_setup - Start ERP thread for adapter
+ * @adapter: Adapter to start the ERP thread for
+ *
+ * Returns 0 on success or the error code from kthread_run()
+ */
+int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
+{
+	struct task_struct *thread;
+
+	thread = kthread_run(zfcp_erp_thread, adapter, "zfcperp%s",
+			     dev_name(&adapter->ccw_device->dev));
+	if (IS_ERR(thread)) {
+		dev_err(&adapter->ccw_device->dev,
+			"Creating an ERP thread for the FCP device failed.\n");
+		return PTR_ERR(thread);
+	}
+
+	adapter->erp_thread = thread;
+	return 0;
+}
+
+/**
+ * zfcp_erp_thread_kill - Stop ERP thread.
+ * @adapter: Adapter where the ERP thread should be stopped.
+ *
+ * The caller of this routine ensures that the specified adapter has
+ * been shut down and that this operation has been completed. Thus,
+ * there are no pending erp_actions which would need to be handled
+ * here.
+ */
+void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
+{
+	kthread_stop(adapter->erp_thread);
+	adapter->erp_thread = NULL;
+	WARN_ON(!list_empty(&adapter->erp_ready_head));
+	WARN_ON(!list_empty(&adapter->erp_running_head));
+}
+
+/**
+ * zfcp_erp_wait - wait for completion of error recovery on an adapter
+ * @adapter: adapter for which to wait for completion of its error recovery
+ */
+void zfcp_erp_wait(struct zfcp_adapter *adapter)
+{
+	wait_event(adapter->erp_done_wqh,
+		   !(atomic_read(&adapter->status) &
+			ZFCP_STATUS_ADAPTER_ERP_PENDING));
+}
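+
+#if 0	/* Illustrative sketch only -- not built.  Callers that need
+	 * synchronous recovery trigger the reopen and then block in
+	 * zfcp_erp_wait() until ZFCP_STATUS_ADAPTER_ERP_PENDING is cleared
+	 * again.  The trace id "exmpl_2" and the function name are
+	 * hypothetical.
+	 */
+static void zfcp_example_adapter_reopen_sync(struct zfcp_adapter *adapter)
+{
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+				"exmpl_2");
+	zfcp_erp_wait(adapter);
+}
+#endif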
+
+/**
+ * zfcp_erp_set_adapter_status - set adapter status bits
+ * @adapter: adapter to change the status
+ * @mask: status bits to change
+ *
+ * Changes in common status bits are propagated to attached ports and LUNs.
+ */
+void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
+{
+	struct zfcp_port *port;
+	struct scsi_device *sdev;
+	unsigned long flags;
+	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+
+	atomic_set_mask(mask, &adapter->status);
+
+	if (!common_mask)
+		return;
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
+		atomic_set_mask(common_mask, &port->status);
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, adapter->scsi_host)
+		atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+}
+
+/**
+ * zfcp_erp_clear_adapter_status - clear adapter status bits
+ * @adapter: adapter to change the status
+ * @mask: status bits to change
+ *
+ * Changes in common status bits are propagated to attached ports and LUNs.
+ */
+void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
+{
+	struct zfcp_port *port;
+	struct scsi_device *sdev;
+	unsigned long flags;
+	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
+
+	atomic_clear_mask(mask, &adapter->status);
+
+	if (!common_mask)
+		return;
+
+	if (clear_counter)
+		atomic_set(&adapter->erp_counter, 0);
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list) {
+		atomic_clear_mask(common_mask, &port->status);
+		if (clear_counter)
+			atomic_set(&port->erp_counter, 0);
+	}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, adapter->scsi_host) {
+		atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+		if (clear_counter)
+			atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+	}
+	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+}
+
+/**
+ * zfcp_erp_set_port_status - set port status bits
+ * @port: port to change the status
+ * @mask: status bits to change
+ *
+ * Changes in common status bits are propagated to attached LUNs.
+ */
+void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
+{
+	struct scsi_device *sdev;
+	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+	unsigned long flags;
+
+	atomic_set_mask(mask, &port->status);
+
+	if (!common_mask)
+		return;
+
+	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, port->adapter->scsi_host)
+		if (sdev_to_zfcp(sdev)->port == port)
+			atomic_set_mask(common_mask,
+					&sdev_to_zfcp(sdev)->status);
+	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
+}
+
+/**
+ * zfcp_erp_clear_port_status - clear port status bits
+ * @port: port to change the status
+ * @mask: status bits to change
+ *
+ * Changes in common status bits are propagated to attached LUNs.
+ */
+void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
+{
+	struct scsi_device *sdev;
+	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
+	unsigned long flags;
+
+	atomic_clear_mask(mask, &port->status);
+
+	if (!common_mask)
+		return;
+
+	if (clear_counter)
+		atomic_set(&port->erp_counter, 0);
+
+	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, port->adapter->scsi_host)
+		if (sdev_to_zfcp(sdev)->port == port) {
+			atomic_clear_mask(common_mask,
+					  &sdev_to_zfcp(sdev)->status);
+			if (clear_counter)
+				atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+		}
+	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
+}
+
+/**
+ * zfcp_erp_set_lun_status - set lun status bits
+ * @sdev: SCSI device / lun to set the status bits
+ * @mask: status bits to change
+ */
+void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	atomic_set_mask(mask, &zfcp_sdev->status);
+}
+
+/**
+ * zfcp_erp_clear_lun_status - clear lun status bits
+ * @sdev: SCSI device / lun to clear the status bits
+ * @mask: status bits to change
+ */
+void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	atomic_clear_mask(mask, &zfcp_sdev->status);
+
+	if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
+		atomic_set(&zfcp_sdev->erp_counter, 0);
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_ext.h b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_ext.h
new file mode 100644
index 0000000..ef9e502
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_ext.h
@@ -0,0 +1,175 @@
+/*
+ * zfcp device driver
+ *
+ * External function declarations.
+ *
+ * Copyright IBM Corporation 2002, 2010
+ */
+
+#ifndef ZFCP_EXT_H
+#define ZFCP_EXT_H
+
+#include <linux/types.h>
+#include <scsi/fc/fc_els.h>
+#include "zfcp_def.h"
+#include "zfcp_fc.h"
+
+/* zfcp_aux.c */
+extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
+extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
+extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
+					   u32);
+extern void zfcp_sg_free_table(struct scatterlist *, int);
+extern int zfcp_sg_setup_table(struct scatterlist *, int);
+extern void zfcp_device_unregister(struct device *,
+				   const struct attribute_group *);
+extern void zfcp_adapter_release(struct kref *);
+extern void zfcp_adapter_unregister(struct zfcp_adapter *);
+
+/* zfcp_ccw.c */
+extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
+extern struct ccw_driver zfcp_ccw_driver;
+extern struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *);
+extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);
+
+/* zfcp_cfdc.c */
+extern struct miscdevice zfcp_cfdc_misc;
+extern void zfcp_cfdc_port_denied(struct zfcp_port *, union fsf_status_qual *);
+extern void zfcp_cfdc_lun_denied(struct scsi_device *, union fsf_status_qual *);
+extern void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *,
+				     union fsf_status_qual *);
+extern int zfcp_cfdc_open_lun_eval(struct scsi_device *,
+				   struct fsf_qtcb_bottom_support *);
+extern void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *);
+
+
+/* zfcp_dbf.c */
+extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
+extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
+extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
+			      struct zfcp_port *, struct scsi_device *, u8, u8);
+extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
+extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
+extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
+extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
+
+/* zfcp_erp.c */
+extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
+extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
+extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
+extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
+extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
+extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
+extern int  zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
+extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
+extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
+extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *);
+extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *);
+extern int  zfcp_erp_thread_setup(struct zfcp_adapter *);
+extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
+extern void zfcp_erp_wait(struct zfcp_adapter *);
+extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
+extern void zfcp_erp_timeout_handler(unsigned long);
+
+/* zfcp_fc.c */
+extern struct kmem_cache *zfcp_fc_req_cache;
+extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
+				enum fc_host_event_code event_code, u32);
+extern void zfcp_fc_post_event(struct work_struct *);
+extern void zfcp_fc_scan_ports(struct work_struct *);
+extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
+extern void zfcp_fc_port_did_lookup(struct work_struct *);
+extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *);
+extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fc_els_flogi *);
+extern void zfcp_fc_test_link(struct zfcp_port *);
+extern void zfcp_fc_link_test_work(struct work_struct *);
+extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
+extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
+extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
+extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
+extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
+extern void zfcp_fc_sym_name_update(struct work_struct *);
+
+/* zfcp_fsf.c */
+extern struct kmem_cache *zfcp_fsf_qtcb_cache;
+extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
+extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
+extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
+extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
+extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
+extern int zfcp_fsf_open_lun(struct zfcp_erp_action *);
+extern int zfcp_fsf_close_lun(struct zfcp_erp_action *);
+extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
+extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
+					      struct fsf_qtcb_bottom_config *);
+extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
+extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
+					    struct fsf_qtcb_bottom_port *);
+extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
+						  struct zfcp_fsf_cfdc *);
+extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
+extern int zfcp_fsf_status_read(struct zfcp_qdio *);
+extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
+extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
+			    mempool_t *, unsigned int);
+extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
+			     struct zfcp_fsf_ct_els *, unsigned int);
+extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
+extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
+extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8);
+extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
+extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
+
+/* zfcp_qdio.c */
+extern int zfcp_qdio_setup(struct zfcp_adapter *);
+extern void zfcp_qdio_destroy(struct zfcp_qdio *);
+extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
+extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
+extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
+				   struct scatterlist *);
+extern int zfcp_qdio_open(struct zfcp_qdio *);
+extern void zfcp_qdio_close(struct zfcp_qdio *);
+extern void zfcp_qdio_siosl(struct zfcp_adapter *);
+extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *,
+					     struct qdio_buffer *);
+
+/* zfcp_scsi.c */
+extern struct scsi_transport_template *zfcp_scsi_transport_template;
+extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
+extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
+extern struct fc_function_template zfcp_transport_functions;
+extern void zfcp_scsi_rport_work(struct work_struct *);
+extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
+extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
+extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
+extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
+extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
+
+/* zfcp_sysfs.c */
+extern struct attribute_group zfcp_sysfs_unit_attrs;
+extern struct attribute_group zfcp_sysfs_adapter_attrs;
+extern struct attribute_group zfcp_sysfs_port_attrs;
+extern struct mutex zfcp_sysfs_port_units_mutex;
+extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
+extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+
+/* zfcp_unit.c */
+extern int zfcp_unit_add(struct zfcp_port *, u64);
+extern int zfcp_unit_remove(struct zfcp_port *, u64);
+extern struct zfcp_unit *zfcp_unit_find(struct zfcp_port *, u64);
+extern struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit);
+extern void zfcp_unit_scsi_scan(struct zfcp_unit *);
+extern void zfcp_unit_queue_scsi_scan(struct zfcp_port *);
+extern unsigned int zfcp_unit_sdev_status(struct zfcp_unit *);
+
+#endif	/* ZFCP_EXT_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_fc.c b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_fc.c
new file mode 100644
index 0000000..297e6b7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_fc.c
@@ -0,0 +1,970 @@
+/*
+ * zfcp device driver
+ *
+ * Fibre Channel related functions for the zfcp device driver.
+ *
+ * Copyright IBM Corporation 2008, 2010
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/libfc.h>
+#include "zfcp_ext.h"
+#include "zfcp_fc.h"
+
+struct kmem_cache *zfcp_fc_req_cache;
+
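+/* bits of the N_Port ID that are significant for each RSCN address format */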
+static u32 zfcp_fc_rscn_range_mask[] = {
+	[ELS_ADDR_FMT_PORT]		= 0xFFFFFF,
+	[ELS_ADDR_FMT_AREA]		= 0xFFFF00,
+	[ELS_ADDR_FMT_DOM]		= 0xFF0000,
+	[ELS_ADDR_FMT_FAB]		= 0x000000,
+};
+
+/**
+ * zfcp_fc_post_event - post event to userspace via fc_transport
+ * @work: work struct with enqueued events
+ */
+void zfcp_fc_post_event(struct work_struct *work)
+{
+	struct zfcp_fc_event *event = NULL, *tmp = NULL;
+	LIST_HEAD(tmp_lh);
+	struct zfcp_fc_events *events = container_of(work,
+					struct zfcp_fc_events, work);
+	struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
+						events);
+
+	spin_lock_bh(&events->list_lock);
+	list_splice_init(&events->list, &tmp_lh);
+	spin_unlock_bh(&events->list_lock);
+
+	list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
+		fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
+				event->code, event->data);
+		list_del(&event->list);
+		kfree(event);
+	}
+
+}
+
+/**
+ * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
+ * @adapter: The adapter where to enqueue the event
+ * @event_code: The event code (as defined in fc_host_event_code in
+ *		scsi_transport_fc.h)
+ * @event_data: The event data (e.g. n_port page in case of els)
+ */
+void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
+			enum fc_host_event_code event_code, u32 event_data)
+{
+	struct zfcp_fc_event *event;
+
+	event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
+	if (!event)
+		return;
+
+	event->code = event_code;
+	event->data = event_data;
+
+	spin_lock(&adapter->events.list_lock);
+	list_add_tail(&event->list, &adapter->events.list);
+	spin_unlock(&adapter->events.list_lock);
+
+	queue_work(adapter->work_queue, &adapter->events.work);
+}
+
+static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
+{
+	if (mutex_lock_interruptible(&wka_port->mutex))
+		return -ERESTARTSYS;
+
+	if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
+	    wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
+		wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
+		if (zfcp_fsf_open_wka_port(wka_port))
+			wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+	}
+
+	mutex_unlock(&wka_port->mutex);
+
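+	/* wait outside the mutex for the open request to complete */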
+	wait_event(wka_port->completion_wq,
+		   wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
+		   wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
+
+	if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
+		atomic_inc(&wka_port->refcount);
+		return 0;
+	}
+	return -EIO;
+}
+
+static void zfcp_fc_wka_port_offline(struct work_struct *work)
+{
+	struct delayed_work *dw = to_delayed_work(work);
+	struct zfcp_fc_wka_port *wka_port =
+			container_of(dw, struct zfcp_fc_wka_port, work);
+
+	mutex_lock(&wka_port->mutex);
+	if ((atomic_read(&wka_port->refcount) != 0) ||
+	    (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
+		goto out;
+
+	wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
+	if (zfcp_fsf_close_wka_port(wka_port)) {
+		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+		wake_up(&wka_port->completion_wq);
+	}
+out:
+	mutex_unlock(&wka_port->mutex);
+}
+
+static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
+{
+	if (atomic_dec_return(&wka_port->refcount) != 0)
+		return;
+	/* wait 10 milliseconds, other reqs might pop in */
+	schedule_delayed_work(&wka_port->work, HZ / 100);
+}
+
+static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
+				  struct zfcp_adapter *adapter)
+{
+	init_waitqueue_head(&wka_port->completion_wq);
+
+	wka_port->adapter = adapter;
+	wka_port->d_id = d_id;
+
+	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+	atomic_set(&wka_port->refcount, 0);
+	mutex_init(&wka_port->mutex);
+	INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
+}
+
+static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
+{
+	cancel_delayed_work_sync(&wka->work);
+	mutex_lock(&wka->mutex);
+	wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
+	mutex_unlock(&wka->mutex);
+}
+
+void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
+{
+	if (!gs)
+		return;
+	zfcp_fc_wka_port_force_offline(&gs->ms);
+	zfcp_fc_wka_port_force_offline(&gs->ts);
+	zfcp_fc_wka_port_force_offline(&gs->ds);
+	zfcp_fc_wka_port_force_offline(&gs->as);
+}
+
+static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
+				   struct fc_els_rscn_page *page)
+{
+	unsigned long flags;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
+	struct zfcp_port *port;
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list) {
+		if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
+			zfcp_fc_test_link(port);
+		if (!port->d_id)
+			zfcp_erp_port_reopen(port,
+					     ZFCP_STATUS_COMMON_ERP_FAILED,
+					     "fcrscn1");
+	}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+}
+
+static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
+{
+	struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+	struct fc_els_rscn *head;
+	struct fc_els_rscn_page *page;
+	u16 i;
+	u16 no_entries;
+	unsigned int afmt;
+
+	head = (struct fc_els_rscn *) status_buffer->payload.data;
+	page = (struct fc_els_rscn_page *) head;
+
+	/* see FC-FS */
+	no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);
+
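+	/* the count includes the 4 byte RSCN header, which has the same size
+	   as one page entry, so the loop below starts at index 1 */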
+	for (i = 1; i < no_entries; i++) {
+		/* skip head and start with 1st element */
+		page++;
+		afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
+		_zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
+				       page);
+		zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
+				      *(u32 *)page);
+	}
+	queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
+}
+
+static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
+{
+	unsigned long flags;
+	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_port *port;
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
+		if (port->wwpn == wwpn) {
+			zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
+			break;
+		}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+}
+
+static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
+{
+	struct fsf_status_read_buffer *status_buffer;
+	struct fc_els_flogi *plogi;
+
+	status_buffer = (struct fsf_status_read_buffer *) req->data;
+	plogi = (struct fc_els_flogi *) status_buffer->payload.data;
+	zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn);
+}
+
+static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
+{
+	struct fsf_status_read_buffer *status_buffer =
+		(struct fsf_status_read_buffer *)req->data;
+	struct fc_els_logo *logo =
+		(struct fc_els_logo *) status_buffer->payload.data;
+
+	zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn);
+}
+
+/**
+ * zfcp_fc_incoming_els - handle incoming ELS
+ * @fsf_req - request which contains incoming ELS
+ */
+void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
+{
+	struct fsf_status_read_buffer *status_buffer =
+		(struct fsf_status_read_buffer *) fsf_req->data;
+	unsigned int els_type = status_buffer->payload.data[0];
+
+	zfcp_dbf_san_in_els("fciels1", fsf_req);
+	if (els_type == ELS_PLOGI)
+		zfcp_fc_incoming_plogi(fsf_req);
+	else if (els_type == ELS_LOGO)
+		zfcp_fc_incoming_logo(fsf_req);
+	else if (els_type == ELS_RSCN)
+		zfcp_fc_incoming_rscn(fsf_req);
+}
+
+static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
+{
+	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+	struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
+
+	if (ct_els->status)
+		return;
+	if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC)
+		return;
+
+	/* looks like a valid d_id */
+	ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
+}
+
+static void zfcp_fc_complete(void *data)
+{
+	complete(data);
+}
+
+static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
+{
+	ct_hdr->ct_rev = FC_CT_REV;
+	ct_hdr->ct_fs_type = FC_FST_DIR;
+	ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
+	ct_hdr->ct_cmd = cmd;
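+	/* the maximum/residual size field is specified in 4 byte words */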
+	ct_hdr->ct_mr_size = mr_size / 4;
+}
+
+static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
+				     struct zfcp_fc_req *fc_req)
+{
+	struct zfcp_adapter *adapter = port->adapter;
+	DECLARE_COMPLETION_ONSTACK(completion);
+	struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
+	struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
+	int ret;
+
+	/* setup parameters for send generic command */
+	fc_req->ct_els.port = port;
+	fc_req->ct_els.handler = zfcp_fc_complete;
+	fc_req->ct_els.handler_data = &completion;
+	fc_req->ct_els.req = &fc_req->sg_req;
+	fc_req->ct_els.resp = &fc_req->sg_rsp;
+	sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
+	sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));
+
+	zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
+			   FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
+	gid_pn_req->gid_pn.fn_wwpn = port->wwpn;
+
+	ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
+			       adapter->pool.gid_pn_req,
+			       ZFCP_FC_CTELS_TMO);
+	if (!ret) {
+		wait_for_completion(&completion);
+		zfcp_fc_ns_gid_pn_eval(fc_req);
+	}
+	return ret;
+}
+
+/**
+ * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
+ * @port: port where GID_PN request is needed
+ * return: 0 on success, -ENOMEM or another negative error code on failure
+ */
+static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
+{
+	int ret;
+	struct zfcp_fc_req *fc_req;
+	struct zfcp_adapter *adapter = port->adapter;
+
+	fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
+	if (!fc_req)
+		return -ENOMEM;
+
+	memset(fc_req, 0, sizeof(*fc_req));
+
+	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
+	if (ret)
+		goto out;
+
+	ret = zfcp_fc_ns_gid_pn_request(port, fc_req);
+
+	zfcp_fc_wka_port_put(&adapter->gs->ds);
+out:
+	mempool_free(fc_req, adapter->pool.gid_pn);
+	return ret;
+}
+
+void zfcp_fc_port_did_lookup(struct work_struct *work)
+{
+	int ret;
+	struct zfcp_port *port = container_of(work, struct zfcp_port,
+					      gid_pn_work);
+
+	ret = zfcp_fc_ns_gid_pn(port);
+	if (ret) {
+		/* could not issue gid_pn for some reason */
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
+		goto out;
+	}
+
+	if (!port->d_id) {
+		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
+		goto out;
+	}
+
+	zfcp_erp_port_reopen(port, 0, "fcgpn_3");
+out:
+	put_device(&port->dev);
+}
+
+/**
+ * zfcp_fc_trigger_did_lookup - trigger the d_id lookup using a GID_PN request
+ * @port: The zfcp_port to lookup the d_id for.
+ */
+void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
+{
+	get_device(&port->dev);
+	if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
+		put_device(&port->dev);
+}
+
+/**
+ * zfcp_fc_plogi_evaluate - evaluate PLOGI payload
+ * @port: zfcp_port structure
+ * @plogi: plogi payload
+ *
+ * Evaluate PLOGI payload and copy important fields into the zfcp_port structure
+ */
+void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
+{
+	if (plogi->fl_wwpn != port->wwpn) {
+		port->d_id = 0;
+		dev_warn(&port->adapter->ccw_device->dev,
+			 "A port opened with WWPN 0x%016Lx returned data that "
+			 "identifies it as WWPN 0x%016Lx\n",
+			 (unsigned long long) port->wwpn,
+			 (unsigned long long) plogi->fl_wwpn);
+		return;
+	}
+
+	port->wwnn = plogi->fl_wwnn;
+	port->maxframe_size = plogi->fl_csp.sp_bb_data;
+
+	if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID)
+		port->supported_classes |= FC_COS_CLASS1;
+	if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID)
+		port->supported_classes |= FC_COS_CLASS2;
+	if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID)
+		port->supported_classes |= FC_COS_CLASS3;
+	if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID)
+		port->supported_classes |= FC_COS_CLASS4;
+}
+
+static void zfcp_fc_adisc_handler(void *data)
+{
+	struct zfcp_fc_req *fc_req = data;
+	struct zfcp_port *port = fc_req->ct_els.port;
+	struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;
+
+	if (fc_req->ct_els.status) {
+		/* request rejected or timed out */
+		zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
+					    "fcadh_1");
+		goto out;
+	}
+
+	if (!port->wwnn)
+		port->wwnn = adisc_resp->adisc_wwnn;
+
+	if ((port->wwpn != adisc_resp->adisc_wwpn) ||
+	    !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
+		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
+				     "fcadh_2");
+		goto out;
+	}
+
+	/* port is good, unblock rport without going through erp */
+	zfcp_scsi_schedule_rport_register(port);
+ out:
+	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	put_device(&port->dev);
+	kmem_cache_free(zfcp_fc_req_cache, fc_req);
+}
+
+static int zfcp_fc_adisc(struct zfcp_port *port)
+{
+	struct zfcp_fc_req *fc_req;
+	struct zfcp_adapter *adapter = port->adapter;
+	struct Scsi_Host *shost = adapter->scsi_host;
+	int ret;
+
+	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
+	if (!fc_req)
+		return -ENOMEM;
+
+	fc_req->ct_els.port = port;
+	fc_req->ct_els.req = &fc_req->sg_req;
+	fc_req->ct_els.resp = &fc_req->sg_rsp;
+	sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
+		    sizeof(struct fc_els_adisc));
+	sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
+		    sizeof(struct fc_els_adisc));
+
+	fc_req->ct_els.handler = zfcp_fc_adisc_handler;
+	fc_req->ct_els.handler_data = fc_req;
+
+	/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
+	   without FC-AL-2 capability, so we don't set it */
+	fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
+	fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
+	fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
+	hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
+
+	ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
+				ZFCP_FC_CTELS_TMO);
+	if (ret)
+		kmem_cache_free(zfcp_fc_req_cache, fc_req);
+
+	return ret;
+}
+
+void zfcp_fc_link_test_work(struct work_struct *work)
+{
+	struct zfcp_port *port =
+		container_of(work, struct zfcp_port, test_link_work);
+	int retval;
+
+	get_device(&port->dev);
+	port->rport_task = RPORT_DEL;
+	zfcp_scsi_rport_work(&port->rport_work);
+
+	/* only issue one test command at one time per port */
+	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
+		goto out;
+
+	atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+
+	retval = zfcp_fc_adisc(port);
+	if (retval == 0)
+		return;
+
+	/* send of ADISC was not possible */
+	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
+
+out:
+	put_device(&port->dev);
+}
+
+/**
+ * zfcp_fc_test_link - lightweight link test procedure
+ * @port: port to be tested
+ *
+ * Test status of a link to a remote port using the ELS command ADISC.
+ * If there is a problem with the remote port, error recovery steps
+ * will be triggered.
+ */
+void zfcp_fc_test_link(struct zfcp_port *port)
+{
+	get_device(&port->dev);
+	if (!queue_work(port->adapter->work_queue, &port->test_link_work))
+		put_device(&port->dev);
+}
+
+static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
+{
+	struct zfcp_fc_req *fc_req;
+
+	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+	if (!fc_req)
+		return NULL;
+
+	if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
+		kmem_cache_free(zfcp_fc_req_cache, fc_req);
+		return NULL;
+	}
+
+	sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
+		    sizeof(struct zfcp_fc_gpn_ft_req));
+
+	return fc_req;
+}
+
+static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
+			       struct zfcp_adapter *adapter, int max_bytes)
+{
+	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+	struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
+	DECLARE_COMPLETION_ONSTACK(completion);
+	int ret;
+
+	zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
+	req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
+
+	ct_els->handler = zfcp_fc_complete;
+	ct_els->handler_data = &completion;
+	ct_els->req = &fc_req->sg_req;
+	ct_els->resp = &fc_req->sg_rsp;
+
+	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+			       ZFCP_FC_CTELS_TMO);
+	if (!ret)
+		wait_for_completion(&completion);
+	return ret;
+}
+
+static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
+{
+	if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
+		return;
+
+	atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
+
+	if ((port->supported_classes != 0) ||
+	    !list_empty(&port->unit_list))
+		return;
+
+	list_move_tail(&port->list, lh);
+}
+
+static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
+			       struct zfcp_adapter *adapter, int max_entries)
+{
+	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+	struct scatterlist *sg = &fc_req->sg_rsp;
+	struct fc_ct_hdr *hdr = sg_virt(sg);
+	struct fc_gpn_ft_resp *acc = sg_virt(sg);
+	struct zfcp_port *port, *tmp;
+	unsigned long flags;
+	LIST_HEAD(remove_lh);
+	u32 d_id;
+	int ret = 0, x, last = 0;
+
+	if (ct_els->status)
+		return -EIO;
+
+	if (hdr->ct_cmd != FC_FS_ACC) {
+		if (hdr->ct_reason == FC_BA_RJT_UNABLE)
+			return -EAGAIN; /* might be a temporary condition */
+		return -EIO;
+	}
+
+	if (hdr->ct_mr_size) {
+		dev_warn(&adapter->ccw_device->dev,
+			 "The name server reported %d words residual data\n",
+			 hdr->ct_mr_size);
+		return -E2BIG;
+	}
+
+	/* first entry is the header */
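+	/* each scatterlist page holds ZFCP_FC_GPN_FT_ENT_PAGE + 1 entry-sized
+	   slots; move on to the next page once they are consumed */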
+	for (x = 1; x < max_entries && !last; x++) {
+		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
+			acc++;
+		else
+			acc = sg_virt(++sg);
+
+		last = acc->fp_flags & FC_NS_FID_LAST;
+		d_id = ntoh24(acc->fp_fid);
+
+		/* don't attach ports with a well known address */
+		if (d_id >= FC_FID_WELL_KNOWN_BASE)
+			continue;
+		/* skip the adapter's port and known remote ports */
+		if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host))
+			continue;
+
+		port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
+					 ZFCP_STATUS_COMMON_NOESC, d_id);
+		if (!IS_ERR(port))
+			zfcp_erp_port_reopen(port, 0, "fcegpf1");
+		else if (PTR_ERR(port) != -EEXIST)
+			ret = PTR_ERR(port);
+	}
+
+	zfcp_erp_wait(adapter);
+	write_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
+		zfcp_fc_validate_port(port, &remove_lh);
+	write_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+	list_for_each_entry_safe(port, tmp, &remove_lh, list) {
+		zfcp_erp_port_shutdown(port, 0, "fcegpf2");
+		zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
+	}
+
+	return ret;
+}
+
+/**
+ * zfcp_fc_scan_ports - scan remote ports and attach new ports
+ * @work: reference to scheduled work
+ */
+void zfcp_fc_scan_ports(struct work_struct *work)
+{
+	struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
+						    scan_work);
+	int ret, i;
+	struct zfcp_fc_req *fc_req;
+	int chain, max_entries, buf_num, max_bytes;
+
+	chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
+	buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
+	max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
+	max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;
+
+	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
+	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+		return;
+
+	if (zfcp_fc_wka_port_get(&adapter->gs->ds))
+		return;
+
+	fc_req = zfcp_alloc_sg_env(buf_num);
+	if (!fc_req)
+		goto out;
+
+	for (i = 0; i < 3; i++) {
+		ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
+		if (!ret) {
+			ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
+			if (ret == -EAGAIN)
+				ssleep(1);
+			else
+				break;
+		}
+	}
+	zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
+	kmem_cache_free(zfcp_fc_req_cache, fc_req);
+out:
+	zfcp_fc_wka_port_put(&adapter->gs->ds);
+}
+
+static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
+			struct zfcp_fc_req *fc_req)
+{
+	DECLARE_COMPLETION_ONSTACK(completion);
+	char devno[] = "DEVNO:";
+	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+	struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
+	struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
+	int ret;
+
+	zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
+			   FC_SYMBOLIC_NAME_SIZE);
+	hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));
+
+	sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
+	sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));
+
+	ct_els->handler = zfcp_fc_complete;
+	ct_els->handler_data = &completion;
+	ct_els->req = &fc_req->sg_req;
+	ct_els->resp = &fc_req->sg_rsp;
+
+	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+			       ZFCP_FC_CTELS_TMO);
+	if (ret)
+		return ret;
+
+	wait_for_completion(&completion);
+	if (ct_els->status)
+		return ct_els->status;
+
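+	/* in NPIV mode append the bus ID and host name to the symbolic name,
+	   unless the name server already reports a DEVNO: tag */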
+	if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
+	    !(strstr(gspn_rsp->gspn.fp_name, devno)))
+		snprintf(fc_host_symbolic_name(adapter->scsi_host),
+			 FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
+			 gspn_rsp->gspn.fp_name, devno,
+			 dev_name(&adapter->ccw_device->dev),
+			 init_utsname()->nodename);
+	else
+		strlcpy(fc_host_symbolic_name(adapter->scsi_host),
+			gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);
+
+	return 0;
+}
+
+static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
+			 struct zfcp_fc_req *fc_req)
+{
+	DECLARE_COMPLETION_ONSTACK(completion);
+	struct Scsi_Host *shost = adapter->scsi_host;
+	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+	struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
+	struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
+	int ret, len;
+
+	zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
+			   FC_SYMBOLIC_NAME_SIZE);
+	hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
+	len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
+		      FC_SYMBOLIC_NAME_SIZE);
+	rspn_req->rspn.fr_name_len = len;
+
+	sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
+	sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));
+
+	ct_els->handler = zfcp_fc_complete;
+	ct_els->handler_data = &completion;
+	ct_els->req = &fc_req->sg_req;
+	ct_els->resp = &fc_req->sg_rsp;
+
+	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+			       ZFCP_FC_CTELS_TMO);
+	if (!ret)
+		wait_for_completion(&completion);
+}
+
+/**
+ * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
+ * @work: ns_up_work of the adapter where to update the symbolic port name
+ *
+ * Retrieve the current symbolic port name that may have been set by
+ * the hardware using the GSPN request and update the fc_host
+ * symbolic_name sysfs attribute. When running in NPIV mode (and hence
+ * the port name is unique for this system), update the symbolic port
+ * name to add Linux specific information and update the FC nameserver
+ * using the RSPN request.
+ */
+void zfcp_fc_sym_name_update(struct work_struct *work)
+{
+	struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
+						    ns_up_work);
+	int ret;
+	struct zfcp_fc_req *fc_req;
+
+	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
+	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+		return;
+
+	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+	if (!fc_req)
+		return;
+
+	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
+	if (ret)
+		goto out_free;
+
+	ret = zfcp_fc_gspn(adapter, fc_req);
+	if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+		goto out_ds_put;
+
+	memset(fc_req, 0, sizeof(*fc_req));
+	zfcp_fc_rspn(adapter, fc_req);
+
+out_ds_put:
+	zfcp_fc_wka_port_put(&adapter->gs->ds);
+out_free:
+	kmem_cache_free(zfcp_fc_req_cache, fc_req);
+}
+
+static void zfcp_fc_ct_els_job_handler(void *data)
+{
+	struct fc_bsg_job *job = data;
+	struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
+	struct fc_bsg_reply *jr = job->reply;
+
+	jr->reply_payload_rcv_len = job->reply_payload.payload_len;
+	jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+	jr->result = zfcp_ct_els->status ? -EIO : 0;
+	job->job_done(job);
+}
+
+static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
+{
+	u32 preamble_word1;
+	u8 gs_type;
+	struct zfcp_adapter *adapter;
+
+	preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
+	gs_type = (preamble_word1 & 0xff000000) >> 24;
+
+	adapter = (struct zfcp_adapter *) job->shost->hostdata[0];
+
+	switch (gs_type) {
+	case FC_FST_ALIAS:
+		return &adapter->gs->as;
+	case FC_FST_MGMT:
+		return &adapter->gs->ms;
+	case FC_FST_TIME:
+		return &adapter->gs->ts;
+	case FC_FST_DIR:
+		return &adapter->gs->ds;
+	default:
+		return NULL;
+	}
+}
+
+static void zfcp_fc_ct_job_handler(void *data)
+{
+	struct fc_bsg_job *job = data;
+	struct zfcp_fc_wka_port *wka_port;
+
+	wka_port = zfcp_fc_job_wka_port(job);
+	zfcp_fc_wka_port_put(wka_port);
+
+	zfcp_fc_ct_els_job_handler(data);
+}
+
+static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
+				struct zfcp_adapter *adapter)
+{
+	struct zfcp_fsf_ct_els *els = job->dd_data;
+	struct fc_rport *rport = job->rport;
+	struct zfcp_port *port;
+	u32 d_id;
+
+	if (rport) {
+		port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
+		if (!port)
+			return -EINVAL;
+
+		d_id = port->d_id;
+		put_device(&port->dev);
+	} else
+		d_id = ntoh24(job->request->rqst_data.h_els.port_id);
+
+	els->handler = zfcp_fc_ct_els_job_handler;
+	return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
+}
+
+static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
+			       struct zfcp_adapter *adapter)
+{
+	int ret;
+	struct zfcp_fsf_ct_els *ct = job->dd_data;
+	struct zfcp_fc_wka_port *wka_port;
+
+	wka_port = zfcp_fc_job_wka_port(job);
+	if (!wka_port)
+		return -EINVAL;
+
+	ret = zfcp_fc_wka_port_get(wka_port);
+	if (ret)
+		return ret;
+
+	ct->handler = zfcp_fc_ct_job_handler;
+	ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ);
+	if (ret)
+		zfcp_fc_wka_port_put(wka_port);
+
+	return ret;
+}
+
+int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
+{
+	struct Scsi_Host *shost;
+	struct zfcp_adapter *adapter;
+	struct zfcp_fsf_ct_els *ct_els = job->dd_data;
+
+	shost = job->rport ? rport_to_shost(job->rport) : job->shost;
+	adapter = (struct zfcp_adapter *)shost->hostdata[0];
+
+	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
+		return -EINVAL;
+
+	ct_els->req = job->request_payload.sg_list;
+	ct_els->resp = job->reply_payload.sg_list;
+	ct_els->handler_data = job;
+
+	switch (job->request->msgcode) {
+	case FC_BSG_RPT_ELS:
+	case FC_BSG_HST_ELS_NOLOGIN:
+		return zfcp_fc_exec_els_job(job, adapter);
+	case FC_BSG_RPT_CT:
+	case FC_BSG_HST_CT:
+		return zfcp_fc_exec_ct_job(job, adapter);
+	default:
+		return -EINVAL;
+	}
+}
+
+int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
+{
+	/* hardware tracks timeout, reset bsg timeout to not interfere */
+	return -EAGAIN;
+}
+
+int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
+{
+	struct zfcp_fc_wka_ports *wka_ports;
+
+	wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL);
+	if (!wka_ports)
+		return -ENOMEM;
+
+	adapter->gs = wka_ports;
+	zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter);
+	zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
+	zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
+	zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);
+
+	return 0;
+}
+
+void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter)
+{
+	kfree(adapter->gs);
+	adapter->gs = NULL;
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_fc.h b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_fc.h
new file mode 100644
index 0000000..4561f3b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_fc.h
@@ -0,0 +1,297 @@
+/*
+ * zfcp device driver
+ *
+ * Fibre Channel related definitions and inline functions for the zfcp
+ * device driver
+ *
+ * Copyright IBM Corporation 2009
+ */
+
+#ifndef ZFCP_FC_H
+#define ZFCP_FC_H
+
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/fc/fc_ns.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "zfcp_fsf.h"
+
+#define ZFCP_FC_CT_SIZE_PAGE	  (PAGE_SIZE - sizeof(struct fc_ct_hdr))
+#define ZFCP_FC_GPN_FT_ENT_PAGE	  (ZFCP_FC_CT_SIZE_PAGE \
+					/ sizeof(struct fc_gpn_ft_resp))
+#define ZFCP_FC_GPN_FT_NUM_BUFS	  4 /* memory pages */
+
+#define ZFCP_FC_GPN_FT_MAX_SIZE	  (ZFCP_FC_GPN_FT_NUM_BUFS * PAGE_SIZE \
+					- sizeof(struct fc_ct_hdr))
+#define ZFCP_FC_GPN_FT_MAX_ENT	  (ZFCP_FC_GPN_FT_NUM_BUFS * \
+					(ZFCP_FC_GPN_FT_ENT_PAGE + 1))
+
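+/* timeout for CT and ELS requests: twice R_A_TOV, converted to seconds */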
+#define ZFCP_FC_CTELS_TMO	(2 * FC_DEF_R_A_TOV / 1000)
+
+/**
+ * struct zfcp_fc_event - FC HBAAPI event for internal queueing from irq context
+ * @code: Event code
+ * @data: Event data
+ * @list: list_head for zfcp_fc_events list
+ */
+struct zfcp_fc_event {
+	enum fc_host_event_code code;
+	u32 data;
+	struct list_head list;
+};
+
+/**
+ * struct zfcp_fc_events - Infrastructure for posting FC events from irq context
+ * @list: List for queueing of events from irq context to workqueue
+ * @list_lock: Lock for event list
+ * @work: work_struct for forwarding events in workqueue
+ */
+struct zfcp_fc_events {
+	struct list_head list;
+	spinlock_t list_lock;
+	struct work_struct work;
+};
+
+/**
+ * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
+ * @ct_hdr: FC GS common transport header
+ * @gid_pn: GID_PN request
+ */
+struct zfcp_fc_gid_pn_req {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_ns_gid_pn	gid_pn;
+} __packed;
+
+/**
+ * struct zfcp_fc_gid_pn_rsp - container for ct header plus gid_pn response
+ * @ct_hdr: FC GS common transport header
+ * @gid_pn: GID_PN response
+ */
+struct zfcp_fc_gid_pn_rsp {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_gid_pn_resp	gid_pn;
+} __packed;
+
+/**
+ * struct zfcp_fc_gpn_ft_req - container for ct header plus gpn_ft request
+ * @ct_hdr: FC GS common transport header
+ * @gpn_ft: GPN_FT request
+ */
+struct zfcp_fc_gpn_ft_req {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_ns_gid_ft	gpn_ft;
+} __packed;
+
+/**
+ * struct zfcp_fc_gspn_req - container for ct header plus GSPN_ID request
+ * @ct_hdr: FC GS common transport header
+ * @gspn: GSPN_ID request
+ */
+struct zfcp_fc_gspn_req {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_gid_pn_resp	gspn;
+} __packed;
+
+/**
+ * struct zfcp_fc_gspn_rsp - container for ct header plus GSPN_ID response
+ * @ct_hdr: FC GS common transport header
+ * @gspn: GSPN_ID response
+ * @name: The name string of the GSPN_ID response
+ */
+struct zfcp_fc_gspn_rsp {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_gspn_resp	gspn;
+	char			name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
+
+/**
+ * struct zfcp_fc_rspn_req - container for ct header plus RSPN_ID request
+ * @ct_hdr: FC GS common transport header
+ * @rspn: RSPN_ID request
+ * @name: The name string of the RSPN_ID request
+ */
+struct zfcp_fc_rspn_req {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_ns_rspn	rspn;
+	char			name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
+
+/**
+ * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
+ * @ct_els: data required for issuing fsf command
+ * @sg_req: scatterlist entry for request data
+ * @sg_rsp: scatterlist entry for response data
+ * @u: request specific data
+ */
+struct zfcp_fc_req {
+	struct zfcp_fsf_ct_els				ct_els;
+	struct scatterlist				sg_req;
+	struct scatterlist				sg_rsp;
+	union {
+		struct {
+			struct fc_els_adisc		req;
+			struct fc_els_adisc		rsp;
+		} adisc;
+		struct {
+			struct zfcp_fc_gid_pn_req	req;
+			struct zfcp_fc_gid_pn_rsp	rsp;
+		} gid_pn;
+		struct {
+			struct scatterlist sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1];
+			struct zfcp_fc_gpn_ft_req	req;
+		} gpn_ft;
+		struct {
+			struct zfcp_fc_gspn_req		req;
+			struct zfcp_fc_gspn_rsp		rsp;
+		} gspn;
+		struct {
+			struct zfcp_fc_rspn_req		req;
+			struct fc_ct_hdr		rsp;
+		} rspn;
+	} u;
+};
+
+/**
+ * enum zfcp_fc_wka_status - FC WKA port status in zfcp
+ * @ZFCP_FC_WKA_PORT_OFFLINE: Port is closed and not in use
+ * @ZFCP_FC_WKA_PORT_CLOSING: The FSF "close port" request is pending
+ * @ZFCP_FC_WKA_PORT_OPENING: The FSF "open port" request is pending
+ * @ZFCP_FC_WKA_PORT_ONLINE: The port is open and the port handle is valid
+ */
+enum zfcp_fc_wka_status {
+	ZFCP_FC_WKA_PORT_OFFLINE,
+	ZFCP_FC_WKA_PORT_CLOSING,
+	ZFCP_FC_WKA_PORT_OPENING,
+	ZFCP_FC_WKA_PORT_ONLINE,
+};
+
+/**
+ * struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
+ * @adapter: Pointer to adapter structure this WKA port belongs to
+ * @completion_wq: Wait for completion of open/close command
+ * @status: Current status of WKA port
+ * @refcount: Reference count to keep port open as long as it is in use
+ * @d_id: FC destination id or well-known-address
+ * @handle: FSF handle for the open WKA port
+ * @mutex: Mutex used during opening/closing state changes
+ * @work: For delaying the closing of the WKA port
+ */
+struct zfcp_fc_wka_port {
+	struct zfcp_adapter	*adapter;
+	wait_queue_head_t	completion_wq;
+	enum zfcp_fc_wka_status	status;
+	atomic_t		refcount;
+	u32			d_id;
+	u32			handle;
+	struct mutex		mutex;
+	struct delayed_work	work;
+};
+
+/**
+ * struct zfcp_fc_wka_ports - Data structures for FC generic services
+ * @ms: FC Management service
+ * @ts: FC time service
+ * @ds: FC directory service
+ * @as: FC alias service
+ */
+struct zfcp_fc_wka_ports {
+	struct zfcp_fc_wka_port ms;
+	struct zfcp_fc_wka_port ts;
+	struct zfcp_fc_wka_port ds;
+	struct zfcp_fc_wka_port as;
+};
+
+/**
+ * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
+ * @fcp: fcp_cmnd to setup
+ * @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
+ * @tm_flags: task management flags to set up a task management command
+ */
+static inline
+void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
+			 u8 tm_flags)
+{
+	char tag[2];
+
+	int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
+
+	if (unlikely(tm_flags)) {
+		fcp->fc_tm_flags = tm_flags;
+		return;
+	}
+
+	if (scsi_populate_tag_msg(scsi, tag)) {
+		switch (tag[0]) {
+		case MSG_ORDERED_TAG:
+			fcp->fc_pri_ta |= FCP_PTA_ORDERED;
+			break;
+		case MSG_SIMPLE_TAG:
+			fcp->fc_pri_ta |= FCP_PTA_SIMPLE;
+			break;
+		}
+	} else
+		fcp->fc_pri_ta = FCP_PTA_SIMPLE;
+
+	if (scsi->sc_data_direction == DMA_FROM_DEVICE)
+		fcp->fc_flags |= FCP_CFL_RDDATA;
+	if (scsi->sc_data_direction == DMA_TO_DEVICE)
+		fcp->fc_flags |= FCP_CFL_WRDATA;
+
+	memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
+
+	fcp->fc_dl = scsi_bufflen(scsi);
+
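+	/* DIF type 1 adds 8 bytes of protection information per logical block */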
+	if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1)
+		fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8;
+}
+
+/**
+ * zfcp_fc_eval_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
+ * @fcp_rsp: FCP RSP IU to evaluate
+ * @scsi: SCSI command where to update status and sense buffer
+ */
+static inline
+void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
+			  struct scsi_cmnd *scsi)
+{
+	struct fcp_resp_rsp_info *rsp_info;
+	char *sense;
+	u32 sense_len, resid;
+	u8 rsp_flags;
+
+	set_msg_byte(scsi, COMMAND_COMPLETE);
+	scsi->result |= fcp_rsp->resp.fr_status;
+
+	rsp_flags = fcp_rsp->resp.fr_flags;
+
+	if (unlikely(rsp_flags & FCP_RSP_LEN_VAL)) {
+		rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+		if (rsp_info->rsp_code == FCP_TMF_CMPL)
+			set_host_byte(scsi, DID_OK);
+		else {
+			set_host_byte(scsi, DID_ERROR);
+			return;
+		}
+	}
+
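+	/* sense data follows the response-info field when both are present */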
+	if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
+		sense = (char *) &fcp_rsp[1];
+		if (rsp_flags & FCP_RSP_LEN_VAL)
+			sense += fcp_rsp->ext.fr_rsp_len;
+		sense_len = min(fcp_rsp->ext.fr_sns_len,
+				(u32) SCSI_SENSE_BUFFERSIZE);
+		memcpy(scsi->sense_buffer, sense, sense_len);
+	}
+
+	if (unlikely(rsp_flags & FCP_RESID_UNDER)) {
+		resid = fcp_rsp->ext.fr_resid;
+		scsi_set_resid(scsi, resid);
+		if (scsi_bufflen(scsi) - resid < scsi->underflow &&
+		     !(rsp_flags & FCP_SNS_LEN_VAL) &&
+		     fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+			set_host_byte(scsi, DID_ERROR);
+	}
+}
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_fsf.c b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_fsf.c
new file mode 100644
index 0000000..961e327
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_fsf.c
@@ -0,0 +1,2499 @@
+/*
+ * zfcp device driver
+ *
+ * Implementation of FSF commands.
+ *
+ * Copyright IBM Corp. 2002, 2013
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/blktrace_api.h>
+#include <linux/slab.h>
+#include <scsi/fc/fc_els.h>
+#include "zfcp_ext.h"
+#include "zfcp_fc.h"
+#include "zfcp_dbf.h"
+#include "zfcp_qdio.h"
+#include "zfcp_reqlist.h"
+
+struct kmem_cache *zfcp_fsf_qtcb_cache;
+
+static void zfcp_fsf_request_timeout_handler(unsigned long data)
+{
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
+	zfcp_qdio_siosl(adapter);
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+				"fsrth_1");
+}
+
+static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
+				 unsigned long timeout)
+{
+	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
+	fsf_req->timer.data = (unsigned long) fsf_req->adapter;
+	fsf_req->timer.expires = jiffies + timeout;
+	add_timer(&fsf_req->timer);
+}
+
+static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
+{
+	BUG_ON(!fsf_req->erp_action);
+	fsf_req->timer.function = zfcp_erp_timeout_handler;
+	fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
+	fsf_req->timer.expires = jiffies + 30 * HZ;
+	add_timer(&fsf_req->timer);
+}
+
+/* association between FSF command and FSF QTCB type */
+static u32 fsf_qtcb_type[] = {
+	[FSF_QTCB_FCP_CMND] =             FSF_IO_COMMAND,
+	[FSF_QTCB_ABORT_FCP_CMND] =       FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_OPEN_PORT_WITH_DID] =   FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_OPEN_LUN] =             FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_CLOSE_LUN] =            FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_CLOSE_PORT] =           FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_CLOSE_PHYSICAL_PORT] =  FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_SEND_ELS] =             FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_SEND_GENERIC] =         FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
+	[FSF_QTCB_EXCHANGE_PORT_DATA] =   FSF_PORT_COMMAND,
+	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_UPLOAD_CONTROL_FILE] =  FSF_SUPPORT_COMMAND
+};
+
+static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
+{
+	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
+		"operational because of an unsupported FC class\n");
+	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
+	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+}
+
+/**
+ * zfcp_fsf_req_free - free memory used by fsf request
+ * @fsf_req: pointer to struct zfcp_fsf_req
+ */
+void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
+{
+	if (likely(req->pool)) {
+		if (likely(req->qtcb))
+			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
+		mempool_free(req, req->pool);
+		return;
+	}
+
+	if (likely(req->qtcb))
+		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
+	kfree(req);
+}
+
+static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
+{
+	unsigned long flags;
+	struct fsf_status_read_buffer *sr_buf = req->data;
+	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_port *port;
+	int d_id = ntoh24(sr_buf->d_id);
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
+		if (port->d_id == d_id) {
+			zfcp_erp_port_reopen(port, 0, "fssrpc1");
+			break;
+		}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+}
+
+static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
+					 struct fsf_link_down_info *link_down)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
+		return;
+
+	atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
+
+	zfcp_scsi_schedule_rports_block(adapter);
+
+	if (!link_down)
+		goto out;
+
+	switch (link_down->error_code) {
+	case FSF_PSQ_LINK_NO_LIGHT:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "There is no light signal from the local "
+			 "fibre channel cable\n");
+		break;
+	case FSF_PSQ_LINK_WRAP_PLUG:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "There is a wrap plug instead of a fibre "
+			 "channel cable\n");
+		break;
+	case FSF_PSQ_LINK_NO_FCP:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The adjacent fibre channel node does not "
+			 "support FCP\n");
+		break;
+	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The FCP device is suspended because of a "
+			 "firmware update\n");
+		break;
+	case FSF_PSQ_LINK_INVALID_WWPN:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The FCP device detected a WWPN that is "
+			 "duplicate or not valid\n");
+		break;
+	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The fibre channel fabric does not support NPIV\n");
+		break;
+	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The FCP adapter cannot support more NPIV ports\n");
+		break;
+	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The adjacent switch cannot support "
+			 "more NPIV ports\n");
+		break;
+	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The FCP adapter could not log in to the "
+			 "fibre channel fabric\n");
+		break;
+	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The WWPN assignment file on the FCP adapter "
+			 "has been damaged\n");
+		break;
+	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The mode table on the FCP adapter "
+			 "has been damaged\n");
+		break;
+	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "All NPIV ports on the FCP adapter have "
+			 "been assigned\n");
+		break;
+	default:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The link between the FCP adapter and "
+			 "the FC fabric is down\n");
+	}
+out:
+	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
+}
+
+static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
+{
+	struct fsf_status_read_buffer *sr_buf = req->data;
+	struct fsf_link_down_info *ldi =
+		(struct fsf_link_down_info *) &sr_buf->payload;
+
+	switch (sr_buf->status_subtype) {
+	case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
+		zfcp_fsf_link_down_info_eval(req, ldi);
+		break;
+	case FSF_STATUS_READ_SUB_FDISC_FAILED:
+		zfcp_fsf_link_down_info_eval(req, ldi);
+		break;
+	case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
+		zfcp_fsf_link_down_info_eval(req, NULL);
+	}
+}
+
+static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct fsf_status_read_buffer *sr_buf = req->data;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
+		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
+		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
+		zfcp_fsf_req_free(req);
+		return;
+	}
+
+	zfcp_dbf_hba_fsf_uss("fssrh_4", req);
+
+	switch (sr_buf->status_type) {
+	case FSF_STATUS_READ_PORT_CLOSED:
+		zfcp_fsf_status_read_port_closed(req);
+		break;
+	case FSF_STATUS_READ_INCOMING_ELS:
+		zfcp_fc_incoming_els(req);
+		break;
+	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
+		break;
+	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
+		dev_warn(&adapter->ccw_device->dev,
+			 "The error threshold for checksum statistics "
+			 "has been exceeded\n");
+		zfcp_dbf_hba_bit_err("fssrh_3", req);
+		break;
+	case FSF_STATUS_READ_LINK_DOWN:
+		zfcp_fsf_status_read_link_down(req);
+		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
+		break;
+	case FSF_STATUS_READ_LINK_UP:
+		dev_info(&adapter->ccw_device->dev,
+			 "The local link has been restored\n");
+		/* All ports should be marked as ready to run again */
+		zfcp_erp_set_adapter_status(adapter,
+					    ZFCP_STATUS_COMMON_RUNNING);
+		zfcp_erp_adapter_reopen(adapter,
+					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+					ZFCP_STATUS_COMMON_ERP_FAILED,
+					"fssrh_2");
+		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
+
+		break;
+	case FSF_STATUS_READ_NOTIFICATION_LOST:
+		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
+			zfcp_cfdc_adapter_access_changed(adapter);
+		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
+			queue_work(adapter->work_queue, &adapter->scan_work);
+		break;
+	case FSF_STATUS_READ_CFDC_UPDATED:
+		zfcp_cfdc_adapter_access_changed(adapter);
+		break;
+	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
+		adapter->adapter_features = sr_buf->payload.word[0];
+		break;
+	}
+
+	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
+	zfcp_fsf_req_free(req);
+
+	atomic_inc(&adapter->stat_miss);
+	queue_work(adapter->work_queue, &adapter->stat_work);
+}
+
+static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
+{
+	switch (req->qtcb->header.fsf_status_qual.word[0]) {
+	case FSF_SQ_FCP_RSP_AVAILABLE:
+	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+	case FSF_SQ_NO_RETRY_POSSIBLE:
+	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+		return;
+	case FSF_SQ_COMMAND_ABORTED:
+		break;
+	case FSF_SQ_NO_RECOM:
+		dev_err(&req->adapter->ccw_device->dev,
+			"The FCP adapter reported a problem "
+			"that cannot be recovered\n");
+		zfcp_qdio_siosl(req->adapter);
+		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
+		break;
+	}
+	/* all non-return status codes set FSFREQ_ERROR */
+	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+}
+
+static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
+{
+	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
+		return;
+
+	switch (req->qtcb->header.fsf_status) {
+	case FSF_UNKNOWN_COMMAND:
+		dev_err(&req->adapter->ccw_device->dev,
+			"The FCP adapter does not recognize the command 0x%x\n",
+			req->qtcb->header.fsf_command);
+		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		zfcp_fsf_fsfstatus_qual_eval(req);
+		break;
+	}
+}
+
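+/*
+ * Evaluate the QTCB protocol status of a completed request. Anything other
+ * than FSF_PROT_GOOD or FSF_PROT_FSF_STATUS_PRESENTED marks the request as
+ * failed and may trigger adapter recovery or shutdown.
+ */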
+static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct fsf_qtcb *qtcb = req->qtcb;
+	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
+
+	zfcp_dbf_hba_fsf_response(req);
+
+	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		return;
+	}
+
+	switch (qtcb->prefix.prot_status) {
+	case FSF_PROT_GOOD:
+	case FSF_PROT_FSF_STATUS_PRESENTED:
+		return;
+	case FSF_PROT_QTCB_VERSION_ERROR:
+		dev_err(&adapter->ccw_device->dev,
+			"QTCB version 0x%x not supported by FCP adapter "
+			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
+			psq->word[0], psq->word[1]);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
+		break;
+	case FSF_PROT_ERROR_STATE:
+	case FSF_PROT_SEQ_NUMB_ERROR:
+		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_PROT_UNSUPP_QTCB_TYPE:
+		dev_err(&adapter->ccw_device->dev,
+			"The QTCB type is not supported by the FCP adapter\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
+		break;
+	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
+		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+				&adapter->status);
+		break;
+	case FSF_PROT_DUPLICATE_REQUEST_ID:
+		dev_err(&adapter->ccw_device->dev,
+			"0x%Lx is an ambiguous request identifier\n",
+			(unsigned long long)qtcb->bottom.support.req_handle);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
+		break;
+	case FSF_PROT_LINK_DOWN:
+		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
+		/* go through reopen to flush pending requests */
+		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
+		break;
+	case FSF_PROT_REEST_QUEUE:
+		/* All ports should be marked as ready to run again */
+		zfcp_erp_set_adapter_status(adapter,
+					    ZFCP_STATUS_COMMON_RUNNING);
+		zfcp_erp_adapter_reopen(adapter,
+					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+					ZFCP_STATUS_COMMON_ERP_FAILED,
+					"fspse_8");
+		break;
+	default:
+		dev_err(&adapter->ccw_device->dev,
+			"0x%x is not a valid transfer protocol status\n",
+			qtcb->prefix.prot_status);
+		zfcp_qdio_siosl(adapter);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
+	}
+	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+}
+
+/**
+ * zfcp_fsf_req_complete - process completion of an FSF request
+ * @req: The FSF request that has been completed.
+ *
+ * When a request has been completed either from the FCP adapter,
+ * or it has been dismissed due to a queue shutdown, this function
+ * is called to process the completion status and trigger further
+ * events related to the FSF request.
+ */
+static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
+{
+	if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
+		zfcp_fsf_status_read_handler(req);
+		return;
+	}
+
+	del_timer(&req->timer);
+	zfcp_fsf_protstatus_eval(req);
+	zfcp_fsf_fsfstatus_eval(req);
+	req->handler(req);
+
+	if (req->erp_action)
+		zfcp_erp_notify(req->erp_action, 0);
+
+	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
+		zfcp_fsf_req_free(req);
+	else
+		complete(&req->completion);
+}
+
+/**
+ * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
+ * @adapter: pointer to struct zfcp_adapter
+ *
+ * Never call this without shutting down the adapter first; otherwise
+ * the adapter would continue using and corrupting s390 storage.
+ * A BUG_ON() call is included to ensure this precondition.
+ * ERP is supposed to be the only user of this function.
+ */
+void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
+{
+	struct zfcp_fsf_req *req, *tmp;
+	LIST_HEAD(remove_queue);
+
+	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
+	zfcp_reqlist_move(adapter->req_list, &remove_queue);
+
+	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
+		list_del(&req->list);
+		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+		zfcp_fsf_req_complete(req);
+	}
+}
+
+#define ZFCP_FSF_PORTSPEED_1GBIT	(1 <<  0)
+#define ZFCP_FSF_PORTSPEED_2GBIT	(1 <<  1)
+#define ZFCP_FSF_PORTSPEED_4GBIT	(1 <<  2)
+#define ZFCP_FSF_PORTSPEED_10GBIT	(1 <<  3)
+#define ZFCP_FSF_PORTSPEED_8GBIT	(1 <<  4)
+#define ZFCP_FSF_PORTSPEED_16GBIT	(1 <<  5)
+#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
+
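+/* translate FSF port speed bits into FC transport FC_PORTSPEED_* bits */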
+static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
+{
+	u32 fdmi_speed = 0;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
+		fdmi_speed |= FC_PORTSPEED_1GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
+		fdmi_speed |= FC_PORTSPEED_2GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
+		fdmi_speed |= FC_PORTSPEED_4GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
+		fdmi_speed |= FC_PORTSPEED_10GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
+		fdmi_speed |= FC_PORTSPEED_8GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
+		fdmi_speed |= FC_PORTSPEED_16GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
+		fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
+	return fdmi_speed;
+}
+
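+/*
+ * Copy exchange config data into the adapter and the FC transport host
+ * attributes; unknown or unsupported topologies shut the adapter down and
+ * return -EIO.
+ */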
+static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+{
+	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
+	struct zfcp_adapter *adapter = req->adapter;
+	struct Scsi_Host *shost = adapter->scsi_host;
+	struct fc_els_flogi *nsp, *plogi;
+
+	/* adjust pointers for missing command code */
+	nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
+					- sizeof(u32));
+	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
+					- sizeof(u32));
+
+	if (req->data)
+		memcpy(req->data, bottom, sizeof(*bottom));
+
+	fc_host_port_name(shost) = nsp->fl_wwpn;
+	fc_host_node_name(shost) = nsp->fl_wwnn;
+	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+
+	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
+	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
+					 (u16)FSF_STATUS_READS_RECOM);
+
+	if (fc_host_permanent_port_name(shost) == -1)
+		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+
+	zfcp_scsi_set_prot(adapter);
+
+	/* no error return above here, otherwise must fix call chains */
+	/* do not evaluate invalid fields */
+	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
+		return 0;
+
+	fc_host_port_id(shost) = ntoh24(bottom->s_id);
+	fc_host_speed(shost) =
+		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
+
+	adapter->hydra_version = bottom->adapter_type;
+
+	switch (bottom->fc_topology) {
+	case FSF_TOPO_P2P:
+		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
+		adapter->peer_wwpn = plogi->fl_wwpn;
+		adapter->peer_wwnn = plogi->fl_wwnn;
+		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+		break;
+	case FSF_TOPO_FABRIC:
+		fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+		break;
+	case FSF_TOPO_AL:
+		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+		/* fall through */
+	default:
+		dev_err(&adapter->ccw_device->dev,
+			"Unknown or unsupported arbitrated loop "
+			"fibre channel topology detected\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct fsf_qtcb *qtcb = req->qtcb;
+	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
+	struct Scsi_Host *shost = adapter->scsi_host;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	adapter->fsf_lic_version = bottom->lic_version;
+	adapter->adapter_features = bottom->adapter_features;
+	adapter->connection_features = bottom->connection_features;
+	adapter->peer_wwpn = 0;
+	adapter->peer_wwnn = 0;
+	adapter->peer_d_id = 0;
+
+	switch (qtcb->header.fsf_status) {
+	case FSF_GOOD:
+		if (zfcp_fsf_exchange_config_evaluate(req))
+			return;
+
+		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
+			dev_err(&adapter->ccw_device->dev,
+				"FCP adapter maximum QTCB size (%d bytes) "
+				"is too small\n",
+				bottom->max_qtcb_size);
+			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
+			return;
+		}
+		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+				&adapter->status);
+		break;
+	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+		fc_host_node_name(shost) = 0;
+		fc_host_port_name(shost) = 0;
+		fc_host_port_id(shost) = 0;
+		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+		adapter->hydra_version = 0;
+
+		/* avoids adapter shutdown to be able to recognize
+		 * events such as LINK UP */
+		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+				&adapter->status);
+		zfcp_fsf_link_down_info_eval(req,
+			&qtcb->header.fsf_status_qual.link_down_info);
+		if (zfcp_fsf_exchange_config_evaluate(req))
+			return;
+		break;
+	default:
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
+		return;
+	}
+
+	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+		adapter->hardware_version = bottom->hardware_version;
+		memcpy(fc_host_serial_number(shost), bottom->serial_number,
+		       min(FC_SERIAL_NUMBER_SIZE, 17));
+		EBCASC(fc_host_serial_number(shost),
+		       min(FC_SERIAL_NUMBER_SIZE, 17));
+	}
+
+	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
+		dev_err(&adapter->ccw_device->dev,
+			"The FCP adapter only supports newer "
+			"control block versions\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
+		return;
+	}
+	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
+		dev_err(&adapter->ccw_device->dev,
+			"The FCP adapter only supports older "
+			"control block versions\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
+	}
+}
+
+static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
+	struct Scsi_Host *shost = adapter->scsi_host;
+
+	if (req->data)
+		memcpy(req->data, bottom, sizeof(*bottom));
+
+	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
+		fc_host_permanent_port_name(shost) = bottom->wwpn;
+		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+	} else
+		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
+	fc_host_supported_speeds(shost) =
+		zfcp_fsf_convert_portspeed(bottom->supported_speed);
+	memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
+	       FC_FC4_LIST_SIZE);
+	memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
+	       FC_FC4_LIST_SIZE);
+}
+
+static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
+{
+	struct fsf_qtcb *qtcb = req->qtcb;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	switch (qtcb->header.fsf_status) {
+	case FSF_GOOD:
+		zfcp_fsf_exchange_port_evaluate(req);
+		break;
+	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+		zfcp_fsf_exchange_port_evaluate(req);
+		zfcp_fsf_link_down_info_eval(req,
+			&qtcb->header.fsf_status_qual.link_down_info);
+		break;
+	}
+}
+
+static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
+{
+	struct zfcp_fsf_req *req;
+
+	if (likely(pool))
+		req = mempool_alloc(pool, GFP_ATOMIC);
+	else
+		req = kmalloc(sizeof(*req), GFP_ATOMIC);
+
+	if (unlikely(!req))
+		return NULL;
+
+	memset(req, 0, sizeof(*req));
+	req->pool = pool;
+	return req;
+}
+
+static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
+{
+	struct fsf_qtcb *qtcb;
+
+	if (likely(pool))
+		qtcb = mempool_alloc(pool, GFP_ATOMIC);
+	else
+		qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
+
+	if (unlikely(!qtcb))
+		return NULL;
+
+	memset(qtcb, 0, sizeof(*qtcb));
+	return qtcb;
+}
+
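+/*
+ * Allocate and initialize an FSF request and, except for unsolicited status,
+ * its QTCB; assign the adapter's current request id and prepare the
+ * associated QDIO request.
+ */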
+static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
+						u32 fsf_cmd, u8 sbtype,
+						mempool_t *pool)
+{
+	struct zfcp_adapter *adapter = qdio->adapter;
+	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
+
+	if (unlikely(!req))
+		return ERR_PTR(-ENOMEM);
+
+	if (adapter->req_no == 0)
+		adapter->req_no++;
+
+	INIT_LIST_HEAD(&req->list);
+	init_timer(&req->timer);
+	init_completion(&req->completion);
+
+	req->adapter = adapter;
+	req->fsf_command = fsf_cmd;
+	req->req_id = adapter->req_no;
+
+	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
+		if (likely(pool))
+			req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
+		else
+			req->qtcb = zfcp_qtcb_alloc(NULL);
+
+		if (unlikely(!req->qtcb)) {
+			zfcp_fsf_req_free(req);
+			return ERR_PTR(-ENOMEM);
+		}
+
+		req->seq_no = adapter->fsf_req_seq_no;
+		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
+		req->qtcb->prefix.req_id = req->req_id;
+		req->qtcb->prefix.ulp_info = 26;
+		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
+		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
+		req->qtcb->header.req_handle = req->req_id;
+		req->qtcb->header.fsf_command = req->fsf_command;
+	}
+
+	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
+			   req->qtcb, sizeof(struct fsf_qtcb));
+
+	return req;
+}
+
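+/*
+ * Add the request to the adapter's request list and hand it to QDIO; on
+ * failure remove it again and trigger an adapter reopen.
+ */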
+static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_qdio *qdio = adapter->qdio;
+	int with_qtcb = (req->qtcb != NULL);
+	int req_id = req->req_id;
+
+	zfcp_reqlist_add(adapter->req_list, req);
+
+	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
+	req->issued = get_clock();
+	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
+		del_timer(&req->timer);
+		/* lookup request again, list might have changed */
+		zfcp_reqlist_find_rm(adapter->req_list, req_id);
+		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
+		return -EIO;
+	}
+
+	/* Don't increase for unsolicited status */
+	if (with_qtcb)
+		adapter->fsf_req_seq_no++;
+	adapter->req_no++;
+
+	return 0;
+}
+
+/**
+ * zfcp_fsf_status_read - send status read request
+ * @qdio: pointer to struct zfcp_qdio
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
+{
+	struct zfcp_adapter *adapter = qdio->adapter;
+	struct zfcp_fsf_req *req;
+	struct fsf_status_read_buffer *sr_buf;
+	struct page *page;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0,
+				  adapter->pool.status_read_req);
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
+	if (!page) {
+		retval = -ENOMEM;
+		goto failed_buf;
+	}
+	sr_buf = page_address(page);
+	memset(sr_buf, 0, sizeof(*sr_buf));
+	req->data = sr_buf;
+
+	zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	retval = zfcp_fsf_req_send(req);
+	if (retval)
+		goto failed_req_send;
+
+	goto out;
+
+failed_req_send:
+	req->data = NULL;
+	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
+failed_buf:
+	zfcp_dbf_hba_fsf_uss("fssr__1", req);
+	zfcp_fsf_req_free(req);
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
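+/*
+ * Evaluate the response to an abort request: flag the abort as succeeded or
+ * not needed, or trigger port/adapter recovery and mark the request as
+ * failed.
+ */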
+static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
+{
+	struct scsi_device *sdev = req->data;
+	struct zfcp_scsi_dev *zfcp_sdev;
+	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	zfcp_sdev = sdev_to_zfcp(sdev);
+
+	switch (req->qtcb->header.fsf_status) {
+	case FSF_PORT_HANDLE_NOT_VALID:
+		if (fsq->word[0] == fsq->word[1]) {
+			zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
+						"fsafch1");
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		}
+		break;
+	case FSF_LUN_HANDLE_NOT_VALID:
+		if (fsq->word[0] == fsq->word[1]) {
+			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		}
+		break;
+	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
+		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
+		break;
+	case FSF_PORT_BOXED:
+		zfcp_erp_set_port_status(zfcp_sdev->port,
+					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
+		zfcp_erp_port_reopen(zfcp_sdev->port,
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_LUN_BOXED:
+		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
+		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
+				    "fsafch4");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (fsq->word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+			zfcp_fc_test_link(zfcp_sdev->port);
+			/* fall through */
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+	case FSF_GOOD:
+		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
+		break;
+	}
+}
+
+/**
+ * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
+ * @scmnd: The SCSI command to abort
+ * Returns: pointer to struct zfcp_fsf_req
+ */
+struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
+{
+	struct zfcp_fsf_req *req = NULL;
+	struct scsi_device *sdev = scmnd->device;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
+	unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.scsi_abort);
+	if (IS_ERR(req)) {
+		req = NULL;
+		goto out;
+	}
+
+	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
+		       ZFCP_STATUS_COMMON_UNBLOCKED)))
+		goto out_error_free;
+
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->data = sdev;
+	req->handler = zfcp_fsf_abort_fcp_command_handler;
+	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
+	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
+	req->qtcb->bottom.support.req_handle = (u64) old_req_id;
+
+	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
+	if (!zfcp_fsf_req_send(req))
+		goto out;
+
+out_error_free:
+	zfcp_fsf_req_free(req);
+	req = NULL;
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return req;
+}
+
+static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_fsf_ct_els *ct = req->data;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+
+	ct->status = -EINVAL;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		goto skip_fsfstatus;
+
+	switch (header->fsf_status) {
+	case FSF_GOOD:
+		zfcp_dbf_san_res("fsscth2", req);
+		ct->status = 0;
+		break;
+	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+		zfcp_fsf_class_not_supp(req);
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (header->fsf_status_qual.word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+	case FSF_ACCESS_DENIED:
+		break;
+        case FSF_PORT_BOXED:
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_PORT_HANDLE_NOT_VALID:
+		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
+		/* fall through */
+	case FSF_GENERIC_COMMAND_REJECTED:
+	case FSF_PAYLOAD_SIZE_MISMATCH:
+	case FSF_REQUEST_SIZE_TOO_LARGE:
+	case FSF_RESPONSE_SIZE_TOO_LARGE:
+	case FSF_SBAL_MISMATCH:
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	}
+
+skip_fsfstatus:
+	if (ct->handler)
+		ct->handler(ct->handler_data);
+}
+
+static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
+					    struct zfcp_qdio_req *q_req,
+					    struct scatterlist *sg_req,
+					    struct scatterlist *sg_resp)
+{
+	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
+	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
+	zfcp_qdio_set_sbale_last(qdio, q_req);
+}
+
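+/*
+ * Map the CT/ELS request and response scatterlists to SBALs: use the
+ * multi-buffer path if active, a single unchained SBAL when both buffers
+ * fit into one, or chained SBALs if the adapter supports them.
+ */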
+static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
+				       struct scatterlist *sg_req,
+				       struct scatterlist *sg_resp)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_qdio *qdio = adapter->qdio;
+	struct fsf_qtcb *qtcb = req->qtcb;
+	u32 feat = adapter->adapter_features;
+
+	if (zfcp_adapter_multi_buffer_active(adapter)) {
+		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
+			return -EIO;
+		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
+			return -EIO;
+
+		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
+					zfcp_qdio_sbale_count(sg_req));
+		zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+		zfcp_qdio_set_scount(qdio, &req->qdio_req);
+		return 0;
+	}
+
+	/* use single, unchained SBAL if it can hold the request */
+	if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
+		zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
+						sg_req, sg_resp);
+		return 0;
+	}
+
+	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
+		return -EOPNOTSUPP;
+
+	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
+		return -EIO;
+
+	qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
+
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+	zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
+
+	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
+		return -EIO;
+
+	qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
+
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	return 0;
+}
+
+static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
+				 struct scatterlist *sg_req,
+				 struct scatterlist *sg_resp,
+				 unsigned int timeout)
+{
+	int ret;
+
+	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
+	if (ret)
+		return ret;
+
+	/* common settings for ct/gs and els requests */
+	if (timeout > 255)
+		timeout = 255; /* max value accepted by hardware */
+	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
+	req->qtcb->bottom.support.timeout = timeout;
+	zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
+
+	return 0;
+}
+
+/**
+ * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
+ * @wka_port: pointer to the zfcp WKA port to send the request to
+ * @ct: pointer to struct zfcp_fsf_ct_els with data for the request
+ * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
+ * @timeout: timeout in seconds passed to the FCP channel; the driver's own
+ *	request timer adds a 10 second grace period
+ */
+int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
+		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
+		     unsigned int timeout)
+{
+	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+	struct zfcp_fsf_req *req;
+	int ret = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
+				  SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
+
+	if (IS_ERR(req)) {
+		ret = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
+	if (ret)
+		goto failed_send;
+
+	req->handler = zfcp_fsf_send_ct_handler;
+	req->qtcb->header.port_handle = wka_port->handle;
+	req->data = ct;
+
+	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
+
+	ret = zfcp_fsf_req_send(req);
+	if (ret)
+		goto failed_send;
+
+	goto out;
+
+failed_send:
+	zfcp_fsf_req_free(req);
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return ret;
+}
+
+static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_fsf_ct_els *send_els = req->data;
+	struct zfcp_port *port = send_els->port;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+
+	send_els->status = -EINVAL;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		goto skip_fsfstatus;
+
+	switch (header->fsf_status) {
+	case FSF_GOOD:
+		zfcp_dbf_san_res("fsselh1", req);
+		send_els->status = 0;
+		break;
+	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+		zfcp_fsf_class_not_supp(req);
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (header->fsf_status_qual.word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+		case FSF_SQ_RETRY_IF_POSSIBLE:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+	case FSF_ELS_COMMAND_REJECTED:
+	case FSF_PAYLOAD_SIZE_MISMATCH:
+	case FSF_REQUEST_SIZE_TOO_LARGE:
+	case FSF_RESPONSE_SIZE_TOO_LARGE:
+		break;
+	case FSF_ACCESS_DENIED:
+		if (port) {
+			zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		}
+		break;
+	case FSF_SBAL_MISMATCH:
+		/* should never occur, avoided in zfcp_fsf_send_els */
+		/* fall through */
+	default:
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	}
+skip_fsfstatus:
+	if (send_els->handler)
+		send_els->handler(send_els->handler_data);
+}
+
+/**
+ * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
+ * @adapter: pointer to zfcp adapter
+ * @d_id: N_Port_ID of the destination port
+ * @els: pointer to struct zfcp_fsf_ct_els with data for the command
+ * @timeout: timeout in seconds passed to the FCP channel; the driver's own
+ *	request timer adds a 10 second grace period
+ */
+int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
+		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
+{
+	struct zfcp_fsf_req *req;
+	struct zfcp_qdio *qdio = adapter->qdio;
+	int ret = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
+				  SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
+
+	if (IS_ERR(req)) {
+		ret = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+
+	if (!zfcp_adapter_multi_buffer_active(adapter))
+		zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
+
+	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
+
+	if (ret)
+		goto failed_send;
+
+	hton24(req->qtcb->bottom.support.d_id, d_id);
+	req->handler = zfcp_fsf_send_els_handler;
+	req->data = els;
+
+	zfcp_dbf_san_req("fssels1", req, d_id);
+
+	ret = zfcp_fsf_req_send(req);
+	if (ret)
+		goto failed_send;
+
+	goto out;
+
+failed_send:
+	zfcp_fsf_req_free(req);
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return ret;
+}
+
+int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_fsf_req *req;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->qtcb->bottom.config.feature_selection =
+			FSF_FEATURE_CFDC |
+			FSF_FEATURE_LUN_SHARING |
+			FSF_FEATURE_NOTIFICATION_LOST |
+			FSF_FEATURE_UPDATE_ALERT;
+	req->erp_action = erp_action;
+	req->handler = zfcp_fsf_exchange_config_data_handler;
+	erp_action->fsf_req_id = req->req_id;
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
+	if (retval) {
+		zfcp_fsf_req_free(req);
+		erp_action->fsf_req_id = 0;
+	}
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
+int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
+				       struct fsf_qtcb_bottom_config *data)
+{
+	struct zfcp_fsf_req *req = NULL;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out_unlock;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+				  SBAL_SFLAGS0_TYPE_READ, NULL);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out_unlock;
+	}
+
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+	req->handler = zfcp_fsf_exchange_config_data_handler;
+
+	req->qtcb->bottom.config.feature_selection =
+			FSF_FEATURE_CFDC |
+			FSF_FEATURE_LUN_SHARING |
+			FSF_FEATURE_NOTIFICATION_LOST |
+			FSF_FEATURE_UPDATE_ALERT;
+
+	if (data)
+		req->data = data;
+
+	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+	retval = zfcp_fsf_req_send(req);
+	spin_unlock_irq(&qdio->req_q_lock);
+	if (!retval)
+		wait_for_completion(&req->completion);
+
+	zfcp_fsf_req_free(req);
+	return retval;
+
+out_unlock:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
+/**
+ * zfcp_fsf_exchange_port_data - request information about local port
+ * @erp_action: ERP action for the adapter for which port data is requested
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
+		return -EOPNOTSUPP;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->handler = zfcp_fsf_exchange_port_data_handler;
+	req->erp_action = erp_action;
+	erp_action->fsf_req_id = req->req_id;
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
+	if (retval) {
+		zfcp_fsf_req_free(req);
+		erp_action->fsf_req_id = 0;
+	}
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
+/**
+ * zfcp_fsf_exchange_port_data_sync - request information about local port
+ * @qdio: pointer to struct zfcp_qdio
+ * @data: pointer to struct fsf_qtcb_bottom_port
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
+				     struct fsf_qtcb_bottom_port *data)
+{
+	struct zfcp_fsf_req *req = NULL;
+	int retval = -EIO;
+
+	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
+		return -EOPNOTSUPP;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out_unlock;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+				  SBAL_SFLAGS0_TYPE_READ, NULL);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out_unlock;
+	}
+
+	if (data)
+		req->data = data;
+
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->handler = zfcp_fsf_exchange_port_data_handler;
+	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+	retval = zfcp_fsf_req_send(req);
+	spin_unlock_irq(&qdio->req_q_lock);
+
+	if (!retval)
+		wait_for_completion(&req->completion);
+
+	zfcp_fsf_req_free(req);
+
+	return retval;
+
+out_unlock:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
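+/*
+ * Handle the response to an open port request: store the port handle and
+ * evaluate the returned PLOGI payload on success, or update the port status
+ * and flag the request as failed on errors.
+ */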
+static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_port *port = req->data;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+	struct fc_els_flogi *plogi;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		goto out;
+
+	switch (header->fsf_status) {
+	case FSF_PORT_ALREADY_OPEN:
+		break;
+	case FSF_ACCESS_DENIED:
+		zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "Not enough FCP adapter resources to open "
+			 "remote port 0x%016Lx\n",
+			 (unsigned long long)port->wwpn);
+		zfcp_erp_set_port_status(port,
+					 ZFCP_STATUS_COMMON_ERP_FAILED);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (header->fsf_status_qual.word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+		case FSF_SQ_NO_RETRY_POSSIBLE:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+	case FSF_GOOD:
+		port->handle = header->port_handle;
+		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
+				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
+		                  ZFCP_STATUS_COMMON_ACCESS_BOXED,
+		                  &port->status);
+		/* check whether D_ID has changed during open */
+		/*
+		 * FIXME: This check is not airtight, as the FCP channel does
+		 * not monitor closures of target port connections caused on
+		 * the remote side. Thus, we might miss out on invalidating
+		 * locally cached WWPNs (and other N_Port parameters) of gone
+		 * target ports. So, our heroic attempt to make things safe
+		 * could be undermined by 'open port' response data tagged with
+		 * obsolete WWPNs. Another reason to monitor potential
+		 * connection closures ourselves at least (by interpreting
+		 * incoming ELS' and unsolicited status). It just crosses my
+		 * mind that one should be able to cross-check by means of
+		 * another GID_PN straight after a port has been opened.
+		 * Alternatively, an ADISC/PDISC ELS should suffice as well.
+		 */
+		plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
+		if (req->qtcb->bottom.support.els1_length >=
+		    FSF_PLOGI_MIN_LEN)
+				zfcp_fc_plogi_evaluate(port, plogi);
+		break;
+	case FSF_UNKNOWN_OP_SUBTYPE:
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	}
+
+out:
+	put_device(&port->dev);
+}
+
+/**
+ * zfcp_fsf_open_port - create and send open port request
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
+	struct zfcp_port *port = erp_action->port;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->handler = zfcp_fsf_open_port_handler;
+	hton24(req->qtcb->bottom.support.d_id, port->d_id);
+	req->data = port;
+	req->erp_action = erp_action;
+	erp_action->fsf_req_id = req->req_id;
+	get_device(&port->dev);
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
+	if (retval) {
+		zfcp_fsf_req_free(req);
+		erp_action->fsf_req_id = 0;
+		put_device(&port->dev);
+	}
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
+static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_port *port = req->data;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	switch (req->qtcb->header.fsf_status) {
+	case FSF_PORT_HANDLE_NOT_VALID:
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		break;
+	case FSF_GOOD:
+		zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
+		break;
+	}
+}
+
+/**
+ * zfcp_fsf_close_port - create and send close port request
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->handler = zfcp_fsf_close_port_handler;
+	req->data = erp_action->port;
+	req->erp_action = erp_action;
+	req->qtcb->header.port_handle = erp_action->port->handle;
+	erp_action->fsf_req_id = req->req_id;
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
+	if (retval) {
+		zfcp_fsf_req_free(req);
+		erp_action->fsf_req_id = 0;
+	}
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
+static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_fc_wka_port *wka_port = req->data;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
+		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+		goto out;
+	}
+
+	switch (header->fsf_status) {
+	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
+		/* fall through */
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		/* fall through */
+	case FSF_ACCESS_DENIED:
+		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+		break;
+	case FSF_GOOD:
+		wka_port->handle = header->port_handle;
+		/* fall through */
+	case FSF_PORT_ALREADY_OPEN:
+		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
+	}
+out:
+	wake_up(&wka_port->completion_wq);
+}
+
+/**
+ * zfcp_fsf_open_wka_port - create and send open wka-port request
+ * @wka_port: pointer to struct zfcp_fc_wka_port
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+{
+	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->handler = zfcp_fsf_open_wka_port_handler;
+	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
+	req->data = wka_port;
+
+	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+	retval = zfcp_fsf_req_send(req);
+	if (retval)
+		zfcp_fsf_req_free(req);
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
+static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_fc_wka_port *wka_port = req->data;
+
+	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
+	}
+
+	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+	wake_up(&wka_port->completion_wq);
+}
+
+/**
+ * zfcp_fsf_close_wka_port - create and send close wka port request
+ * @wka_port: WKA port to close
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+{
+	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->handler = zfcp_fsf_close_wka_port_handler;
+	req->data = wka_port;
+	req->qtcb->header.port_handle = wka_port->handle;
+
+	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+	retval = zfcp_fsf_req_send(req);
+	if (retval)
+		zfcp_fsf_req_free(req);
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
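+/*
+ * Handle the response to a close physical port request: clear the physical
+ * open state of the port and the open state of its LUNs, or trigger
+ * recovery and flag the request as failed on errors.
+ */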
+static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_port *port = req->data;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+	struct scsi_device *sdev;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	switch (header->fsf_status) {
+	case FSF_PORT_HANDLE_NOT_VALID:
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ACCESS_DENIED:
+		zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
+		break;
+	case FSF_PORT_BOXED:
+		/* can't use generic zfcp_erp_modify_port_status because
+		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
+		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		shost_for_each_device(sdev, port->adapter->scsi_host)
+			if (sdev_to_zfcp(sdev)->port == port)
+				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+						  &sdev_to_zfcp(sdev)->status);
+		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
+		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
+				     "fscpph2");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (header->fsf_status_qual.word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+			/* fall through */
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+	case FSF_GOOD:
+		/* can't use generic zfcp_erp_modify_port_status because
+		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
+		 */
+		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		shost_for_each_device(sdev, port->adapter->scsi_host)
+			if (sdev_to_zfcp(sdev)->port == port)
+				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+						  &sdev_to_zfcp(sdev)->status);
+		break;
+	}
+}
+
+/**
+ * zfcp_fsf_close_physical_port - close physical port
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success
+ */
+int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->data = erp_action->port;
+	req->qtcb->header.port_handle = erp_action->port->handle;
+	req->erp_action = erp_action;
+	req->handler = zfcp_fsf_close_physical_port_handler;
+	erp_action->fsf_req_id = req->req_id;
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
+	if (retval) {
+		zfcp_fsf_req_free(req);
+		erp_action->fsf_req_id = 0;
+	}
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
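+/*
+ * Handle the response to an open LUN request: store the LUN handle and mark
+ * the LUN as open on success, or trigger recovery and flag the request as
+ * failed on errors.
+ */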
+static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct scsi_device *sdev = req->data;
+	struct zfcp_scsi_dev *zfcp_sdev;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+	struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	zfcp_sdev = sdev_to_zfcp(sdev);
+
+	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
+			  ZFCP_STATUS_COMMON_ACCESS_BOXED |
+			  ZFCP_STATUS_LUN_SHARED |
+			  ZFCP_STATUS_LUN_READONLY,
+			  &zfcp_sdev->status);
+
+	switch (header->fsf_status) {
+
+	case FSF_PORT_HANDLE_NOT_VALID:
+		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
+		/* fall through */
+	case FSF_LUN_ALREADY_OPEN:
+		break;
+	case FSF_ACCESS_DENIED:
+		zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_PORT_BOXED:
+		zfcp_erp_set_port_status(zfcp_sdev->port,
+					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
+		zfcp_erp_port_reopen(zfcp_sdev->port,
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_LUN_SHARING_VIOLATION:
+		zfcp_cfdc_lun_shrng_vltn(sdev, &header->fsf_status_qual);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
+		dev_warn(&adapter->ccw_device->dev,
+			 "No handle is available for LUN "
+			 "0x%016Lx on port 0x%016Lx\n",
+			 (unsigned long long)zfcp_scsi_dev_lun(sdev),
+			 (unsigned long long)zfcp_sdev->port->wwpn);
+		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
+		/* fall through */
+	case FSF_INVALID_COMMAND_OPTION:
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (header->fsf_status_qual.word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+			zfcp_fc_test_link(zfcp_sdev->port);
+			/* fall through */
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+
+	case FSF_GOOD:
+		zfcp_sdev->lun_handle = header->lun_handle;
+		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+		zfcp_cfdc_open_lun_eval(sdev, bottom);
+		break;
+	}
+}
+
+/**
+ * zfcp_fsf_open_lun - open LUN
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_qdio *qdio = adapter->qdio;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->qtcb->header.port_handle = erp_action->port->handle;
+	req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
+	req->handler = zfcp_fsf_open_lun_handler;
+	req->data = erp_action->sdev;
+	req->erp_action = erp_action;
+	erp_action->fsf_req_id = req->req_id;
+
+	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
+		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
+	if (retval) {
+		zfcp_fsf_req_free(req);
+		erp_action->fsf_req_id = 0;
+	}
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
+static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
+{
+	struct scsi_device *sdev = req->data;
+	struct zfcp_scsi_dev *zfcp_sdev;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	zfcp_sdev = sdev_to_zfcp(sdev);
+
+	switch (req->qtcb->header.fsf_status) {
+	case FSF_PORT_HANDLE_NOT_VALID:
+		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_LUN_HANDLE_NOT_VALID:
+		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_PORT_BOXED:
+		zfcp_erp_set_port_status(zfcp_sdev->port,
+					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
+		zfcp_erp_port_reopen(zfcp_sdev->port,
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (req->qtcb->header.fsf_status_qual.word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+			zfcp_fc_test_link(zfcp_sdev->port);
+			/* fall through */
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+	case FSF_GOOD:
+		atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+		break;
+	}
+}
+
+/**
+ * zfcp_fsf_close_lun - close LUN
+ * @erp_action: pointer to erp_action triggering the "close LUN"
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->qtcb->header.port_handle = erp_action->port->handle;
+	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
+	req->handler = zfcp_fsf_close_lun_handler;
+	req->data = erp_action->sdev;
+	req->erp_action = erp_action;
+	erp_action->fsf_req_id = req->req_id;
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
+	if (retval) {
+		zfcp_fsf_req_free(req);
+		erp_action->fsf_req_id = 0;
+	}
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
+static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
+{
+	lat_rec->sum += lat;
+	lat_rec->min = min(lat_rec->min, lat);
+	lat_rec->max = max(lat_rec->max, lat);
+}
+
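+/*
+ * Update the per-LUN latency statistics (if the adapter provides measurement
+ * data) and attach the measurement record to the block layer trace.
+ */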
+static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
+{
+	struct fsf_qual_latency_info *lat_in;
+	struct latency_cont *lat = NULL;
+	struct zfcp_scsi_dev *zfcp_sdev;
+	struct zfcp_blk_drv_data blktrc;
+	int ticks = req->adapter->timer_ticks;
+
+	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
+
+	blktrc.flags = 0;
+	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
+	blktrc.inb_usage = 0;
+	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
+
+	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
+	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+		zfcp_sdev = sdev_to_zfcp(scsi->device);
+		blktrc.flags |= ZFCP_BLK_LAT_VALID;
+		blktrc.channel_lat = lat_in->channel_lat * ticks;
+		blktrc.fabric_lat = lat_in->fabric_lat * ticks;
+
+		switch (req->qtcb->bottom.io.data_direction) {
+		case FSF_DATADIR_DIF_READ_STRIP:
+		case FSF_DATADIR_DIF_READ_CONVERT:
+		case FSF_DATADIR_READ:
+			lat = &zfcp_sdev->latencies.read;
+			break;
+		case FSF_DATADIR_DIF_WRITE_INSERT:
+		case FSF_DATADIR_DIF_WRITE_CONVERT:
+		case FSF_DATADIR_WRITE:
+			lat = &zfcp_sdev->latencies.write;
+			break;
+		case FSF_DATADIR_CMND:
+			lat = &zfcp_sdev->latencies.cmd;
+			break;
+		}
+
+		if (lat) {
+			spin_lock(&zfcp_sdev->latencies.lock);
+			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
+			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
+			lat->counter++;
+			spin_unlock(&zfcp_sdev->latencies.lock);
+		}
+	}
+
+	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
+			    sizeof(blktrc));
+}
+
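+/*
+ * Common FSF status evaluation for FCP command and task management
+ * responses: trigger port, LUN or adapter recovery as needed and flag the
+ * request as failed.
+ */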
+static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
+{
+	struct scsi_cmnd *scmnd = req->data;
+	struct scsi_device *sdev = scmnd->device;
+	struct zfcp_scsi_dev *zfcp_sdev;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+
+	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
+		return;
+
+	zfcp_sdev = sdev_to_zfcp(sdev);
+
+	switch (header->fsf_status) {
+	case FSF_HANDLE_MISMATCH:
+	case FSF_PORT_HANDLE_NOT_VALID:
+		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_FCPLUN_NOT_VALID:
+	case FSF_LUN_HANDLE_NOT_VALID:
+		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+		zfcp_fsf_class_not_supp(req);
+		break;
+	case FSF_ACCESS_DENIED:
+		zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_DIRECTION_INDICATOR_NOT_VALID:
+		dev_err(&req->adapter->ccw_device->dev,
+			"Incorrect direction %d, LUN 0x%016Lx on port "
+			"0x%016Lx closed\n",
+			req->qtcb->bottom.io.data_direction,
+			(unsigned long long)zfcp_scsi_dev_lun(sdev),
+			(unsigned long long)zfcp_sdev->port->wwpn);
+		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
+					  "fssfch3");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_CMND_LENGTH_NOT_VALID:
+		dev_err(&req->adapter->ccw_device->dev,
+			"Incorrect CDB length %d, LUN 0x%016Lx on "
+			"port 0x%016Lx closed\n",
+			req->qtcb->bottom.io.fcp_cmnd_length,
+			(unsigned long long)zfcp_scsi_dev_lun(sdev),
+			(unsigned long long)zfcp_sdev->port->wwpn);
+		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
+					  "fssfch4");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_PORT_BOXED:
+		zfcp_erp_set_port_status(zfcp_sdev->port,
+					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
+		zfcp_erp_port_reopen(zfcp_sdev->port,
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_LUN_BOXED:
+		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
+		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
+				    "fssfch6");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		if (header->fsf_status_qual.word[0] ==
+		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
+			zfcp_fc_test_link(zfcp_sdev->port);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	}
+}
+
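+/*
+ * Complete an FCP command: evaluate the FSF and FCP responses, set the SCSI
+ * result and call scsi_done while holding the abort lock.
+ */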
+static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
+{
+	struct scsi_cmnd *scpnt;
+	struct fcp_resp_with_ext *fcp_rsp;
+	unsigned long flags;
+
+	read_lock_irqsave(&req->adapter->abort_lock, flags);
+
+	scpnt = req->data;
+	if (unlikely(!scpnt)) {
+		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
+		return;
+	}
+
+	zfcp_fsf_fcp_handler_common(req);
+
+	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
+		goto skip_fsfstatus;
+	}
+
+	switch (req->qtcb->header.fsf_status) {
+	case FSF_INCONSISTENT_PROT_DATA:
+	case FSF_INVALID_PROT_PARM:
+		set_host_byte(scpnt, DID_ERROR);
+		goto skip_fsfstatus;
+	case FSF_BLOCK_GUARD_CHECK_FAILURE:
+		zfcp_scsi_dif_sense_error(scpnt, 0x1);
+		goto skip_fsfstatus;
+	case FSF_APP_TAG_CHECK_FAILURE:
+		zfcp_scsi_dif_sense_error(scpnt, 0x2);
+		goto skip_fsfstatus;
+	case FSF_REF_TAG_CHECK_FAILURE:
+		zfcp_scsi_dif_sense_error(scpnt, 0x3);
+		goto skip_fsfstatus;
+	}
+	fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
+	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
+
+skip_fsfstatus:
+	zfcp_fsf_req_trace(req, scpnt);
+	zfcp_dbf_scsi_result(scpnt, req);
+
+	scpnt->host_scribble = NULL;
+	(scpnt->scsi_done) (scpnt);
+	/*
+	 * We must hold this lock until scsi_done has been called.
+	 * Otherwise we may call scsi_done after an abort for this
+	 * command has completed.
+	 * Note: scsi_done must not block!
+	 */
+	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
+}
+
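+/* derive FSF data direction from the SCSI protection op and DMA direction */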
+static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
+{
+	switch (scsi_get_prot_op(scsi_cmnd)) {
+	case SCSI_PROT_NORMAL:
+		switch (scsi_cmnd->sc_data_direction) {
+		case DMA_NONE:
+			*data_dir = FSF_DATADIR_CMND;
+			break;
+		case DMA_FROM_DEVICE:
+			*data_dir = FSF_DATADIR_READ;
+			break;
+		case DMA_TO_DEVICE:
+			*data_dir = FSF_DATADIR_WRITE;
+			break;
+		case DMA_BIDIRECTIONAL:
+			return -EINVAL;
+		}
+		break;
+
+	case SCSI_PROT_READ_STRIP:
+		*data_dir = FSF_DATADIR_DIF_READ_STRIP;
+		break;
+	case SCSI_PROT_WRITE_INSERT:
+		*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
+		break;
+	case SCSI_PROT_READ_PASS:
+		*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
+		break;
+	case SCSI_PROT_WRITE_PASS:
+		*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
+ * @scsi_cmnd: scsi command to be sent
+ */
+int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
+{
+	struct zfcp_fsf_req *req;
+	struct fcp_cmnd *fcp_cmnd;
+	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
+	int retval = -EIO;
+	struct scsi_device *sdev = scsi_cmnd->device;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+	struct zfcp_qdio *qdio = adapter->qdio;
+	struct fsf_qtcb_bottom_io *io;
+	unsigned long flags;
+
+	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
+		       ZFCP_STATUS_COMMON_UNBLOCKED)))
+		return -EBUSY;
+
+	spin_lock_irqsave(&qdio->req_q_lock, flags);
+	if (atomic_read(&qdio->req_q_free) <= 0) {
+		atomic_inc(&qdio->req_q_full);
+		goto out;
+	}
+
+	if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
+		sbtype = SBAL_SFLAGS0_TYPE_WRITE;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+				  sbtype, adapter->pool.scsi_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
+
+	io = &req->qtcb->bottom.io;
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	req->data = scsi_cmnd;
+	req->handler = zfcp_fsf_fcp_cmnd_handler;
+	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
+	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
+	io->service_class = FSF_CLASS_3;
+	io->fcp_cmnd_length = FCP_CMND_LEN;
+
+	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
+		io->data_block_length = scsi_cmnd->device->sector_size;
+		io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
+	}
+
+	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
+		goto failed_scsi_cmnd;
+
+	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
+
+	if (scsi_prot_sg_count(scsi_cmnd)) {
+		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
+				       scsi_prot_sg_count(scsi_cmnd));
+		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+						 scsi_prot_sglist(scsi_cmnd));
+		if (retval)
+			goto failed_scsi_cmnd;
+		io->prot_data_length = zfcp_qdio_real_bytes(
+						scsi_prot_sglist(scsi_cmnd));
+	}
+
+	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+					 scsi_sglist(scsi_cmnd));
+	if (unlikely(retval))
+		goto failed_scsi_cmnd;
+
+	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
+	if (zfcp_adapter_multi_buffer_active(adapter))
+		zfcp_qdio_set_scount(qdio, &req->qdio_req);
+
+	retval = zfcp_fsf_req_send(req);
+	if (unlikely(retval))
+		goto failed_scsi_cmnd;
+
+	goto out;
+
+failed_scsi_cmnd:
+	zfcp_fsf_req_free(req);
+	scsi_cmnd->host_scribble = NULL;
+out:
+	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
+	return retval;
+}
+
+static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
+{
+	struct fcp_resp_with_ext *fcp_rsp;
+	struct fcp_resp_rsp_info *rsp_info;
+
+	zfcp_fsf_fcp_handler_common(req);
+
+	fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
+	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+
+	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
+	     (req->status & ZFCP_STATUS_FSFREQ_ERROR))
+		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
+}
+
+/**
+ * zfcp_fsf_fcp_task_mgmt - send SCSI task management command
+ * @scmnd: SCSI command to send the task management command for
+ * @tm_flags: unsigned byte for task management flags
+ * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
+ */
+struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
+					    u8 tm_flags)
+{
+	struct zfcp_fsf_req *req = NULL;
+	struct fcp_cmnd *fcp_cmnd;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
+	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
+
+	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
+		       ZFCP_STATUS_COMMON_UNBLOCKED)))
+		return NULL;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+				  SBAL_SFLAGS0_TYPE_WRITE,
+				  qdio->adapter->pool.scsi_req);
+
+	if (IS_ERR(req)) {
+		req = NULL;
+		goto out;
+	}
+
+	req->data = scmnd;
+	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
+	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
+	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
+	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
+	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
+	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
+
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+	zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
+
+	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
+	if (!zfcp_fsf_req_send(req))
+		goto out;
+
+	zfcp_fsf_req_free(req);
+	req = NULL;
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return req;
+}
+
+static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
+{
+}
+
+/**
+ * zfcp_fsf_control_file - control file upload/download
+ * @adapter: pointer to struct zfcp_adapter
+ * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
+ * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
+ */
+struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
+					   struct zfcp_fsf_cfdc *fsf_cfdc)
+{
+	struct zfcp_qdio *qdio = adapter->qdio;
+	struct zfcp_fsf_req *req = NULL;
+	struct fsf_qtcb_bottom_support *bottom;
+	int retval = -EIO;
+	u8 direction;
+
+	if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	switch (fsf_cfdc->command) {
+	case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
+		direction = SBAL_SFLAGS0_TYPE_WRITE;
+		break;
+	case FSF_QTCB_UPLOAD_CONTROL_FILE:
+		direction = SBAL_SFLAGS0_TYPE_READ;
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, direction, NULL);
+	if (IS_ERR(req)) {
+		retval = -EPERM;
+		goto out;
+	}
+
+	req->handler = zfcp_fsf_control_file_handler;
+
+	bottom = &req->qtcb->bottom.support;
+	bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
+	bottom->option = fsf_cfdc->option;
+
+	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);
+
+	if (retval ||
+		(zfcp_qdio_real_bytes(fsf_cfdc->sg) != ZFCP_CFDC_MAX_SIZE)) {
+		zfcp_fsf_req_free(req);
+		retval = -EIO;
+		goto out;
+	}
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+	if (zfcp_adapter_multi_buffer_active(adapter))
+		zfcp_qdio_set_scount(qdio, &req->qdio_req);
+
+	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+	retval = zfcp_fsf_req_send(req);
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+
+	if (!retval) {
+		wait_for_completion(&req->completion);
+		return req;
+	}
+	return ERR_PTR(retval);
+}
+
+/**
+ * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
+ * @qdio: pointer to struct zfcp_qdio
+ * @sbal_idx: response queue index of SBAL to be processed
+ */
+void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
+{
+	struct zfcp_adapter *adapter = qdio->adapter;
+	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
+	struct qdio_buffer_element *sbale;
+	struct zfcp_fsf_req *fsf_req;
+	unsigned long req_id;
+	int idx;
+
+	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
+
+		sbale = &sbal->element[idx];
+		req_id = (unsigned long) sbale->addr;
+		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
+
+		if (!fsf_req) {
+			/*
+			 * An unknown request id means we potentially have
+			 * memory corruption and must stop the machine immediately.
+			 */
+			zfcp_qdio_siosl(adapter);
+			panic("error: unknown req_id (%lx) on adapter %s.\n",
+			      req_id, dev_name(&adapter->ccw_device->dev));
+		}
+
+		fsf_req->qdio_req.sbal_response = sbal_idx;
+		zfcp_fsf_req_complete(fsf_req);
+
+		if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
+			break;
+	}
+}
+
+struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *qdio,
+				      struct qdio_buffer *sbal)
+{
+	struct qdio_buffer_element *sbale = &sbal->element[0];
+	u64 req_id = (unsigned long) sbale->addr;
+
+	return zfcp_reqlist_find(qdio->adapter->req_list, req_id);
+}
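Note on the req_id round trip: the value read back from sbale->addr in zfcp_fsf_reqid_check() and zfcp_fsf_get_req() is the same request id that zfcp_qdio_req_init() (zfcp_qdio.h, later in this commit) stores in SBALE 0 when the request is built, and the lookup uses the hashed request list from zfcp_reqlist.h. Since that list only ever contains ids the driver itself issued, an id that cannot be found points to memory corruption, which is why the handler escalates straight to panic().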
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_fsf.h b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_fsf.h
new file mode 100644
index 0000000..db8c853
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_fsf.h
@@ -0,0 +1,475 @@
+/*
+ * zfcp device driver
+ *
+ * Interface to the FSF support functions.
+ *
+ * Copyright IBM Corporation 2002, 2010
+ */
+
+#ifndef FSF_H
+#define FSF_H
+
+#include <linux/pfn.h>
+#include <linux/scatterlist.h>
+#include <scsi/libfc.h>
+
+#define FSF_QTCB_CURRENT_VERSION		0x00000001
+
+/* FSF commands */
+#define	FSF_QTCB_FCP_CMND			0x00000001
+#define	FSF_QTCB_ABORT_FCP_CMND			0x00000002
+#define	FSF_QTCB_OPEN_PORT_WITH_DID		0x00000005
+#define	FSF_QTCB_OPEN_LUN			0x00000006
+#define	FSF_QTCB_CLOSE_LUN			0x00000007
+#define	FSF_QTCB_CLOSE_PORT			0x00000008
+#define	FSF_QTCB_CLOSE_PHYSICAL_PORT		0x00000009
+#define	FSF_QTCB_SEND_ELS			0x0000000B
+#define	FSF_QTCB_SEND_GENERIC			0x0000000C
+#define	FSF_QTCB_EXCHANGE_CONFIG_DATA		0x0000000D
+#define	FSF_QTCB_EXCHANGE_PORT_DATA		0x0000000E
+#define FSF_QTCB_DOWNLOAD_CONTROL_FILE		0x00000012
+#define FSF_QTCB_UPLOAD_CONTROL_FILE		0x00000013
+
+/* FSF QTCB types */
+#define FSF_IO_COMMAND				0x00000001
+#define FSF_SUPPORT_COMMAND			0x00000002
+#define FSF_CONFIG_COMMAND			0x00000003
+#define FSF_PORT_COMMAND			0x00000004
+
+/* FSF control file upload/download operations' subtype and options */
+#define FSF_CFDC_OPERATION_SUBTYPE		0x00020001
+#define FSF_CFDC_OPTION_NORMAL_MODE		0x00000000
+#define FSF_CFDC_OPTION_FORCE			0x00000001
+#define FSF_CFDC_OPTION_FULL_ACCESS		0x00000002
+#define FSF_CFDC_OPTION_RESTRICTED_ACCESS	0x00000004
+
+/* FSF protocol states */
+#define FSF_PROT_GOOD				0x00000001
+#define FSF_PROT_QTCB_VERSION_ERROR		0x00000010
+#define FSF_PROT_SEQ_NUMB_ERROR			0x00000020
+#define FSF_PROT_UNSUPP_QTCB_TYPE		0x00000040
+#define FSF_PROT_HOST_CONNECTION_INITIALIZING	0x00000080
+#define FSF_PROT_FSF_STATUS_PRESENTED		0x00000100
+#define FSF_PROT_DUPLICATE_REQUEST_ID		0x00000200
+#define FSF_PROT_LINK_DOWN                      0x00000400
+#define FSF_PROT_REEST_QUEUE                    0x00000800
+#define FSF_PROT_ERROR_STATE			0x01000000
+
+/* FSF states */
+#define FSF_GOOD				0x00000000
+#define FSF_PORT_ALREADY_OPEN			0x00000001
+#define FSF_LUN_ALREADY_OPEN			0x00000002
+#define FSF_PORT_HANDLE_NOT_VALID		0x00000003
+#define FSF_LUN_HANDLE_NOT_VALID		0x00000004
+#define FSF_HANDLE_MISMATCH			0x00000005
+#define FSF_SERVICE_CLASS_NOT_SUPPORTED		0x00000006
+#define FSF_FCPLUN_NOT_VALID			0x00000009
+#define FSF_ACCESS_DENIED			0x00000010
+#define FSF_LUN_SHARING_VIOLATION               0x00000012
+#define FSF_FCP_COMMAND_DOES_NOT_EXIST		0x00000022
+#define FSF_DIRECTION_INDICATOR_NOT_VALID	0x00000030
+#define FSF_CMND_LENGTH_NOT_VALID		0x00000033
+#define FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED	0x00000040
+#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED	0x00000041
+#define FSF_ELS_COMMAND_REJECTED		0x00000050
+#define FSF_GENERIC_COMMAND_REJECTED		0x00000051
+#define FSF_PORT_BOXED				0x00000059
+#define FSF_LUN_BOXED				0x0000005A
+#define FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE	0x0000005B
+#define FSF_PAYLOAD_SIZE_MISMATCH		0x00000060
+#define FSF_REQUEST_SIZE_TOO_LARGE		0x00000061
+#define FSF_RESPONSE_SIZE_TOO_LARGE		0x00000062
+#define FSF_SBAL_MISMATCH			0x00000063
+#define FSF_INCONSISTENT_PROT_DATA		0x00000070
+#define FSF_INVALID_PROT_PARM			0x00000071
+#define FSF_BLOCK_GUARD_CHECK_FAILURE		0x00000081
+#define FSF_APP_TAG_CHECK_FAILURE		0x00000082
+#define FSF_REF_TAG_CHECK_FAILURE		0x00000083
+#define FSF_ADAPTER_STATUS_AVAILABLE		0x000000AD
+#define FSF_UNKNOWN_COMMAND			0x000000E2
+#define FSF_UNKNOWN_OP_SUBTYPE                  0x000000E3
+#define FSF_INVALID_COMMAND_OPTION              0x000000E5
+
+#define FSF_PROT_STATUS_QUAL_SIZE		16
+#define FSF_STATUS_QUALIFIER_SIZE		16
+
+/* FSF status qualifier, recommendations */
+#define FSF_SQ_NO_RECOM				0x00
+#define FSF_SQ_FCP_RSP_AVAILABLE		0x01
+#define FSF_SQ_RETRY_IF_POSSIBLE		0x02
+#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED	0x03
+#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE	0x04
+#define FSF_SQ_COMMAND_ABORTED			0x06
+#define FSF_SQ_NO_RETRY_POSSIBLE		0x07
+
+/* FSF status qualifier (most significant 4 bytes), local link down */
+#define FSF_PSQ_LINK_NO_LIGHT			0x00000004
+#define FSF_PSQ_LINK_WRAP_PLUG			0x00000008
+#define FSF_PSQ_LINK_NO_FCP			0x00000010
+#define FSF_PSQ_LINK_FIRMWARE_UPDATE		0x00000020
+#define FSF_PSQ_LINK_INVALID_WWPN		0x00000100
+#define FSF_PSQ_LINK_NO_NPIV_SUPPORT		0x00000200
+#define FSF_PSQ_LINK_NO_FCP_RESOURCES		0x00000400
+#define FSF_PSQ_LINK_NO_FABRIC_RESOURCES	0x00000800
+#define FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE	0x00001000
+#define FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED	0x00002000
+#define FSF_PSQ_LINK_MODE_TABLE_CURRUPTED	0x00004000
+#define FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT		0x00008000
+
+/* payload size in status read buffer */
+#define FSF_STATUS_READ_PAYLOAD_SIZE		4032
+
+/* number of status read buffers that should be sent by ULP */
+#define FSF_STATUS_READS_RECOM			16
+
+/* status types in status read buffer */
+#define FSF_STATUS_READ_PORT_CLOSED		0x00000001
+#define FSF_STATUS_READ_INCOMING_ELS		0x00000002
+#define FSF_STATUS_READ_SENSE_DATA_AVAIL        0x00000003
+#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD	0x00000004
+#define FSF_STATUS_READ_LINK_DOWN		0x00000005
+#define FSF_STATUS_READ_LINK_UP          	0x00000006
+#define FSF_STATUS_READ_NOTIFICATION_LOST	0x00000009
+#define FSF_STATUS_READ_CFDC_UPDATED		0x0000000A
+#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT	0x0000000C
+
+/* status subtypes for link down */
+#define FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK	0x00000000
+#define FSF_STATUS_READ_SUB_FDISC_FAILED	0x00000001
+#define FSF_STATUS_READ_SUB_FIRMWARE_UPDATE	0x00000002
+
+/* status subtypes for unsolicited status notification lost */
+#define FSF_STATUS_READ_SUB_INCOMING_ELS	0x00000001
+#define FSF_STATUS_READ_SUB_ACT_UPDATED		0x00000020
+
+/* topology that is detected by the adapter */
+#define FSF_TOPO_P2P				0x00000001
+#define FSF_TOPO_FABRIC				0x00000002
+#define FSF_TOPO_AL				0x00000003
+
+/* data direction for FCP commands */
+#define FSF_DATADIR_WRITE			0x00000001
+#define FSF_DATADIR_READ			0x00000002
+#define FSF_DATADIR_CMND			0x00000004
+#define FSF_DATADIR_DIF_WRITE_INSERT		0x00000009
+#define FSF_DATADIR_DIF_READ_STRIP		0x0000000a
+#define FSF_DATADIR_DIF_WRITE_CONVERT		0x0000000b
+#define FSF_DATADIR_DIF_READ_CONVERT		0x0000000c
+
+/* data protection control flags */
+#define FSF_APP_TAG_CHECK_ENABLE		0x10
+
+/* fc service class */
+#define FSF_CLASS_3				0x00000003
+
+/* logging space behind QTCB */
+#define FSF_QTCB_LOG_SIZE			1024
+
+/* channel features */
+#define FSF_FEATURE_CFDC			0x00000002
+#define FSF_FEATURE_LUN_SHARING			0x00000004
+#define FSF_FEATURE_NOTIFICATION_LOST		0x00000008
+#define FSF_FEATURE_HBAAPI_MANAGEMENT           0x00000010
+#define FSF_FEATURE_ELS_CT_CHAINED_SBALS	0x00000020
+#define FSF_FEATURE_UPDATE_ALERT		0x00000100
+#define FSF_FEATURE_MEASUREMENT_DATA		0x00000200
+#define FSF_FEATURE_DIF_PROT_TYPE1		0x00010000
+#define FSF_FEATURE_DIX_PROT_TCPIP		0x00020000
+
+/* host connection features */
+#define FSF_FEATURE_NPIV_MODE			0x00000001
+
+/* option */
+#define FSF_OPEN_LUN_SUPPRESS_BOXING		0x00000001
+
+/* open LUN access flags*/
+#define FSF_UNIT_ACCESS_EXCLUSIVE		0x02000000
+#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER	0x10000000
+
+/* FSF interface for CFDC */
+#define ZFCP_CFDC_MAX_SIZE		127 * 1024
+#define ZFCP_CFDC_PAGES 		PFN_UP(ZFCP_CFDC_MAX_SIZE)
+
+struct zfcp_fsf_cfdc {
+	struct scatterlist sg[ZFCP_CFDC_PAGES];
+	u32 command;
+	u32 option;
+};
+
+struct fsf_queue_designator {
+	u8  cssid;
+	u8  chpid;
+	u8  hla;
+	u8  ua;
+	u32 res1;
+} __attribute__ ((packed));
+
+struct fsf_bit_error_payload {
+	u32 res1;
+	u32 link_failure_error_count;
+	u32 loss_of_sync_error_count;
+	u32 loss_of_signal_error_count;
+	u32 primitive_sequence_error_count;
+	u32 invalid_transmission_word_error_count;
+	u32 crc_error_count;
+	u32 primitive_sequence_event_timeout_count;
+	u32 elastic_buffer_overrun_error_count;
+	u32 fcal_arbitration_timeout_count;
+	u32 advertised_receive_b2b_credit;
+	u32 current_receive_b2b_credit;
+	u32 advertised_transmit_b2b_credit;
+	u32 current_transmit_b2b_credit;
+} __attribute__ ((packed));
+
+struct fsf_link_down_info {
+	u32 error_code;
+	u32 res1;
+	u8 res2[2];
+	u8 primary_status;
+	u8 ioerr_code;
+	u8 action_code;
+	u8 reason_code;
+	u8 explanation_code;
+	u8 vendor_specific_code;
+} __attribute__ ((packed));
+
+struct fsf_status_read_buffer {
+	u32 status_type;
+	u32 status_subtype;
+	u32 length;
+	u32 res1;
+	struct fsf_queue_designator queue_designator;
+	u8 res2;
+	u8 d_id[3];
+	u32 class;
+	u64 fcp_lun;
+	u8  res3[24];
+	union {
+		u8  data[FSF_STATUS_READ_PAYLOAD_SIZE];
+		u32 word[FSF_STATUS_READ_PAYLOAD_SIZE/sizeof(u32)];
+		struct fsf_link_down_info link_down_info;
+		struct fsf_bit_error_payload bit_error;
+	} payload;
+} __attribute__ ((packed));
+
+struct fsf_qual_version_error {
+	u32 fsf_version;
+	u32 res1[3];
+} __attribute__ ((packed));
+
+struct fsf_qual_sequence_error {
+	u32 exp_req_seq_no;
+	u32 res1[3];
+} __attribute__ ((packed));
+
+struct fsf_qual_latency_info {
+	u32 channel_lat;
+	u32 fabric_lat;
+	u8 res1[8];
+} __attribute__ ((packed));
+
+union fsf_prot_status_qual {
+	u32 word[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u32)];
+	u64 doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u64)];
+	struct fsf_qual_version_error   version_error;
+	struct fsf_qual_sequence_error  sequence_error;
+	struct fsf_link_down_info link_down_info;
+	struct fsf_qual_latency_info latency_info;
+} __attribute__ ((packed));
+
+struct fsf_qtcb_prefix {
+	u64 req_id;
+	u32 qtcb_version;
+	u32 ulp_info;
+	u32 qtcb_type;
+	u32 req_seq_no;
+	u32 prot_status;
+	union fsf_prot_status_qual prot_status_qual;
+	u8  res1[20];
+} __attribute__ ((packed));
+
+struct fsf_statistics_info {
+	u64 input_req;
+	u64 output_req;
+	u64 control_req;
+	u64 input_mb;
+	u64 output_mb;
+	u64 seconds_act;
+} __attribute__ ((packed));
+
+union fsf_status_qual {
+	u8  byte[FSF_STATUS_QUALIFIER_SIZE];
+	u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)];
+	u32 word[FSF_STATUS_QUALIFIER_SIZE / sizeof (u32)];
+	u64 doubleword[FSF_STATUS_QUALIFIER_SIZE / sizeof(u64)];
+	struct fsf_queue_designator fsf_queue_designator;
+	struct fsf_link_down_info link_down_info;
+} __attribute__ ((packed));
+
+struct fsf_qtcb_header {
+	u64 req_handle;
+	u32 fsf_command;
+	u32 res1;
+	u32 port_handle;
+	u32 lun_handle;
+	u32 res2;
+	u32 fsf_status;
+	union fsf_status_qual fsf_status_qual;
+	u8  res3[28];
+	u16 log_start;
+	u16 log_length;
+	u8  res4[16];
+} __attribute__ ((packed));
+
+#define FSF_PLOGI_MIN_LEN	112
+
+#define FSF_FCP_CMND_SIZE	288
+#define FSF_FCP_RSP_SIZE	128
+
+struct fsf_qtcb_bottom_io {
+	u32 data_direction;
+	u32 service_class;
+	u8  res1;
+	u8  data_prot_flags;
+	u16 app_tag_value;
+	u32 ref_tag_value;
+	u32 fcp_cmnd_length;
+	u32 data_block_length;
+	u32 prot_data_length;
+	u8  res2[4];
+	u8  fcp_cmnd[FSF_FCP_CMND_SIZE];
+	u8  fcp_rsp[FSF_FCP_RSP_SIZE];
+	u8  res3[64];
+} __attribute__ ((packed));
+
+struct fsf_qtcb_bottom_support {
+	u32 operation_subtype;
+	u8  res1[13];
+	u8 d_id[3];
+	u32 option;
+	u64 fcp_lun;
+	u64 res2;
+	u64 req_handle;
+	u32 service_class;
+	u8  res3[3];
+	u8  timeout;
+	u32 lun_access_info;
+	u8  res4[180];
+	u32 els1_length;
+	u32 els2_length;
+	u32 req_buf_length;
+	u32 resp_buf_length;
+	u8  els[256];
+} __attribute__ ((packed));
+
+#define ZFCP_FSF_TIMER_INT_MASK	0x3FFF
+
+struct fsf_qtcb_bottom_config {
+	u32 lic_version;
+	u32 feature_selection;
+	u32 high_qtcb_version;
+	u32 low_qtcb_version;
+	u32 max_qtcb_size;
+	u32 max_data_transfer_size;
+	u32 adapter_features;
+	u32 connection_features;
+	u32 fc_topology;
+	u32 fc_link_speed;
+	u32 adapter_type;
+	u8 res0;
+	u8 peer_d_id[3];
+	u16 status_read_buf_num;
+	u16 timer_interval;
+	u8 res2[9];
+	u8 s_id[3];
+	u8 nport_serv_param[128];
+	u8 res3[8];
+	u32 adapter_ports;
+	u32 hardware_version;
+	u8 serial_number[32];
+	u8 plogi_payload[112];
+	struct fsf_statistics_info stat_info;
+	u8 res4[112];
+} __attribute__ ((packed));
+
+struct fsf_qtcb_bottom_port {
+	u64 wwpn;
+	u32 fc_port_id;
+	u32 port_type;
+	u32 port_state;
+	u32 class_of_service;	/* should be 0x00000006 for class 2 and 3 */
+	u8 supported_fc4_types[32]; /* should be 0x00000100 for scsi fcp */
+	u8 active_fc4_types[32];
+	u32 supported_speed;	/* 0x0001 for 1 GBit/s or 0x0002 for 2 GBit/s */
+	u32 maximum_frame_size;	/* fixed value of 2112 */
+	u64 seconds_since_last_reset;
+	u64 tx_frames;
+	u64 tx_words;
+	u64 rx_frames;
+	u64 rx_words;
+	u64 lip;		/* 0 */
+	u64 nos;		/* currently 0 */
+	u64 error_frames;	/* currently 0 */
+	u64 dumped_frames;	/* currently 0 */
+	u64 link_failure;
+	u64 loss_of_sync;
+	u64 loss_of_signal;
+	u64 psp_error_counts;
+	u64 invalid_tx_words;
+	u64 invalid_crcs;
+	u64 input_requests;
+	u64 output_requests;
+	u64 control_requests;
+	u64 input_mb;		/* where 1 MByte == 1,000,000 Bytes */
+	u64 output_mb;		/* where 1 MByte == 1,000,000 Bytes */
+	u8 cp_util;
+	u8 cb_util;
+	u8 a_util;
+	u8 res2[253];
+} __attribute__ ((packed));
+
+union fsf_qtcb_bottom {
+	struct fsf_qtcb_bottom_io      io;
+	struct fsf_qtcb_bottom_support support;
+	struct fsf_qtcb_bottom_config  config;
+	struct fsf_qtcb_bottom_port port;
+};
+
+struct fsf_qtcb {
+	struct fsf_qtcb_prefix prefix;
+	struct fsf_qtcb_header header;
+	union  fsf_qtcb_bottom bottom;
+	u8 log[FSF_QTCB_LOG_SIZE];
+} __attribute__ ((packed));
+
+struct zfcp_blk_drv_data {
+#define ZFCP_BLK_DRV_DATA_MAGIC			0x1
+	u32 magic;
+#define ZFCP_BLK_LAT_VALID			0x1
+#define ZFCP_BLK_REQ_ERROR			0x2
+	u16 flags;
+	u8 inb_usage;
+	u8 outb_usage;
+	u64 channel_lat;
+	u64 fabric_lat;
+} __attribute__ ((packed));
+
+/**
+ * struct zfcp_fsf_ct_els - zfcp data for ct or els request
+ * @req: scatter-gather list for request
+ * @resp: scatter-gather list for response
+ * @handler: handler function (called for response to the request)
+ * @handler_data: data passed to handler function
+ * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
+ * @status: used to pass error status to calling function
+ */
+struct zfcp_fsf_ct_els {
+	struct scatterlist *req;
+	struct scatterlist *resp;
+	void (*handler)(void *);
+	void *handler_data;
+	struct zfcp_port *port;
+	int status;
+};
+
+#endif				/* FSF_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_qdio.c b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_qdio.c
new file mode 100644
index 0000000..52c6b59
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_qdio.c
@@ -0,0 +1,511 @@
+/*
+ * zfcp device driver
+ *
+ * Setup and helper functions to access QDIO.
+ *
+ * Copyright IBM Corporation 2002, 2010
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "zfcp_ext.h"
+#include "zfcp_qdio.h"
+
+#define QBUFF_PER_PAGE		(PAGE_SIZE / sizeof(struct qdio_buffer))
+
+static bool enable_multibuffer;
+module_param_named(datarouter, enable_multibuffer, bool, 0400);
+MODULE_PARM_DESC(datarouter, "Enable hardware data router support");
+
+static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
+{
+	int pos;
+
+	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
+		sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
+		if (!sbal[pos])
+			return -ENOMEM;
+	}
+	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
+		if (pos % QBUFF_PER_PAGE)
+			sbal[pos] = sbal[pos - 1] + 1;
+	return 0;
+}
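As a rough worked example (the exact sizes live in headers outside this patch, so the numbers are an assumption): with the usual s390 qdio layout of 16 SBALEs of 16 bytes per SBAL, a struct qdio_buffer is 256 bytes, QBUFF_PER_PAGE is 4096 / 256 = 16 on 4 KiB pages, and the 128 buffers of a queue fit into 8 zeroed pages; the second loop simply points buffers 1..15 of each page at consecutive 256-byte slots within the page allocated for buffer 0.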
+
+static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
+				    unsigned int qdio_err)
+{
+	struct zfcp_adapter *adapter = qdio->adapter;
+
+	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
+
+	if (qdio_err & QDIO_ERROR_SLSB_STATE) {
+		zfcp_qdio_siosl(adapter);
+		zfcp_erp_adapter_shutdown(adapter, 0, id);
+		return;
+	}
+	zfcp_erp_adapter_reopen(adapter,
+				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+				ZFCP_STATUS_COMMON_ERP_FAILED, id);
+}
+
+static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
+{
+	int i, sbal_idx;
+
+	for (i = first; i < first + cnt; i++) {
+		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
+		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
+	}
+}
+
+/* this needs to be called prior to updating the queue fill level */
+static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
+{
+	unsigned long long now, span;
+	int used;
+
+	now = get_clock_monotonic();
+	span = (now - qdio->req_q_time) >> 12;
+	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
+	qdio->req_q_util += used * span;
+	qdio->req_q_time = now;
+}
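A brief worked example of the accounting above (assuming the usual s390 TOD clock format, in which shifting a clock delta right by 12 bits yields microseconds): if 10 SBALs have been outstanding since the last update 2000 us ago, req_q_util grows by 10 * 2000 = 20000. The counter thus accumulates buffers-in-use weighted by time, so dividing it by the total elapsed time gives the average fill level of the request queue.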
+
+static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
+			      int queue_no, int idx, int count,
+			      unsigned long parm)
+{
+	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
+
+	if (unlikely(qdio_err)) {
+		zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
+		return;
+	}
+
+	/* cleanup all SBALs being program-owned now */
+	zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
+
+	spin_lock_irq(&qdio->stat_lock);
+	zfcp_qdio_account(qdio);
+	spin_unlock_irq(&qdio->stat_lock);
+	atomic_add(count, &qdio->req_q_free);
+	wake_up(&qdio->req_q_wq);
+}
+
+static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
+			       int queue_no, int idx, int count,
+			       unsigned long parm)
+{
+	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
+	struct zfcp_adapter *adapter = qdio->adapter;
+	int sbal_no, sbal_idx;
+
+	if (unlikely(qdio_err)) {
+		if (zfcp_adapter_multi_buffer_active(adapter)) {
+			void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
+			struct qdio_buffer_element *sbale;
+			u64 req_id;
+			u8 scount;
+
+			memset(pl, 0,
+			       ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
+			sbale = qdio->res_q[idx]->element;
+			req_id = (u64) sbale->addr;
+			scount = min(sbale->scount + 1,
+				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
+				     /* incl. signaling SBAL */
+
+			for (sbal_no = 0; sbal_no < scount; sbal_no++) {
+				sbal_idx = (idx + sbal_no) %
+					QDIO_MAX_BUFFERS_PER_Q;
+				pl[sbal_no] = qdio->res_q[sbal_idx];
+			}
+			zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
+		}
+		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
+		return;
+	}
+
+	/*
+	 * go through all SBALs from input queue currently
+	 * returned by QDIO layer
+	 */
+	for (sbal_no = 0; sbal_no < count; sbal_no++) {
+		sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
+		/* go through all SBALEs of SBAL */
+		zfcp_fsf_reqid_check(qdio, sbal_idx);
+	}
+
+	/*
+	 * put SBALs back to response queue
+	 */
+	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
+		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
+}
+
+static struct qdio_buffer_element *
+zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+	struct qdio_buffer_element *sbale;
+
+	/* set last entry flag in current SBALE of current SBAL */
+	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
+
+	/* don't exceed last allowed SBAL */
+	if (q_req->sbal_last == q_req->sbal_limit)
+		return NULL;
+
+	/* set chaining flag in first SBALE of current SBAL */
+	sbale = zfcp_qdio_sbale_req(qdio, q_req);
+	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;
+
+	/* calculate index of next SBAL */
+	q_req->sbal_last++;
+	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
+
+	/* keep this request's number of SBALs up-to-date */
+	q_req->sbal_number++;
+	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);
+
+	/* start at first SBALE of new SBAL */
+	q_req->sbale_curr = 0;
+
+	/* set storage-block type for new SBAL */
+	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+	sbale->sflags |= q_req->sbtype;
+
+	return sbale;
+}
+
+static struct qdio_buffer_element *
+zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+	if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
+		return zfcp_qdio_sbal_chain(qdio, q_req);
+	q_req->sbale_curr++;
+	return zfcp_qdio_sbale_curr(qdio, q_req);
+}
+
+/**
+ * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * @sg: scatter-gather list
+ * Returns: zero or -EINVAL on error
+ */
+int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+			    struct scatterlist *sg)
+{
+	struct qdio_buffer_element *sbale;
+
+	/* set storage-block type for this request */
+	sbale = zfcp_qdio_sbale_req(qdio, q_req);
+	sbale->sflags |= q_req->sbtype;
+
+	for (; sg; sg = sg_next(sg)) {
+		sbale = zfcp_qdio_sbale_next(qdio, q_req);
+		if (!sbale) {
+			atomic_inc(&qdio->req_q_full);
+			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
+					     q_req->sbal_number);
+			return -EINVAL;
+		}
+		sbale->addr = sg_virt(sg);
+		sbale->length = sg->length;
+	}
+	return 0;
+}
+
+static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
+{
+	if (atomic_read(&qdio->req_q_free) ||
+	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+		return 1;
+	return 0;
+}
+
+/**
+ * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
+ * @qdio: pointer to struct zfcp_qdio
+ *
+ * The req_q_lock must be held by the caller of this function, and
+ * this function may only be called from process context; it will
+ * sleep when waiting for a free sbal.
+ *
+ * Returns: 0 on success, -EIO if there is no free sbal after waiting.
+ */
+int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+{
+	long ret;
+
+	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
+		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
+
+	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+		return -EIO;
+
+	if (ret > 0)
+		return 0;
+
+	if (!ret) {
+		atomic_inc(&qdio->req_q_full);
+		/* assume hanging outbound queue, try queue recovery */
+		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
+	}
+
+	return -EIO;
+}
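For context, a minimal sketch of the calling convention documented above; the same pattern is used by zfcp_fsf_fcp_task_mgmt() and zfcp_fsf_control_file() earlier in this commit, and the function name here is illustrative only:

/* Sketch only: the caller owns req_q_lock; zfcp_qdio_sbal_get() may sleep. */
static int example_get_sbal_and_send(struct zfcp_qdio *qdio)
{
	int ret = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))	/* waits up to 5 s for a free SBAL */
		goto out;

	/* ... create and send an FSF request here ... */
	ret = 0;
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return ret;
}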
+
+/**
+ * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+	int retval;
+	u8 sbal_number = q_req->sbal_number;
+
+	spin_lock(&qdio->stat_lock);
+	zfcp_qdio_account(qdio);
+	spin_unlock(&qdio->stat_lock);
+
+	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
+			 q_req->sbal_first, sbal_number);
+
+	if (unlikely(retval)) {
+		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
+				     sbal_number);
+		return retval;
+	}
+
+	/* account for transferred buffers */
+	atomic_sub(sbal_number, &qdio->req_q_free);
+	qdio->req_q_idx += sbal_number;
+	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
+
+	return 0;
+}
+
+
+static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
+				      struct zfcp_qdio *qdio)
+{
+	memset(id, 0, sizeof(*id));
+	id->cdev = qdio->adapter->ccw_device;
+	id->q_format = QDIO_ZFCP_QFMT;
+	memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
+	ASCEBC(id->adapter_name, 8);
+	id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
+	if (enable_multibuffer)
+		id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
+	id->no_input_qs = 1;
+	id->no_output_qs = 1;
+	id->input_handler = zfcp_qdio_int_resp;
+	id->output_handler = zfcp_qdio_int_req;
+	id->int_parm = (unsigned long) qdio;
+	id->input_sbal_addr_array = (void **) (qdio->res_q);
+	id->output_sbal_addr_array = (void **) (qdio->req_q);
+	id->scan_threshold =
+		QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
+}
+
+/**
+ * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
+ * @qdio: pointer to struct zfcp_qdio
+ * Returns: -ENOMEM on memory allocation error or return value from
+ *          qdio_allocate
+ */
+static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
+{
+	struct qdio_initialize init_data;
+
+	if (zfcp_qdio_buffers_enqueue(qdio->req_q) ||
+	    zfcp_qdio_buffers_enqueue(qdio->res_q))
+		return -ENOMEM;
+
+	zfcp_qdio_setup_init_data(&init_data, qdio);
+	init_waitqueue_head(&qdio->req_q_wq);
+
+	return qdio_allocate(&init_data);
+}
+
+/**
+ * zfcp_qdio_close - close qdio queues for an adapter
+ * @qdio: pointer to structure zfcp_qdio
+ */
+void zfcp_qdio_close(struct zfcp_qdio *qdio)
+{
+	struct zfcp_adapter *adapter = qdio->adapter;
+	int idx, count;
+
+	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+		return;
+
+	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
+	spin_lock_irq(&qdio->req_q_lock);
+	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
+	spin_unlock_irq(&qdio->req_q_lock);
+
+	wake_up(&qdio->req_q_wq);
+
+	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
+
+	/* cleanup used outbound sbals */
+	count = atomic_read(&qdio->req_q_free);
+	if (count < QDIO_MAX_BUFFERS_PER_Q) {
+		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
+		count = QDIO_MAX_BUFFERS_PER_Q - count;
+		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
+	}
+	qdio->req_q_idx = 0;
+	atomic_set(&qdio->req_q_free, 0);
+}
+
+/**
+ * zfcp_qdio_open - prepare and initialize response queue
+ * @qdio: pointer to struct zfcp_qdio
+ * Returns: 0 on success, otherwise -EIO
+ */
+int zfcp_qdio_open(struct zfcp_qdio *qdio)
+{
+	struct qdio_buffer_element *sbale;
+	struct qdio_initialize init_data;
+	struct zfcp_adapter *adapter = qdio->adapter;
+	struct ccw_device *cdev = adapter->ccw_device;
+	struct qdio_ssqd_desc ssqd;
+	int cc;
+
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
+		return -EIO;
+
+	atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+			  &qdio->adapter->status);
+
+	zfcp_qdio_setup_init_data(&init_data, qdio);
+
+	if (qdio_establish(&init_data))
+		goto failed_establish;
+
+	if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
+		goto failed_qdio;
+
+	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
+		atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
+				&qdio->adapter->status);
+
+	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
+		atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
+	} else {
+		atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
+	}
+
+	qdio->max_sbale_per_req =
+		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
+		- 2;
+	if (qdio_activate(cdev))
+		goto failed_qdio;
+
+	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
+		sbale = &(qdio->res_q[cc]->element[0]);
+		sbale->length = 0;
+		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
+		sbale->sflags = 0;
+		sbale->addr = NULL;
+	}
+
+	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
+		goto failed_qdio;
+
+	/* set index of first available SBAL / number of available SBALs */
+	qdio->req_q_idx = 0;
+	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
+	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+
+	if (adapter->scsi_host) {
+		adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
+		adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
+	}
+
+	return 0;
+
+failed_qdio:
+	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+failed_establish:
+	dev_err(&cdev->dev,
+		"Setting up the QDIO connection to the FCP adapter failed\n");
+	return -EIO;
+}
+
+void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
+{
+	int p;
+
+	if (!qdio)
+		return;
+
+	if (qdio->adapter->ccw_device)
+		qdio_free(qdio->adapter->ccw_device);
+
+	for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
+		free_page((unsigned long) qdio->req_q[p]);
+		free_page((unsigned long) qdio->res_q[p]);
+	}
+
+	kfree(qdio);
+}
+
+int zfcp_qdio_setup(struct zfcp_adapter *adapter)
+{
+	struct zfcp_qdio *qdio;
+
+	qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
+	if (!qdio)
+		return -ENOMEM;
+
+	qdio->adapter = adapter;
+
+	if (zfcp_qdio_allocate(qdio)) {
+		zfcp_qdio_destroy(qdio);
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&qdio->req_q_lock);
+	spin_lock_init(&qdio->stat_lock);
+
+	adapter->qdio = qdio;
+	return 0;
+}
+
+/**
+ * zfcp_qdio_siosl - Trigger logging in FCP channel
+ * @adapter: The zfcp_adapter where to trigger logging
+ *
+ * Call the cio siosl function to trigger hardware logging.  This
+ * wrapper function sets a flag to ensure hardware logging is only
+ * triggered once before going through qdio shutdown.
+ *
+ * The triggers are always run from qdio tasklet context, so no
+ * additional synchronization is necessary.
+ */
+void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
+{
+	int rc;
+
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
+		return;
+
+	rc = ccw_device_siosl(adapter->ccw_device);
+	if (!rc)
+		atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+				&adapter->status);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_qdio.h b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_qdio.h
new file mode 100644
index 0000000..8ac7f53
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_qdio.h
@@ -0,0 +1,271 @@
+/*
+ * zfcp device driver
+ *
+ * Header file for zfcp qdio interface
+ *
+ * Copyright IBM Corporation 2010
+ */
+
+#ifndef ZFCP_QDIO_H
+#define ZFCP_QDIO_H
+
+#include <asm/qdio.h>
+
+#define ZFCP_QDIO_SBALE_LEN	PAGE_SIZE
+
+/* Max SBALS for chaining */
+#define ZFCP_QDIO_MAX_SBALS_PER_REQ	36
+
+/**
+ * struct zfcp_qdio - basic qdio data structure
+ * @res_q: response queue
+ * @req_q: request queue
+ * @req_q_idx: index of next free buffer
+ * @req_q_free: number of free buffers in queue
+ * @stat_lock: lock to protect req_q_util and req_q_time
+ * @req_q_lock: lock to serialize access to request queue
+ * @req_q_time: time of last fill level change
+ * @req_q_util: used for accounting
+ * @req_q_full: queue full incidents
+ * @req_q_wq: used to wait for SBAL availability
+ * @adapter: adapter used in conjunction with this qdio structure
+ * @max_sbale_per_sbal: SBAL entries usable per SBAL, depending on multibuffer mode
+ * @max_sbale_per_req: maximum number of SBAL entries usable per request
+ */
+struct zfcp_qdio {
+	struct qdio_buffer	*res_q[QDIO_MAX_BUFFERS_PER_Q];
+	struct qdio_buffer	*req_q[QDIO_MAX_BUFFERS_PER_Q];
+	u8			req_q_idx;
+	atomic_t		req_q_free;
+	spinlock_t		stat_lock;
+	spinlock_t		req_q_lock;
+	unsigned long long	req_q_time;
+	u64			req_q_util;
+	atomic_t		req_q_full;
+	wait_queue_head_t	req_q_wq;
+	struct zfcp_adapter	*adapter;
+	u16			max_sbale_per_sbal;
+	u16			max_sbale_per_req;
+};
+
+/**
+ * struct zfcp_qdio_req - qdio queue related values for a request
+ * @sbtype: sbal type flags for sbale 0
+ * @sbal_number: number of sbals used by this request
+ * @sbal_first: first sbal for this request
+ * @sbal_last: last sbal for this request
+ * @sbal_limit: last possible sbal for this request
+ * @sbale_curr: index of the current sbale within the current sbal
+ * @sbal_response: sbal used in interrupt
+ * @qdio_outb_usage: usage of outbound queue
+ */
+struct zfcp_qdio_req {
+	u8	sbtype;
+	u8	sbal_number;
+	u8	sbal_first;
+	u8	sbal_last;
+	u8	sbal_limit;
+	u8	sbale_curr;
+	u8	sbal_response;
+	u16	qdio_outb_usage;
+};
+
+/**
+ * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * Returns: pointer to qdio_buffer_element (sbale) structure
+ */
+static inline struct qdio_buffer_element *
+zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+	return &qdio->req_q[q_req->sbal_last]->element[0];
+}
+
+/**
+ * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * Returns: pointer to qdio_buffer_element (sbale) structure
+ */
+static inline struct qdio_buffer_element *
+zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+	return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
+}
+
+/**
+ * zfcp_qdio_req_init - initialize qdio request
+ * @qdio: request queue where to start putting the request
+ * @q_req: the qdio request to start
+ * @req_id: The request id
+ * @sbtype: type flags to set for all sbals
+ * @data: First data block
+ * @len: Length of first data block
+ *
+ * This is the start of putting the request into the queue; the last
+ * step is passing the request to zfcp_qdio_send. The request queue
+ * lock must be held during the whole process from init to send.
+ */
+static inline
+void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+			unsigned long req_id, u8 sbtype, void *data, u32 len)
+{
+	struct qdio_buffer_element *sbale;
+	int count = min(atomic_read(&qdio->req_q_free),
+			ZFCP_QDIO_MAX_SBALS_PER_REQ);
+
+	q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
+	q_req->sbal_number = 1;
+	q_req->sbtype = sbtype;
+	q_req->sbale_curr = 1;
+	q_req->sbal_limit = (q_req->sbal_first + count - 1)
+					% QDIO_MAX_BUFFERS_PER_Q;
+
+	sbale = zfcp_qdio_sbale_req(qdio, q_req);
+	sbale->addr = (void *) req_id;
+	sbale->eflags = 0;
+	sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
+
+	if (unlikely(!data))
+		return;
+	sbale++;
+	sbale->addr = data;
+	sbale->length = len;
+}
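To make the init-to-send contract above concrete, here is a minimal sketch (not part of the patch; the function name is illustrative, error paths are trimmed, and in the driver itself zfcp_fsf_req_create()/zfcp_fsf_req_send() perform the equivalent steps). "hdr" stands for the request's first data block, typically the QTCB:

/* Sketch only: caller holds qdio->req_q_lock for the whole sequence. */
static int example_req_lifecycle(struct zfcp_qdio *qdio,
				 struct zfcp_qdio_req *q_req,
				 unsigned long req_id, void *hdr, u32 hdr_len,
				 struct scatterlist *sg)
{
	int ret;

	zfcp_qdio_req_init(qdio, q_req, req_id, SBAL_SFLAGS0_TYPE_WRITE,
			   hdr, hdr_len);		/* SBALE 0 carries req_id */
	ret = zfcp_qdio_sbals_from_sg(qdio, q_req, sg);	/* map the payload */
	if (ret)
		return ret;
	zfcp_qdio_set_sbale_last(qdio, q_req);		/* mark the final SBALE */
	return zfcp_qdio_send(qdio, q_req);		/* hand the SBALs to QDIO */
}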
+
+/**
+ * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ *
+ * This is only required for single sbal requests; calling it when
+ * wrapping around to the next sbal is a bug.
+ */
+static inline
+void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+			 void *data, u32 len)
+{
+	struct qdio_buffer_element *sbale;
+
+	BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
+	q_req->sbale_curr++;
+	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+	sbale->addr = data;
+	sbale->length = len;
+}
+
+/**
+ * zfcp_qdio_set_sbale_last - set last entry flag in current sbale
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
+			      struct zfcp_qdio_req *q_req)
+{
+	struct qdio_buffer_element *sbale;
+
+	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
+}
+
+/**
+ * zfcp_qdio_sg_one_sbal - check if one sbale is enough for sg data
+ * @sg: The scatterlist where to check the data size
+ *
+ * Returns: 1 when one sbale is enough for the data in the scatterlist,
+ *	    0 if not.
+ */
+static inline
+int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
+{
+	return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
+}
+
+/**
+ * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: The current zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio *qdio,
+				  struct zfcp_qdio_req *q_req)
+{
+	q_req->sbale_curr = qdio->max_sbale_per_sbal - 1;
+}
+
+/**
+ * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: The current zfcp_qdio_req
+ * @max_sbals: maximum number of SBALs allowed
+ */
+static inline
+void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
+			  struct zfcp_qdio_req *q_req, int max_sbals)
+{
+	int count = min(atomic_read(&qdio->req_q_free), max_sbals);
+
+	q_req->sbal_limit = (q_req->sbal_first + count - 1) %
+				QDIO_MAX_BUFFERS_PER_Q;
+}
+
+/**
+ * zfcp_qdio_set_data_div - set data division count
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: The current zfcp_qdio_req
+ * @count: The data division count
+ */
+static inline
+void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
+			    struct zfcp_qdio_req *q_req, u32 count)
+{
+	struct qdio_buffer_element *sbale;
+
+	sbale = qdio->req_q[q_req->sbal_first]->element;
+	sbale->length = count;
+}
+
+/**
+ * zfcp_qdio_sbale_count - count sbales used for a scatterlist
+ * @sg: pointer to struct scatterlist
+ */
+static inline
+unsigned int zfcp_qdio_sbale_count(struct scatterlist *sg)
+{
+	unsigned int count = 0;
+
+	for (; sg; sg = sg_next(sg))
+		count++;
+
+	return count;
+}
+
+/**
+ * zfcp_qdio_real_bytes - count bytes used
+ * @sg: pointer to struct scatterlist
+ */
+static inline
+unsigned int zfcp_qdio_real_bytes(struct scatterlist *sg)
+{
+	unsigned int real_bytes = 0;
+
+	for (; sg; sg = sg_next(sg))
+		real_bytes += sg->length;
+
+	return real_bytes;
+}
+
+/**
+ * zfcp_qdio_set_scount - set SBAL count value
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: The current zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_set_scount(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+	struct qdio_buffer_element *sbale;
+
+	sbale = qdio->req_q[q_req->sbal_first]->element;
+	sbale->scount = q_req->sbal_number - 1;
+}
+
+#endif /* ZFCP_QDIO_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_reqlist.h b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_reqlist.h
new file mode 100644
index 0000000..a72d1b7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_reqlist.h
@@ -0,0 +1,183 @@
+/*
+ * zfcp device driver
+ *
+ * Data structure and helper functions for tracking pending FSF
+ * requests.
+ *
+ * Copyright IBM Corporation 2009
+ */
+
+#ifndef ZFCP_REQLIST_H
+#define ZFCP_REQLIST_H
+
+/* number of hash buckets */
+#define ZFCP_REQ_LIST_BUCKETS 128
+
+/**
+ * struct zfcp_reqlist - Container for request list (reqlist)
+ * @lock: Spinlock for protecting the hash list
+ * @buckets: Array of hashbuckets, each is a list of requests in this bucket
+ */
+struct zfcp_reqlist {
+	spinlock_t lock;
+	struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
+};
+
+static inline int zfcp_reqlist_hash(unsigned long req_id)
+{
+	return req_id % ZFCP_REQ_LIST_BUCKETS;
+}
+
+/**
+ * zfcp_reqlist_alloc - Allocate and initialize reqlist
+ *
+ * Returns pointer to allocated reqlist on success, or NULL on
+ * allocation failure.
+ */
+static inline struct zfcp_reqlist *zfcp_reqlist_alloc(void)
+{
+	unsigned int i;
+	struct zfcp_reqlist *rl;
+
+	rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL);
+	if (!rl)
+		return NULL;
+
+	spin_lock_init(&rl->lock);
+
+	for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+		INIT_LIST_HEAD(&rl->buckets[i]);
+
+	return rl;
+}
+
+/**
+ * zfcp_reqlist_isempty - Check whether the request list is empty
+ * @rl: pointer to reqlist
+ *
+ * Returns: 1 if list is empty, 0 if not
+ */
+static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl)
+{
+	unsigned int i;
+
+	for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+		if (!list_empty(&rl->buckets[i]))
+			return 0;
+	return 1;
+}
+
+/**
+ * zfcp_reqlist_free - Free allocated memory for reqlist
+ * @rl: The reqlist where to free memory
+ */
+static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl)
+{
+	/* sanity check */
+	BUG_ON(!zfcp_reqlist_isempty(rl));
+
+	kfree(rl);
+}
+
+static inline struct zfcp_fsf_req *
+_zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+	struct zfcp_fsf_req *req;
+	unsigned int i;
+
+	i = zfcp_reqlist_hash(req_id);
+	list_for_each_entry(req, &rl->buckets[i], list)
+		if (req->req_id == req_id)
+			return req;
+	return NULL;
+}
+
+/**
+ * zfcp_reqlist_find - Lookup FSF request by its request id
+ * @rl: The reqlist where to lookup the FSF request
+ * @req_id: The request id to look for
+ *
+ * Returns a pointer to the FSF request with the specified request id
+ * or NULL if there is no known FSF request with this id.
+ */
+static inline struct zfcp_fsf_req *
+zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+	unsigned long flags;
+	struct zfcp_fsf_req *req;
+
+	spin_lock_irqsave(&rl->lock, flags);
+	req = _zfcp_reqlist_find(rl, req_id);
+	spin_unlock_irqrestore(&rl->lock, flags);
+
+	return req;
+}
+
+/**
+ * zfcp_reqlist_find_rm - Lookup request by id and remove it from reqlist
+ * @rl: reqlist where to search and remove entry
+ * @req_id: The request id of the request to look for
+ *
+ * This function tries to find the FSF request with the specified
+ * id and then removes it from the reqlist. The reqlist lock is held
+ * during both steps of the operation.
+ *
+ * Returns: Pointer to the FSF request if the request has been found,
+ * NULL if it has not been found.
+ */
+static inline struct zfcp_fsf_req *
+zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+	unsigned long flags;
+	struct zfcp_fsf_req *req;
+
+	spin_lock_irqsave(&rl->lock, flags);
+	req = _zfcp_reqlist_find(rl, req_id);
+	if (req)
+		list_del(&req->list);
+	spin_unlock_irqrestore(&rl->lock, flags);
+
+	return req;
+}
+
+/**
+ * zfcp_reqlist_add - Add entry to reqlist
+ * @rl: reqlist where to add the entry
+ * @req: The entry to add
+ *
+ * The request id always increases. As an optimization new requests
+ * are added here with list_add_tail at the end of the bucket lists
+ * while old requests are looked up starting at the beginning of the
+ * lists.
+ */
+static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
+				    struct zfcp_fsf_req *req)
+{
+	unsigned int i;
+	unsigned long flags;
+
+	i = zfcp_reqlist_hash(req->req_id);
+
+	spin_lock_irqsave(&rl->lock, flags);
+	list_add_tail(&req->list, &rl->buckets[i]);
+	spin_unlock_irqrestore(&rl->lock, flags);
+}
+
+/**
+ * zfcp_reqlist_move - Move all entries from reqlist to simple list
+ * @rl: The zfcp_reqlist where to remove all entries
+ * @list: The list where to move all entries
+ */
+static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
+				     struct list_head *list)
+{
+	unsigned int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rl->lock, flags);
+	for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+		list_splice_init(&rl->buckets[i], list);
+	spin_unlock_irqrestore(&rl->lock, flags);
+}
+
+#endif /* ZFCP_REQLIST_H */
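A minimal usage sketch of the API above (not part of the patch; the function name is illustrative). The send path records the pending request, and the response path, see zfcp_fsf_reqid_check() in zfcp_fsf.c earlier in this commit, removes it again by id:

/* Sketch only: track one request from submission to completion. */
static void example_track_request(struct zfcp_reqlist *rl,
				  struct zfcp_fsf_req *req)
{
	struct zfcp_fsf_req *found;

	zfcp_reqlist_add(rl, req);	/* req_id hashes into one of 128 buckets */

	/* ... the request is sent; later the response path does ... */

	found = zfcp_reqlist_find_rm(rl, req->req_id);
	if (found == req)
		return;	/* the request was pending and is now untracked */

	/* an unknown id is treated as fatal, see zfcp_fsf_reqid_check() */
}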
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_scsi.c b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_scsi.c
new file mode 100644
index 0000000..7b35364
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_scsi.c
@@ -0,0 +1,733 @@
+/*
+ * zfcp device driver
+ *
+ * Interface to Linux SCSI midlayer.
+ *
+ * Copyright IBM Corp. 2002, 2013
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/scsi_eh.h>
+#include <linux/atomic.h>
+#include "zfcp_ext.h"
+#include "zfcp_dbf.h"
+#include "zfcp_fc.h"
+#include "zfcp_reqlist.h"
+
+static unsigned int default_depth = 32;
+module_param_named(queue_depth, default_depth, uint, 0600);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
+
+static bool enable_dif;
+module_param_named(dif, enable_dif, bool, 0400);
+MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
+
+static bool allow_lun_scan = 1;
+module_param(allow_lun_scan, bool, 0600);
+MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
+
+static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
+					int reason)
+{
+	switch (reason) {
+	case SCSI_QDEPTH_DEFAULT:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+		break;
+	case SCSI_QDEPTH_QFULL:
+		scsi_track_queue_full(sdev, depth);
+		break;
+	case SCSI_QDEPTH_RAMP_UP:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return sdev->queue_depth;
+}
+
+static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	/* if previous slave_alloc returned early, there is nothing to do */
+	if (!zfcp_sdev->port)
+		return;
+
+	zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
+	put_device(&zfcp_sdev->port->dev);
+}
+
+static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
+{
+	if (sdp->tagged_supported)
+		scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
+	else
+		scsi_adjust_queue_depth(sdp, 0, 1);
+	return 0;
+}
+
+static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
+{
+	set_host_byte(scpnt, result);
+	zfcp_dbf_scsi_fail_send(scpnt);
+	scpnt->scsi_done(scpnt);
+}
+
+static
+int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+	struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
+	int    status, scsi_result, ret;
+
+	/* reset the status for this request */
+	scpnt->result = 0;
+	scpnt->host_scribble = NULL;
+
+	scsi_result = fc_remote_port_chkready(rport);
+	if (unlikely(scsi_result)) {
+		scpnt->result = scsi_result;
+		zfcp_dbf_scsi_fail_send(scpnt);
+		scpnt->scsi_done(scpnt);
+		return 0;
+	}
+
+	status = atomic_read(&zfcp_sdev->status);
+	if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
+		     !(atomic_read(&zfcp_sdev->port->status) &
+		       ZFCP_STATUS_COMMON_ERP_FAILED)) {
+		/* only LUN access is denied, but the port is good;
+		 * not covered by the FC transport, so we have to fail here */
+		zfcp_scsi_command_fail(scpnt, DID_ERROR);
+		return 0;
+	}
+
+	if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
+		/* This could be either
+		 * open LUN pending: this is temporary, will result in
+		 *	open LUN or ERP_FAILED, so retry command
+		 * call to rport_delete pending: mimic retry from
+		 * 	fc_remote_port_chkready until rport is BLOCKED
+		 */
+		zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
+		return 0;
+	}
+
+	ret = zfcp_fsf_fcp_cmnd(scpnt);
+	if (unlikely(ret == -EBUSY))
+		return SCSI_MLQUEUE_DEVICE_BUSY;
+	else if (unlikely(ret < 0))
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	return ret;
+}
+
+static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
+{
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *) sdev->host->hostdata[0];
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_port *port;
+	struct zfcp_unit *unit;
+	int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
+
+	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
+	if (!port)
+		return -ENXIO;
+
+	unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
+	if (unit)
+		put_device(&unit->dev);
+
+	if (!unit && !(allow_lun_scan && npiv)) {
+		put_device(&port->dev);
+		return -ENXIO;
+	}
+
+	zfcp_sdev->port = port;
+	zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF;
+	spin_lock_init(&zfcp_sdev->latencies.lock);
+
+	zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
+	zfcp_erp_lun_reopen(sdev, 0, "scsla_1");
+	zfcp_erp_wait(port->adapter);
+
+	return 0;
+}
+
+static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
+{
+	struct Scsi_Host *scsi_host = scpnt->device->host;
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *) scsi_host->hostdata[0];
+	struct zfcp_fsf_req *old_req, *abrt_req;
+	unsigned long flags;
+	unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
+	int retval = SUCCESS, ret;
+	int retry = 3;
+	char *dbf_tag;
+
+	/* avoid race condition between late normal completion and abort */
+	write_lock_irqsave(&adapter->abort_lock, flags);
+
+	old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
+	if (!old_req) {
+		write_unlock_irqrestore(&adapter->abort_lock, flags);
+		zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL);
+		return FAILED; /* completion could be in progress */
+	}
+	old_req->data = NULL;
+
+	/* don't access old fsf_req after releasing the abort_lock */
+	write_unlock_irqrestore(&adapter->abort_lock, flags);
+
+	while (retry--) {
+		abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);
+		if (abrt_req)
+			break;
+
+		zfcp_erp_wait(adapter);
+		ret = fc_block_scsi_eh(scpnt);
+		if (ret) {
+			zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL);
+			return ret;
+		}
+		if (!(atomic_read(&adapter->status) &
+		      ZFCP_STATUS_COMMON_RUNNING)) {
+			zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL);
+			return SUCCESS;
+		}
+	}
+	if (!abrt_req) {
+		zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL);
+		return FAILED;
+	}
+
+	wait_for_completion(&abrt_req->completion);
+
+	if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
+		dbf_tag = "abrt_ok";
+	else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
+		dbf_tag = "abrt_nn";
+	else {
+		dbf_tag = "abrt_fa";
+		retval = FAILED;
+	}
+	zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req);
+	zfcp_fsf_req_free(abrt_req);
+	return retval;
+}
+
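+/* common helper for the LUN reset and target reset error handlers;
+ * tm_flags selects the FCP task management function to send */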
+static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+	struct zfcp_fsf_req *fsf_req = NULL;
+	int retval = SUCCESS, ret;
+	int retry = 3;
+
+	while (retry--) {
+		fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
+		if (fsf_req)
+			break;
+
+		zfcp_erp_wait(adapter);
+		ret = fc_block_scsi_eh(scpnt);
+		if (ret)
+			return ret;
+
+		if (!(atomic_read(&adapter->status) &
+		      ZFCP_STATUS_COMMON_RUNNING)) {
+			zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
+			return SUCCESS;
+		}
+	}
+	if (!fsf_req)
+		return FAILED;
+
+	wait_for_completion(&fsf_req->completion);
+
+	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
+		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
+		retval = FAILED;
+	} else
+		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
+
+	zfcp_fsf_req_free(fsf_req);
+	return retval;
+}
+
+static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
+{
+	return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
+}
+
+static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
+{
+	return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
+}
+
+static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+	int ret;
+
+	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
+	zfcp_erp_wait(adapter);
+	ret = fc_block_scsi_eh(scpnt);
+	if (ret)
+		return ret;
+
+	return SUCCESS;
+}
+
+struct scsi_transport_template *zfcp_scsi_transport_template;
+
+static struct scsi_host_template zfcp_scsi_host_template = {
+	.module			 = THIS_MODULE,
+	.name			 = "zfcp",
+	.queuecommand		 = zfcp_scsi_queuecommand,
+	.eh_abort_handler	 = zfcp_scsi_eh_abort_handler,
+	.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
+	.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
+	.eh_host_reset_handler	 = zfcp_scsi_eh_host_reset_handler,
+	.slave_alloc		 = zfcp_scsi_slave_alloc,
+	.slave_configure	 = zfcp_scsi_slave_configure,
+	.slave_destroy		 = zfcp_scsi_slave_destroy,
+	.change_queue_depth	 = zfcp_scsi_change_queue_depth,
+	.proc_name		 = "zfcp",
+	.can_queue		 = 4096,
+	.this_id		 = -1,
+	.sg_tablesize		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
+				   /* GCD, adjusted later */
+	.max_sectors		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
+				   /* GCD, adjusted later */
+	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
+	.cmd_per_lun		 = 1,
+	.use_clustering		 = 1,
+	.shost_attrs		 = zfcp_sysfs_shost_attrs,
+	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
+};
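+/* Note: when DIX protection is enabled, zfcp_scsi_set_prot() later adjusts
+ * sg_tablesize and max_sectors so that half of the request's SBAL entries
+ * can hold the protection-data scatterlist. */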
+
+/**
+ * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
+ * @adapter: The zfcp adapter to register with the SCSI midlayer
+ */
+int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
+{
+	struct ccw_dev_id dev_id;
+
+	if (adapter->scsi_host)
+		return 0;
+
+	ccw_device_get_id(adapter->ccw_device, &dev_id);
+	/* register adapter as SCSI host with mid layer of SCSI stack */
+	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
+					     sizeof (struct zfcp_adapter *));
+	if (!adapter->scsi_host) {
+		dev_err(&adapter->ccw_device->dev,
+			"Registering the FCP device with the "
+			"SCSI stack failed\n");
+		return -EIO;
+	}
+
+	/* tell the SCSI stack some characteristics of this adapter */
+	adapter->scsi_host->max_id = 511;
+	adapter->scsi_host->max_lun = 0xFFFFFFFF;
+	adapter->scsi_host->max_channel = 0;
+	adapter->scsi_host->unique_id = dev_id.devno;
+	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
+	adapter->scsi_host->transportt = zfcp_scsi_transport_template;
+
+	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
+
+	if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
+		scsi_host_put(adapter->scsi_host);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
+ * @adapter: The zfcp adapter to unregister.
+ */
+void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
+{
+	struct Scsi_Host *shost;
+	struct zfcp_port *port;
+
+	shost = adapter->scsi_host;
+	if (!shost)
+		return;
+
+	read_lock_irq(&adapter->port_list_lock);
+	list_for_each_entry(port, &adapter->port_list, list)
+		port->rport = NULL;
+	read_unlock_irq(&adapter->port_list_lock);
+
+	fc_remove_host(shost);
+	scsi_remove_host(shost);
+	scsi_host_put(shost);
+	adapter->scsi_host = NULL;
+}
+
+static struct fc_host_statistics*
+zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
+{
+	struct fc_host_statistics *fc_stats;
+
+	if (!adapter->fc_stats) {
+		fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
+		if (!fc_stats)
+			return NULL;
+		adapter->fc_stats = fc_stats; /* freed in adapter_release */
+	}
+	memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
+	return adapter->fc_stats;
+}
+
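+/* compute counters relative to the snapshot taken at the last user-requested
+ * statistics reset, see zfcp_reset_fc_host_stats() */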
+static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
+				      struct fsf_qtcb_bottom_port *data,
+				      struct fsf_qtcb_bottom_port *old)
+{
+	fc_stats->seconds_since_last_reset =
+		data->seconds_since_last_reset - old->seconds_since_last_reset;
+	fc_stats->tx_frames = data->tx_frames - old->tx_frames;
+	fc_stats->tx_words = data->tx_words - old->tx_words;
+	fc_stats->rx_frames = data->rx_frames - old->rx_frames;
+	fc_stats->rx_words = data->rx_words - old->rx_words;
+	fc_stats->lip_count = data->lip - old->lip;
+	fc_stats->nos_count = data->nos - old->nos;
+	fc_stats->error_frames = data->error_frames - old->error_frames;
+	fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
+	fc_stats->link_failure_count = data->link_failure - old->link_failure;
+	fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
+	fc_stats->loss_of_signal_count =
+		data->loss_of_signal - old->loss_of_signal;
+	fc_stats->prim_seq_protocol_err_count =
+		data->psp_error_counts - old->psp_error_counts;
+	fc_stats->invalid_tx_word_count =
+		data->invalid_tx_words - old->invalid_tx_words;
+	fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
+	fc_stats->fcp_input_requests =
+		data->input_requests - old->input_requests;
+	fc_stats->fcp_output_requests =
+		data->output_requests - old->output_requests;
+	fc_stats->fcp_control_requests =
+		data->control_requests - old->control_requests;
+	fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
+	fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
+}
+
+static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
+				   struct fsf_qtcb_bottom_port *data)
+{
+	fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
+	fc_stats->tx_frames = data->tx_frames;
+	fc_stats->tx_words = data->tx_words;
+	fc_stats->rx_frames = data->rx_frames;
+	fc_stats->rx_words = data->rx_words;
+	fc_stats->lip_count = data->lip;
+	fc_stats->nos_count = data->nos;
+	fc_stats->error_frames = data->error_frames;
+	fc_stats->dumped_frames = data->dumped_frames;
+	fc_stats->link_failure_count = data->link_failure;
+	fc_stats->loss_of_sync_count = data->loss_of_sync;
+	fc_stats->loss_of_signal_count = data->loss_of_signal;
+	fc_stats->prim_seq_protocol_err_count = data->psp_error_counts;
+	fc_stats->invalid_tx_word_count = data->invalid_tx_words;
+	fc_stats->invalid_crc_count = data->invalid_crcs;
+	fc_stats->fcp_input_requests = data->input_requests;
+	fc_stats->fcp_output_requests = data->output_requests;
+	fc_stats->fcp_control_requests = data->control_requests;
+	fc_stats->fcp_input_megabytes = data->input_mb;
+	fc_stats->fcp_output_megabytes = data->output_mb;
+}
+
+static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
+{
+	struct zfcp_adapter *adapter;
+	struct fc_host_statistics *fc_stats;
+	struct fsf_qtcb_bottom_port *data;
+	int ret;
+
+	adapter = (struct zfcp_adapter *)host->hostdata[0];
+	fc_stats = zfcp_init_fc_host_stats(adapter);
+	if (!fc_stats)
+		return NULL;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+
+	ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
+	if (ret) {
+		kfree(data);
+		return NULL;
+	}
+
+	if (adapter->stats_reset &&
+	    ((jiffies/HZ - adapter->stats_reset) <
+	     data->seconds_since_last_reset))
+		zfcp_adjust_fc_host_stats(fc_stats, data,
+					  adapter->stats_reset_data);
+	else
+		zfcp_set_fc_host_stats(fc_stats, data);
+
+	kfree(data);
+	return fc_stats;
+}
+
+static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
+{
+	struct zfcp_adapter *adapter;
+	struct fsf_qtcb_bottom_port *data;
+	int ret;
+
+	adapter = (struct zfcp_adapter *)shost->hostdata[0];
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return;
+
+	ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
+	if (ret)
+		kfree(data);
+	else {
+		adapter->stats_reset = jiffies/HZ;
+		kfree(adapter->stats_reset_data);
+		adapter->stats_reset_data = data; /* finally freed in
+						     adapter_release */
+	}
+}
+
+static void zfcp_get_host_port_state(struct Scsi_Host *shost)
+{
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *)shost->hostdata[0];
+	int status = atomic_read(&adapter->status);
+
+	if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
+	    !(status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED))
+		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+	else if (status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
+		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+	else if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
+		fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
+	else
+		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+}
+
+static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
+{
+	rport->dev_loss_tmo = timeout;
+}
+
+/**
+ * zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport
+ * @rport: The FC rport on which to terminate I/O
+ *
+ * Abort all pending SCSI commands for a port by closing the
+ * port. Using a reopen avoids a conflict with a shutdown
+ * overwriting a reopen. The forced reopen ensures that a port that has
+ * disappeared is not considered valid again because of the cached PLOGI
+ * data in non-NPIV mode.
+ */
+static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
+{
+	struct zfcp_port *port;
+	struct Scsi_Host *shost = rport_to_shost(rport);
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *)shost->hostdata[0];
+
+	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
+
+	if (port) {
+		zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
+		put_device(&port->dev);
+	}
+}
+
+static void zfcp_scsi_rport_register(struct zfcp_port *port)
+{
+	struct fc_rport_identifiers ids;
+	struct fc_rport *rport;
+
+	if (port->rport)
+		return;
+
+	ids.node_name = port->wwnn;
+	ids.port_name = port->wwpn;
+	ids.port_id = port->d_id;
+	ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+
+	rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
+	if (!rport) {
+		dev_err(&port->adapter->ccw_device->dev,
+			"Registering port 0x%016Lx failed\n",
+			(unsigned long long)port->wwpn);
+		return;
+	}
+
+	rport->maxframe_size = port->maxframe_size;
+	rport->supported_classes = port->supported_classes;
+	port->rport = rport;
+	port->starget_id = rport->scsi_target_id;
+
+	zfcp_unit_queue_scsi_scan(port);
+}
+
+static void zfcp_scsi_rport_block(struct zfcp_port *port)
+{
+	struct fc_rport *rport = port->rport;
+
+	if (rport) {
+		fc_remote_port_delete(rport);
+		port->rport = NULL;
+	}
+}
+
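+/* The schedule functions below take an extra device reference for the queued
+ * rport work; if the work is already pending and cannot be queued again, the
+ * reference is dropped immediately. */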
+void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
+{
+	get_device(&port->dev);
+	port->rport_task = RPORT_ADD;
+
+	if (!queue_work(port->adapter->work_queue, &port->rport_work))
+		put_device(&port->dev);
+}
+
+void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
+{
+	get_device(&port->dev);
+	port->rport_task = RPORT_DEL;
+
+	if (port->rport && queue_work(port->adapter->work_queue,
+				      &port->rport_work))
+		return;
+
+	put_device(&port->dev);
+}
+
+void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
+{
+	unsigned long flags;
+	struct zfcp_port *port;
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
+		zfcp_scsi_schedule_rport_block(port);
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+}
+
+void zfcp_scsi_rport_work(struct work_struct *work)
+{
+	struct zfcp_port *port = container_of(work, struct zfcp_port,
+					      rport_work);
+
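+	/* rport_task may be set again while the work runs, so loop until
+	 * no further add or block request is pending */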
+	while (port->rport_task) {
+		if (port->rport_task == RPORT_ADD) {
+			port->rport_task = RPORT_NONE;
+			zfcp_scsi_rport_register(port);
+		} else {
+			port->rport_task = RPORT_NONE;
+			zfcp_scsi_rport_block(port);
+		}
+	}
+
+	put_device(&port->dev);
+}
+
+/**
+ * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
+ * @adapter: The adapter whose SCSI host is to be configured for DIF/DIX
+ */
+void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
+{
+	unsigned int mask = 0;
+	unsigned int data_div;
+	struct Scsi_Host *shost = adapter->scsi_host;
+
+	data_div = atomic_read(&adapter->status) &
+		   ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;
+
+	if (enable_dif &&
+	    adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
+		mask |= SHOST_DIF_TYPE1_PROTECTION;
+
+	if (enable_dif && data_div &&
+	    adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
+		mask |= SHOST_DIX_TYPE1_PROTECTION;
+		scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
+		shost->sg_prot_tablesize = adapter->qdio->max_sbale_per_req / 2;
+		shost->sg_tablesize = adapter->qdio->max_sbale_per_req / 2;
+		shost->max_sectors = shost->sg_tablesize * 8;
+	}
+
+	scsi_host_set_prot(shost, mask);
+}
+
+/**
+ * zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error
+ * @scmd: The SCSI command to report the error for
+ * @ascq: The ASCQ to put in the sense buffer
+ *
+ * See the error handling in sd_done for the sense codes used here.
+ * Set DID_SOFT_ERROR to retry the request, if possible.
+ */
+void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
+{
+	scsi_build_sense_buffer(1, scmd->sense_buffer,
+				ILLEGAL_REQUEST, 0x10, ascq);
+	set_driver_byte(scmd, DRIVER_SENSE);
+	scmd->result |= SAM_STAT_CHECK_CONDITION;
+	set_host_byte(scmd, DID_SOFT_ERROR);
+}
+
+struct fc_function_template zfcp_transport_functions = {
+	.show_starget_port_id = 1,
+	.show_starget_port_name = 1,
+	.show_starget_node_name = 1,
+	.show_rport_supported_classes = 1,
+	.show_rport_maxframe_size = 1,
+	.show_rport_dev_loss_tmo = 1,
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_permanent_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_supported_speeds = 1,
+	.show_host_maxframe_size = 1,
+	.show_host_serial_number = 1,
+	.get_fc_host_stats = zfcp_get_fc_host_stats,
+	.reset_fc_host_stats = zfcp_reset_fc_host_stats,
+	.set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
+	.get_host_port_state = zfcp_get_host_port_state,
+	.terminate_rport_io = zfcp_scsi_terminate_rport_io,
+	.show_host_port_state = 1,
+	.show_host_active_fc4s = 1,
+	.bsg_request = zfcp_fc_exec_bsg_job,
+	.bsg_timeout = zfcp_fc_timeout_bsg_job,
+	/* no functions registered for the following dynamic attributes;
+	   they are set directly by the LLDD */
+	.show_host_port_type = 1,
+	.show_host_symbolic_name = 1,
+	.show_host_speed = 1,
+	.show_host_port_id = 1,
+	.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
+};
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_sysfs.c b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_sysfs.c
new file mode 100644
index 0000000..9e62210
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_sysfs.c
@@ -0,0 +1,568 @@
+/*
+ * zfcp device driver
+ *
+ * sysfs attributes.
+ *
+ * Copyright IBM Corporation 2008, 2010
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/slab.h>
+#include "zfcp_ext.h"
+
+#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
+struct device_attribute dev_attr_##_feat##_##_name = __ATTR(_name, _mode,\
+							    _show, _store)
+#define ZFCP_DEFINE_ATTR(_feat_def, _feat, _name, _format, _value)	       \
+static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev,	       \
+						   struct device_attribute *at,\
+						   char *buf)		       \
+{									       \
+	struct _feat_def *_feat = container_of(dev, struct _feat_def, dev);    \
+									       \
+	return sprintf(buf, _format, _value);				       \
+}									       \
+static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO,				       \
+		     zfcp_sysfs_##_feat##_##_name##_show, NULL);
+
+#define ZFCP_DEFINE_A_ATTR(_name, _format, _value)			     \
+static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev,	     \
+						 struct device_attribute *at,\
+						 char *buf)		     \
+{									     \
+	struct ccw_device *cdev = to_ccwdev(dev);			     \
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);	     \
+	int i;								     \
+									     \
+	if (!adapter)							     \
+		return -ENODEV;						     \
+									     \
+	i = sprintf(buf, _format, _value);				     \
+	zfcp_ccw_adapter_put(adapter);					     \
+	return i;							     \
+}									     \
+static ZFCP_DEV_ATTR(adapter, _name, S_IRUGO,				     \
+		     zfcp_sysfs_adapter_##_name##_show, NULL);
+
+ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
+ZFCP_DEFINE_A_ATTR(peer_wwnn, "0x%016llx\n",
+		   (unsigned long long) adapter->peer_wwnn);
+ZFCP_DEFINE_A_ATTR(peer_wwpn, "0x%016llx\n",
+		   (unsigned long long) adapter->peer_wwpn);
+ZFCP_DEFINE_A_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
+ZFCP_DEFINE_A_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
+ZFCP_DEFINE_A_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
+ZFCP_DEFINE_A_ATTR(hardware_version, "0x%08x\n", adapter->hardware_version);
+ZFCP_DEFINE_A_ATTR(in_recovery, "%d\n", (atomic_read(&adapter->status) &
+					 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
+
+ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
+		 atomic_read(&port->status));
+ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n",
+		 (atomic_read(&port->status) &
+		  ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
+ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
+		 (atomic_read(&port->status) &
+		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
+
+ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
+		 zfcp_unit_sdev_status(unit));
+ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
+		 (zfcp_unit_sdev_status(unit) &
+		  ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
+ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
+		 (zfcp_unit_sdev_status(unit) &
+		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
+ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n",
+		 (zfcp_unit_sdev_status(unit) &
+		  ZFCP_STATUS_LUN_SHARED) != 0);
+ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n",
+		 (zfcp_unit_sdev_status(unit) &
+		  ZFCP_STATUS_LUN_READONLY) != 0);
+
+static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+
+	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+		return sprintf(buf, "1\n");
+
+	return sprintf(buf, "0\n");
+}
+
+static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+	unsigned long val;
+
+	if (strict_strtoul(buf, 0, &val) || val != 0)
+		return -EINVAL;
+
+	zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
+	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2");
+	zfcp_erp_wait(port->adapter);
+
+	return count;
+}
+static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO,
+		     zfcp_sysfs_port_failed_show,
+		     zfcp_sysfs_port_failed_store);
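+/* Example use (illustrative): writing "0" to the port's "failed" attribute
+ * restarts port recovery; any other value is rejected with -EINVAL. */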
+
+static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+	struct scsi_device *sdev;
+	unsigned int status, failed = 1;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		status = atomic_read(&sdev_to_zfcp(sdev)->status);
+		failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
+		scsi_device_put(sdev);
+	}
+
+	return sprintf(buf, "%d\n", failed);
+}
+
+static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+	unsigned long val;
+	struct scsi_device *sdev;
+
+	if (strict_strtoul(buf, 0, &val) || val != 0)
+		return -EINVAL;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
+		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
+				    "syufai2");
+		zfcp_erp_wait(unit->port->adapter);
+	} else
+		zfcp_unit_scsi_scan(unit);
+
+	return count;
+}
+static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO,
+		     zfcp_sysfs_unit_failed_show,
+		     zfcp_sysfs_unit_failed_store);
+
+static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+	int i;
+
+	if (!adapter)
+		return -ENODEV;
+
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+		i = sprintf(buf, "1\n");
+	else
+		i = sprintf(buf, "0\n");
+
+	zfcp_ccw_adapter_put(adapter);
+	return i;
+}
+
+static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
+					       struct device_attribute *attr,
+					       const char *buf, size_t count)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+	unsigned long val;
+	int retval = 0;
+
+	if (!adapter)
+		return -ENODEV;
+
+	if (strict_strtoul(buf, 0, &val) || val != 0) {
+		retval = -EINVAL;
+		goto out;
+	}
+
+	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+				"syafai2");
+	zfcp_erp_wait(adapter);
+out:
+	zfcp_ccw_adapter_put(adapter);
+	return retval ? retval : (ssize_t) count;
+}
+static ZFCP_DEV_ATTR(adapter, failed, S_IWUSR | S_IRUGO,
+		     zfcp_sysfs_adapter_failed_show,
+		     zfcp_sysfs_adapter_failed_store);
+
+static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	if (!adapter)
+		return -ENODEV;
+
+	/* sync the user-space with the kernel invocation of scan_work */
+	queue_work(adapter->work_queue, &adapter->scan_work);
+	flush_work(&adapter->scan_work);
+	zfcp_ccw_adapter_put(adapter);
+
+	return (ssize_t) count;
+}
+static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
+		     zfcp_sysfs_port_rescan_store);
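+/* Example use (illustrative): any write to "port_rescan" queues the adapter's
+ * scan_work and waits for it to complete, e.g. "echo 1 > port_rescan". */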
+
+DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
+
+static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+	struct zfcp_port *port;
+	u64 wwpn;
+	int retval = -EINVAL;
+
+	if (!adapter)
+		return -ENODEV;
+
+	if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn))
+		goto out;
+
+	port = zfcp_get_port_by_wwpn(adapter, wwpn);
+	if (!port)
+		goto out;
+	else
+		retval = 0;
+
+	mutex_lock(&zfcp_sysfs_port_units_mutex);
+	if (atomic_read(&port->units) > 0) {
+		retval = -EBUSY;
+		mutex_unlock(&zfcp_sysfs_port_units_mutex);
+		goto out;
+	}
+	/* port is about to be removed, so no more unit_add */
+	atomic_set(&port->units, -1);
+	mutex_unlock(&zfcp_sysfs_port_units_mutex);
+
+	write_lock_irq(&adapter->port_list_lock);
+	list_del(&port->list);
+	write_unlock_irq(&adapter->port_list_lock);
+
+	put_device(&port->dev);
+
+	zfcp_erp_port_shutdown(port, 0, "syprs_1");
+	zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
+ out:
+	zfcp_ccw_adapter_put(adapter);
+	return retval ? retval : (ssize_t) count;
+}
+static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
+		     zfcp_sysfs_port_remove_store);
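+/* Example use (illustrative): writing a WWPN such as 0x500507630300c562 to
+ * "port_remove" deletes that remote port; the write fails with -EBUSY while
+ * units are still configured on the port. */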
+
+static struct attribute *zfcp_adapter_attrs[] = {
+	&dev_attr_adapter_failed.attr,
+	&dev_attr_adapter_in_recovery.attr,
+	&dev_attr_adapter_port_remove.attr,
+	&dev_attr_adapter_port_rescan.attr,
+	&dev_attr_adapter_peer_wwnn.attr,
+	&dev_attr_adapter_peer_wwpn.attr,
+	&dev_attr_adapter_peer_d_id.attr,
+	&dev_attr_adapter_card_version.attr,
+	&dev_attr_adapter_lic_version.attr,
+	&dev_attr_adapter_status.attr,
+	&dev_attr_adapter_hardware_version.attr,
+	NULL
+};
+
+struct attribute_group zfcp_sysfs_adapter_attrs = {
+	.attrs = zfcp_adapter_attrs,
+};
+
+static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+	u64 fcp_lun;
+	int retval;
+
+	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
+		return -EINVAL;
+
+	retval = zfcp_unit_add(port, fcp_lun);
+	if (retval)
+		return retval;
+
+	return count;
+}
+static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
+
+static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+	u64 fcp_lun;
+
+	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
+		return -EINVAL;
+
+	if (zfcp_unit_remove(port, fcp_lun))
+		return -EINVAL;
+
+	return count;
+}
+static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
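+/* Example use (illustrative): units are configured by writing the FCP LUN to
+ * the port's "unit_add" attribute and removed via "unit_remove", e.g.
+ * "echo 0x4010403300000000 > unit_add". */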
+
+static struct attribute *zfcp_port_attrs[] = {
+	&dev_attr_unit_add.attr,
+	&dev_attr_unit_remove.attr,
+	&dev_attr_port_failed.attr,
+	&dev_attr_port_in_recovery.attr,
+	&dev_attr_port_status.attr,
+	&dev_attr_port_access_denied.attr,
+	NULL
+};
+
+/**
+ * zfcp_sysfs_port_attrs - sysfs attributes for all other ports
+ */
+struct attribute_group zfcp_sysfs_port_attrs = {
+	.attrs = zfcp_port_attrs,
+};
+
+static struct attribute *zfcp_unit_attrs[] = {
+	&dev_attr_unit_failed.attr,
+	&dev_attr_unit_in_recovery.attr,
+	&dev_attr_unit_status.attr,
+	&dev_attr_unit_access_denied.attr,
+	&dev_attr_unit_access_shared.attr,
+	&dev_attr_unit_access_readonly.attr,
+	NULL
+};
+
+struct attribute_group zfcp_sysfs_unit_attrs = {
+	.attrs = zfcp_unit_attrs,
+};
+
+#define ZFCP_DEFINE_LATENCY_ATTR(_name) 				\
+static ssize_t								\
+zfcp_sysfs_unit_##_name##_latency_show(struct device *dev,		\
+				       struct device_attribute *attr,	\
+				       char *buf) {			\
+	struct scsi_device *sdev = to_scsi_device(dev);			\
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);		\
+	struct zfcp_latencies *lat = &zfcp_sdev->latencies;		\
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;	\
+	unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc;	\
+									\
+	spin_lock_bh(&lat->lock);					\
+	fsum = lat->_name.fabric.sum * adapter->timer_ticks;		\
+	fmin = lat->_name.fabric.min * adapter->timer_ticks;		\
+	fmax = lat->_name.fabric.max * adapter->timer_ticks;		\
+	csum = lat->_name.channel.sum * adapter->timer_ticks;		\
+	cmin = lat->_name.channel.min * adapter->timer_ticks;		\
+	cmax = lat->_name.channel.max * adapter->timer_ticks;		\
+	cc  = lat->_name.counter;					\
+	spin_unlock_bh(&lat->lock);					\
+									\
+	do_div(fsum, 1000);						\
+	do_div(fmin, 1000);						\
+	do_div(fmax, 1000);						\
+	do_div(csum, 1000);						\
+	do_div(cmin, 1000);						\
+	do_div(cmax, 1000);						\
+									\
+	return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n",	\
+		       fmin, fmax, fsum, cmin, cmax, csum, cc); 	\
+}									\
+static ssize_t								\
+zfcp_sysfs_unit_##_name##_latency_store(struct device *dev,		\
+					struct device_attribute *attr,	\
+					const char *buf, size_t count)	\
+{									\
+	struct scsi_device *sdev = to_scsi_device(dev);			\
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);		\
+	struct zfcp_latencies *lat = &zfcp_sdev->latencies;		\
+	unsigned long flags;						\
+									\
+	spin_lock_irqsave(&lat->lock, flags);				\
+	lat->_name.fabric.sum = 0;					\
+	lat->_name.fabric.min = 0xFFFFFFFF;				\
+	lat->_name.fabric.max = 0;					\
+	lat->_name.channel.sum = 0;					\
+	lat->_name.channel.min = 0xFFFFFFFF;				\
+	lat->_name.channel.max = 0;					\
+	lat->_name.counter = 0;						\
+	spin_unlock_irqrestore(&lat->lock, flags);			\
+									\
+	return (ssize_t) count;						\
+}									\
+static DEVICE_ATTR(_name##_latency, S_IWUSR | S_IRUGO,			\
+		   zfcp_sysfs_unit_##_name##_latency_show,		\
+		   zfcp_sysfs_unit_##_name##_latency_store);
+
+ZFCP_DEFINE_LATENCY_ATTR(read);
+ZFCP_DEFINE_LATENCY_ATTR(write);
+ZFCP_DEFINE_LATENCY_ATTR(cmd);
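+/* Each <name>_latency attribute reports seven values: fabric min, max and sum,
+ * channel min, max and sum, and the request count; writing any value to the
+ * attribute resets these counters. */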
+
+#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value)			\
+static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev,	\
+					      struct device_attribute *attr,\
+					      char *buf)                 \
+{                                                                        \
+	struct scsi_device *sdev = to_scsi_device(dev);			 \
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);		 \
+	struct zfcp_port *port = zfcp_sdev->port;			 \
+									 \
+	return sprintf(buf, _format, _value);                            \
+}                                                                        \
+static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
+
+ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
+		      dev_name(&port->adapter->ccw_device->dev));
+ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
+		      (unsigned long long) port->wwpn);
+
+static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
+}
+static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
+
+struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
+	&dev_attr_fcp_lun,
+	&dev_attr_wwpn,
+	&dev_attr_hba_id,
+	&dev_attr_read_latency,
+	&dev_attr_write_latency,
+	&dev_attr_cmd_latency,
+	NULL
+};
+
+static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct Scsi_Host *scsi_host = dev_to_shost(dev);
+	struct fsf_qtcb_bottom_port *qtcb_port;
+	struct zfcp_adapter *adapter;
+	int retval;
+
+	adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
+	if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
+		return -EOPNOTSUPP;
+
+	qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
+	if (!qtcb_port)
+		return -ENOMEM;
+
+	retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
+	if (!retval)
+		retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
+				 qtcb_port->cb_util, qtcb_port->a_util);
+	kfree(qtcb_port);
+	return retval;
+}
+static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
+
+static int zfcp_sysfs_adapter_ex_config(struct device *dev,
+					struct fsf_statistics_info *stat_inf)
+{
+	struct Scsi_Host *scsi_host = dev_to_shost(dev);
+	struct fsf_qtcb_bottom_config *qtcb_config;
+	struct zfcp_adapter *adapter;
+	int retval;
+
+	adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
+	if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
+		return -EOPNOTSUPP;
+
+	qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
+			      GFP_KERNEL);
+	if (!qtcb_config)
+		return -ENOMEM;
+
+	retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
+	if (!retval)
+		*stat_inf = qtcb_config->stat_info;
+
+	kfree(qtcb_config);
+	return retval;
+}
+
+#define ZFCP_SHOST_ATTR(_name, _format, _arg...)			\
+static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev,	\
+						 struct device_attribute *attr,\
+						 char *buf)		\
+{									\
+	struct fsf_statistics_info stat_info;				\
+	int retval;							\
+									\
+	retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);		\
+	if (retval)							\
+		return retval;						\
+									\
+	return sprintf(buf, _format, ## _arg);				\
+}									\
+static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
+
+ZFCP_SHOST_ATTR(requests, "%llu %llu %llu\n",
+		(unsigned long long) stat_info.input_req,
+		(unsigned long long) stat_info.output_req,
+		(unsigned long long) stat_info.control_req);
+
+ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
+		(unsigned long long) stat_info.input_mb,
+		(unsigned long long) stat_info.output_mb);
+
+ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
+		(unsigned long long) stat_info.seconds_act);
+
+static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	struct Scsi_Host *scsi_host = class_to_shost(dev);
+	struct zfcp_qdio *qdio =
+		((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio;
+	u64 util;
+
+	spin_lock_bh(&qdio->stat_lock);
+	util = qdio->req_q_util;
+	spin_unlock_bh(&qdio->stat_lock);
+
+	return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full),
+		       (unsigned long long)util);
+}
+static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
+
+struct device_attribute *zfcp_sysfs_shost_attrs[] = {
+	&dev_attr_utilization,
+	&dev_attr_requests,
+	&dev_attr_megabytes,
+	&dev_attr_seconds_active,
+	&dev_attr_queue_full,
+	NULL
+};
diff --git a/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_unit.c b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_unit.c
new file mode 100644
index 0000000..4e6a535
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/s390/scsi/zfcp_unit.c
@@ -0,0 +1,260 @@
+/*
+ * zfcp device driver
+ *
+ * Tracking of manually configured LUNs and helper functions to
+ * register the LUNs with the SCSI midlayer.
+ *
+ * Copyright IBM Corporation 2010
+ */
+
+#include "zfcp_def.h"
+#include "zfcp_ext.h"
+
+/**
+ * zfcp_unit_scsi_scan - Register LUN with SCSI midlayer
+ * @unit: The zfcp LUN/unit to register
+ *
+ * When the SCSI midlayer is not allowed to automatically scan and
+ * attach SCSI devices, zfcp has to register the individual devices
+ * with the SCSI midlayer.
+ */
+void zfcp_unit_scsi_scan(struct zfcp_unit *unit)
+{
+	struct fc_rport *rport = unit->port->rport;
+	unsigned int lun;
+
+	lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
+
+	if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
+		scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1);
+}
+
+static void zfcp_unit_scsi_scan_work(struct work_struct *work)
+{
+	struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
+					      scsi_work);
+
+	zfcp_unit_scsi_scan(unit);
+	put_device(&unit->dev);
+}
+
+/**
+ * zfcp_unit_queue_scsi_scan - Register configured units on port
+ * @port: The zfcp_port where to register units
+ *
+ * After opening a port, all units configured on this port have to be
+ * registered with the SCSI midlayer. This function should be called
+ * after calling fc_remote_port_add, so that the fc_rport is already
+ * ONLINE and the call to scsi_scan_target runs the same way as the
+ * call in the FC transport class.
+ */
+void zfcp_unit_queue_scsi_scan(struct zfcp_port *port)
+{
+	struct zfcp_unit *unit;
+
+	read_lock_irq(&port->unit_list_lock);
+	list_for_each_entry(unit, &port->unit_list, list) {
+		get_device(&unit->dev);
+		if (scsi_queue_work(port->adapter->scsi_host,
+				    &unit->scsi_work) <= 0)
+			put_device(&unit->dev);
+	}
+	read_unlock_irq(&port->unit_list_lock);
+}
+
+static struct zfcp_unit *_zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+
+	list_for_each_entry(unit, &port->unit_list, list)
+		if (unit->fcp_lun == fcp_lun) {
+			get_device(&unit->dev);
+			return unit;
+		}
+
+	return NULL;
+}
+
+/**
+ * zfcp_unit_find - Find and return zfcp_unit with specified FCP LUN
+ * @port: zfcp_port in which to look for the unit
+ * @fcp_lun: 64 Bit FCP LUN used to identify the zfcp_unit
+ *
+ * If zfcp_unit is found, a reference is acquired that has to be
+ * released later.
+ *
+ * Returns: Pointer to the zfcp_unit, or NULL if there is no zfcp_unit
+ *          with the specified FCP LUN.
+ */
+struct zfcp_unit *zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+
+	read_lock_irq(&port->unit_list_lock);
+	unit = _zfcp_unit_find(port, fcp_lun);
+	read_unlock_irq(&port->unit_list_lock);
+	return unit;
+}
+
+/**
+ * zfcp_unit_release - Decrement the unit count of the port and free the zfcp_unit.
+ * @dev: pointer to device in zfcp_unit
+ */
+static void zfcp_unit_release(struct device *dev)
+{
+	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+
+	atomic_dec(&unit->port->units);
+	kfree(unit);
+}
+
+/**
+ * zfcp_unit_add - add unit to the unit list of a port
+ * @port: pointer to port where unit is added
+ * @fcp_lun: FCP LUN of unit to be added
+ * Returns: 0 on success, or a negative error code on failure
+ *
+ * Sets up some unit internal structures and creates a sysfs entry.
+ */
+int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+	int retval = 0;
+
+	mutex_lock(&zfcp_sysfs_port_units_mutex);
+	if (atomic_read(&port->units) == -1) {
+		/* port is already gone */
+		retval = -ENODEV;
+		goto out;
+	}
+
+	unit = zfcp_unit_find(port, fcp_lun);
+	if (unit) {
+		put_device(&unit->dev);
+		retval = -EEXIST;
+		goto out;
+	}
+
+	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
+	if (!unit) {
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	unit->port = port;
+	unit->fcp_lun = fcp_lun;
+	unit->dev.parent = &port->dev;
+	unit->dev.release = zfcp_unit_release;
+	INIT_WORK(&unit->scsi_work, zfcp_unit_scsi_scan_work);
+
+	if (dev_set_name(&unit->dev, "0x%016llx",
+			 (unsigned long long) fcp_lun)) {
+		kfree(unit);
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	if (device_register(&unit->dev)) {
+		put_device(&unit->dev);
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
+		device_unregister(&unit->dev);
+		retval = -EINVAL;
+		goto out;
+	}
+
+	atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
+
+	write_lock_irq(&port->unit_list_lock);
+	list_add_tail(&unit->list, &port->unit_list);
+	write_unlock_irq(&port->unit_list_lock);
+
+	zfcp_unit_scsi_scan(unit);
+
+out:
+	mutex_unlock(&zfcp_sysfs_port_units_mutex);
+	return retval;
+}
+
+/**
+ * zfcp_unit_sdev - Return SCSI device for zfcp_unit
+ * @unit: The zfcp_unit to get the SCSI device for
+ *
+ * Returns: scsi_device pointer on success, NULL if there is no SCSI
+ *          device for this zfcp_unit
+ *
+ * On success, the caller also holds a reference to the SCSI device
+ * that must be released with scsi_device_put.
+ */
+struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit)
+{
+	struct Scsi_Host *shost;
+	struct zfcp_port *port;
+	unsigned int lun;
+
+	lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
+	port = unit->port;
+	shost = port->adapter->scsi_host;
+	return scsi_device_lookup(shost, 0, port->starget_id, lun);
+}
+
+/**
+ * zfcp_unit_sdev_status - Return zfcp LUN status for SCSI device
+ * @unit: The unit to lookup the SCSI device for
+ *
+ * Returns the zfcp LUN status field of the SCSI device if the SCSI device
+ * for the zfcp_unit exists, 0 otherwise.
+ */
+unsigned int zfcp_unit_sdev_status(struct zfcp_unit *unit)
+{
+	unsigned int status = 0;
+	struct scsi_device *sdev;
+	struct zfcp_scsi_dev *zfcp_sdev;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		zfcp_sdev = sdev_to_zfcp(sdev);
+		status = atomic_read(&zfcp_sdev->status);
+		scsi_device_put(sdev);
+	}
+
+	return status;
+}
+
+/**
+ * zfcp_unit_remove - Remove entry from list of configured units
+ * @port: The port from which to remove the unit
+ * @fcp_lun: The 64 bit LUN of the unit to remove
+ *
+ * Returns: -EINVAL if a unit with the specified LUN does not exist,
+ *          0 on success.
+ */
+int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+	struct scsi_device *sdev;
+
+	write_lock_irq(&port->unit_list_lock);
+	unit = _zfcp_unit_find(port, fcp_lun);
+	if (unit)
+		list_del(&unit->list);
+	write_unlock_irq(&port->unit_list_lock);
+
+	if (!unit)
+		return -EINVAL;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		scsi_remove_device(sdev);
+		scsi_device_put(sdev);
+	}
+
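+	/* drop the reference taken by _zfcp_unit_find() above */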
+	put_device(&unit->dev);
+
+	zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
+
+	return 0;
+}