[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/Kconfig b/src/kernel/linux/v4.14/drivers/s390/char/Kconfig
new file mode 100644
index 0000000..97c4c9f
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/Kconfig
@@ -0,0 +1,207 @@
+comment "S/390 character device drivers"
+	depends on S390
+
+config TN3270
+	def_tristate y
+	prompt "Support for locally attached 3270 terminals"
+	depends on CCW
+	help
+	  Include support for IBM 3270 terminals.
+
+config TN3270_TTY
+	def_tristate y
+	prompt "Support for tty input/output on 3270 terminals"
+	depends on TN3270 && TTY
+	help
+	  Include support for using an IBM 3270 terminal as a Linux tty.
+
+config TN3270_FS
+	def_tristate m
+	prompt "Support for fullscreen applications on 3270 terminals"
+	depends on TN3270
+	help
+	  Include support for fullscreen applications on an IBM 3270 terminal.
+
+config TN3270_CONSOLE
+	def_bool y
+	prompt "Support for console on 3270 terminal"
+	depends on TN3270=y && TN3270_TTY=y
+	help
+	  Include support for using an IBM 3270 terminal as a Linux system
+	  console.  Available only if 3270 support is compiled in statically.
+
+config TN3215
+	def_bool y
+	prompt "Support for 3215 line mode terminal"
+	depends on CCW && TTY
+	help
+	  Include support for IBM 3215 line-mode terminals.
+
+config TN3215_CONSOLE
+	def_bool y
+	prompt "Support for console on 3215 line mode terminal"
+	depends on TN3215
+	help
+	  Include support for using an IBM 3215 line-mode terminal as a
+	  Linux system console.
+
+config CCW_CONSOLE
+	def_bool y if TN3215_CONSOLE || TN3270_CONSOLE
+
+config SCLP_TTY
+	def_bool y
+	prompt "Support for SCLP line mode terminal"
+	depends on S390 && TTY
+	help
+	  Include support for IBM SCLP line-mode terminals.
+
+config SCLP_CONSOLE
+	def_bool y
+	prompt "Support for console on SCLP line mode terminal"
+	depends on SCLP_TTY
+	help
+	  Include support for using an IBM HWC line-mode terminal as the Linux
+	  system console.
+
+config SCLP_VT220_TTY
+	def_bool y
+	prompt "Support for SCLP VT220-compatible terminal"
+	depends on S390 && TTY
+	help
+	  Include support for an IBM SCLP VT220-compatible terminal.
+
+config SCLP_VT220_CONSOLE
+	def_bool y
+	prompt "Support for console on SCLP VT220-compatible terminal"
+	depends on SCLP_VT220_TTY
+	help
+	  Include support for using an IBM SCLP VT220-compatible terminal as a
+	  Linux system console.
+
+config SCLP_ASYNC
+	def_tristate m
+	prompt "Support for Call Home via Asynchronous SCLP Records"
+	depends on S390
+	help
+	  This option enables the call home function, which is able to inform
+	  the service element and connected organisations about a kernel panic.
+	  You should only select this option if you know what you are doing,
+	  want to inform other people about your kernel panics,
+	  need this feature and intend to run your kernel in LPAR.
+
+config SCLP_ASYNC_ID
+       string "Component ID for Call Home"
+       depends on SCLP_ASYNC
+       default "000000000"
+       help
+	 The Component ID for Call Home is used to identify the correct
+	 problem reporting queue the call home records should be sent to.
+
+	 If you are unsure, please use the default value "000000000".
+
+config HMC_DRV
+	def_tristate m
+	prompt "Support for file transfers from HMC drive CD/DVD-ROM"
+	depends on S390
+	select CRC16
+	help
+	  This option enables support for file transfers from a Hardware
+	  Management Console (HMC) drive CD/DVD-ROM. It is available as a
+	  module, called 'hmcdrv', and also as kernel built-in. There is one
+	  optional parameter for this module: cachesize=N, which modifies the
+	  transfer cache size from its default value 0.5MB to N bytes. If N
+	  is zero, then no caching is performed.
+
+config SCLP_OFB
+	def_bool n
+	prompt "Support for Open-for-Business SCLP Event"
+	depends on S390
+	help
+	  This option enables the Open-for-Business interface to the s390
+	  Service Element.
+
+config S390_TAPE
+	def_tristate m
+	prompt "S/390 tape device support"
+	depends on CCW
+	help
+	  Select this option if you want to access channel-attached tape
+	  devices on IBM S/390 or zSeries.
+	  If you select this option you will also want to select at
+	  least one of the tape interface options and one of the tape
+	  hardware options in order to access a tape device.
+	  This option is also available as a module. The module will be
+	  called tape390 and include all selected interfaces and
+	  hardware drivers.
+
+comment "S/390 tape hardware support"
+	depends on S390_TAPE
+
+config S390_TAPE_34XX
+	def_tristate m
+	prompt "Support for 3480/3490 tape hardware"
+	depends on S390_TAPE
+	help
+	  Select this option if you want to access IBM 3480/3490 magnetic
+	  tape subsystems and 100% compatibles.
+	  It is safe to say "Y" here.
+
+config S390_TAPE_3590
+	def_tristate m
+	prompt "Support for 3590 tape hardware"
+	depends on S390_TAPE
+	help
+	  Select this option if you want to access IBM 3590 magnetic
+	  tape subsystems and 100% compatibles.
+	  It is safe to say "Y" here.
+
+config VMLOGRDR
+	def_tristate m
+	prompt "Support for the z/VM recording system services (VM only)"
+	depends on IUCV
+	help
+	  Select this option if you want to be able to receive records collected
+	  by the z/VM recording system services, eg. from *LOGREC, *ACCOUNT or
+	  *SYMPTOM.
+	  This driver depends on the IUCV support driver.
+
+config VMCP
+	def_bool y
+	prompt "Support for the z/VM CP interface"
+	depends on S390
+	select CMA
+	help
+	  Select this option if you want to be able to interact with the control
+	  program on z/VM
+
+config VMCP_CMA_SIZE
+	int "Memory in MiB reserved for z/VM CP interface"
+	default "4"
+	depends on VMCP
+	help
+	  Specify the default amount of memory in MiB reserved for the z/VM CP
+	  interface. If needed this memory is used for large contiguous memory
+	  allocations. The default can be changed with the kernel command line
+	  parameter "vmcp_cma".
+
+config MONREADER
+	def_tristate m
+	prompt "API for reading z/VM monitor service records"
+	depends on IUCV
+	help
+	  Character device driver for reading z/VM monitor service records
+
+config MONWRITER
+	def_tristate m
+	prompt "API for writing z/VM monitor service records"
+	depends on S390
+	help
+	  Character device driver for writing z/VM monitor service records
+
+config S390_VMUR
+	def_tristate m
+	prompt "z/VM unit record device driver"
+	depends on S390
+	help
+	  Character device driver for z/VM reader, puncher and printer.
+
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/Makefile b/src/kernel/linux/v4.14/drivers/s390/char/Makefile
new file mode 100644
index 0000000..ecc24a4
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/Makefile
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# S/390 character devices
+#
+
+ifdef CONFIG_FUNCTION_TRACER
+# Do not trace early setup code
+CFLAGS_REMOVE_sclp_early_core.o	= $(CC_FLAGS_FTRACE)
+endif
+
+GCOV_PROFILE_sclp_early_core.o		:= n
+KCOV_INSTRUMENT_sclp_early_core.o	:= n
+UBSAN_SANITIZE_sclp_early_core.o	:= n
+
+ifneq ($(CC_FLAGS_MARCH),-march=z900)
+CFLAGS_REMOVE_sclp_early_core.o	+= $(CC_FLAGS_MARCH)
+CFLAGS_sclp_early_core.o		+= -march=z900
+endif
+
+CFLAGS_REMOVE_sclp_early_core.o	+= $(CC_FLAGS_EXPOLINE)
+
+obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
+	 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
+	 sclp_early.o sclp_early_core.o
+
+obj-$(CONFIG_TN3270) += raw3270.o
+obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
+obj-$(CONFIG_TN3270_TTY) += tty3270.o
+obj-$(CONFIG_TN3270_FS) += fs3270.o
+
+obj-$(CONFIG_TN3215) += con3215.o
+
+obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
+obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
+obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
+obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
+
+obj-$(CONFIG_PCI) += sclp_pci.o
+
+obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
+obj-$(CONFIG_VMCP) += vmcp.o
+
+tape-$(CONFIG_PROC_FS) += tape_proc.o
+tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y)
+obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o
+obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
+obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
+obj-$(CONFIG_MONREADER) += monreader.o
+obj-$(CONFIG_MONWRITER) += monwriter.o
+obj-$(CONFIG_S390_VMUR) += vmur.o
+obj-$(CONFIG_CRASH_DUMP) += sclp_sdias.o zcore.o
+
+hmcdrv-objs := hmcdrv_mod.o hmcdrv_dev.o hmcdrv_ftp.o hmcdrv_cache.o diag_ftp.o sclp_ftp.o
+obj-$(CONFIG_HMC_DRV) += hmcdrv.o
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/con3215.c b/src/kernel/linux/v4.14/drivers/s390/char/con3215.c
new file mode 100644
index 0000000..353f0be
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/con3215.c
@@ -0,0 +1,1218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * 3215 line mode terminal driver.
+ *
+ * Copyright IBM Corp. 1999, 2009
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Updated:
+ *  Aug-2000: Added tab support
+ *	      Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
+ */
+
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/vt_kern.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/reboot.h>
+#include <linux/serial.h> /* ASYNC_* flags */
+#include <linux/slab.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/io.h>
+#include <asm/ebcdic.h>
+#include <linux/uaccess.h>
+#include <asm/delay.h>
+#include <asm/cpcmd.h>
+#include <asm/setup.h>
+
+#include "ctrlchar.h"
+
+#define NR_3215		    1
+#define NR_3215_REQ	    (4*NR_3215)
+#define RAW3215_BUFFER_SIZE 65536     /* output buffer size */
+#define RAW3215_INBUF_SIZE  256	      /* input buffer size */
+#define RAW3215_MIN_SPACE   128	      /* minimum free space for wakeup */
+#define RAW3215_MIN_WRITE   1024      /* min. length for immediate output */
+#define RAW3215_MAX_BYTES   3968      /* max. bytes to write with one ssch */
+#define RAW3215_MAX_NEWLINE 50	      /* max. lines to write with one ssch */
+#define RAW3215_NR_CCWS	    3
+#define RAW3215_TIMEOUT	    HZ/10     /* time for delayed output */
+
+#define RAW3215_FIXED	    1	      /* 3215 console device is not be freed */
+#define RAW3215_WORKING	    4	      /* set if a request is being worked on */
+#define RAW3215_THROTTLED   8	      /* set if reading is disabled */
+#define RAW3215_STOPPED	    16	      /* set if writing is disabled */
+#define RAW3215_TIMER_RUNS  64	      /* set if the output delay timer is on */
+#define RAW3215_FLUSHING    128	      /* set to flush buffer (no delay) */
+
+#define TAB_STOP_SIZE	    8	      /* tab stop size */
+
+/*
+ * Request types for a 3215 device
+ */
+enum raw3215_type {
+	RAW3215_FREE, RAW3215_READ, RAW3215_WRITE
+};
+
+/*
+ * Request structure for a 3215 device
+ */
+struct raw3215_req {
+	enum raw3215_type type;	      /* type of the request */
+	int start, len;		      /* start index & len in output buffer */
+	int delayable;		      /* indication to wait for more data */
+	int residual;		      /* residual count for read request */
+	struct ccw1 ccws[RAW3215_NR_CCWS]; /* space for the channel program */
+	struct raw3215_info *info;    /* pointer to main structure */
+	struct raw3215_req *next;     /* pointer to next request */
+} __attribute__ ((aligned(8)));
+
+struct raw3215_info {
+	struct tty_port port;
+	struct ccw_device *cdev;      /* device for tty driver */
+	spinlock_t *lock;	      /* pointer to irq lock */
+	int flags;		      /* state flags */
+	char *buffer;		      /* pointer to output buffer */
+	char *inbuf;		      /* pointer to input buffer */
+	int head;		      /* first free byte in output buffer */
+	int count;		      /* number of bytes in output buffer */
+	int written;		      /* number of bytes in write requests */
+	struct raw3215_req *queued_read; /* pointer to queued read requests */
+	struct raw3215_req *queued_write;/* pointer to queued write requests */
+	struct tasklet_struct tlet;   /* tasklet to invoke tty_wakeup */
+	wait_queue_head_t empty_wait; /* wait queue for flushing */
+	struct timer_list timer;      /* timer for delayed output */
+	int line_pos;		      /* position on the line (for tabs) */
+	char ubuffer[80];	      /* copy_from_user buffer */
+};
+
+/* array of 3215 devices structures */
+static struct raw3215_info *raw3215[NR_3215];
+/* spinlock to protect the raw3215 array */
+static DEFINE_SPINLOCK(raw3215_device_lock);
+/* list of free request structures */
+static struct raw3215_req *raw3215_freelist;
+/* spinlock to protect free list */
+static spinlock_t raw3215_freelist_lock;
+
+static struct tty_driver *tty3215_driver;
+
+/*
+ * Get a request structure from the free list
+ */
+static inline struct raw3215_req *raw3215_alloc_req(void)
+{
+	struct raw3215_req *req;
+	unsigned long flags;
+
+	spin_lock_irqsave(&raw3215_freelist_lock, flags);
+	req = raw3215_freelist;
+	raw3215_freelist = req->next;
+	spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
+	return req;
+}
+
+/*
+ * Put a request structure back to the free list
+ */
+static inline void raw3215_free_req(struct raw3215_req *req)
+{
+	unsigned long flags;
+
+	if (req->type == RAW3215_FREE)
+		return;		/* don't free a free request */
+	req->type = RAW3215_FREE;
+	spin_lock_irqsave(&raw3215_freelist_lock, flags);
+	req->next = raw3215_freelist;
+	raw3215_freelist = req;
+	spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
+}
+
+/*
+ * Set up a read request that reads up to 160 byte from the 3215 device.
+ * If there is a queued read request it is used, but that shouldn't happen
+ * because a 3215 terminal won't accept a new read before the old one is
+ * completed.
+ */
+static void raw3215_mk_read_req(struct raw3215_info *raw)
+{
+	struct raw3215_req *req;
+	struct ccw1 *ccw;
+
+	/* there can only be ONE read request at a time */
+	req = raw->queued_read;
+	if (req == NULL) {
+		/* no queued read request, use new req structure */
+		req = raw3215_alloc_req();
+		req->type = RAW3215_READ;
+		req->info = raw;
+		raw->queued_read = req;
+	}
+
+	ccw = req->ccws;
+	ccw->cmd_code = 0x0A; /* read inquiry */
+	ccw->flags = 0x20;    /* ignore incorrect length */
+	ccw->count = 160;
+	ccw->cda = (__u32) __pa(raw->inbuf);
+}
+
+/*
+ * Set up a write request with the information from the main structure.
+ * A ccw chain is created that writes as much as possible from the output
+ * buffer to the 3215 device. If a queued write exists it is replaced by
+ * the new, probably lengthened request.
+ */
+static void raw3215_mk_write_req(struct raw3215_info *raw)
+{
+	struct raw3215_req *req;
+	struct ccw1 *ccw;
+	int len, count, ix, lines;
+
+	if (raw->count <= raw->written)
+		return;
+	/* check if there is a queued write request */
+	req = raw->queued_write;
+	if (req == NULL) {
+		/* no queued write request, use new req structure */
+		req = raw3215_alloc_req();
+		req->type = RAW3215_WRITE;
+		req->info = raw;
+		raw->queued_write = req;
+	} else {
+		raw->written -= req->len;
+	}
+
+	ccw = req->ccws;
+	req->start = (raw->head - raw->count + raw->written) &
+		     (RAW3215_BUFFER_SIZE - 1);
+	/*
+	 * now we have to count newlines. We can at max accept
+	 * RAW3215_MAX_NEWLINE newlines in a single ssch due to
+	 * a restriction in VM
+	 */
+	lines = 0;
+	ix = req->start;
+	while (lines < RAW3215_MAX_NEWLINE && ix != raw->head) {
+		if (raw->buffer[ix] == 0x15)
+			lines++;
+		ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1);
+	}
+	len = ((ix - 1 - req->start) & (RAW3215_BUFFER_SIZE - 1)) + 1;
+	if (len > RAW3215_MAX_BYTES)
+		len = RAW3215_MAX_BYTES;
+	req->len = len;
+	raw->written += len;
+
+	/* set the indication if we should try to enlarge this request */
+	req->delayable = (ix == raw->head) && (len < RAW3215_MIN_WRITE);
+
+	ix = req->start;
+	while (len > 0) {
+		if (ccw > req->ccws)
+			ccw[-1].flags |= 0x40; /* use command chaining */
+		ccw->cmd_code = 0x01; /* write, auto carrier return */
+		ccw->flags = 0x20;    /* ignore incorrect length ind.  */
+		ccw->cda =
+			(__u32) __pa(raw->buffer + ix);
+		count = len;
+		if (ix + count > RAW3215_BUFFER_SIZE)
+			count = RAW3215_BUFFER_SIZE - ix;
+		ccw->count = count;
+		len -= count;
+		ix = (ix + count) & (RAW3215_BUFFER_SIZE - 1);
+		ccw++;
+	}
+	/*
+	 * Add a NOP to the channel program. 3215 devices are purely
+	 * emulated and its much better to avoid the channel end
+	 * interrupt in this case.
+	 */
+	if (ccw > req->ccws)
+		ccw[-1].flags |= 0x40; /* use command chaining */
+	ccw->cmd_code = 0x03; /* NOP */
+	ccw->flags = 0;
+	ccw->cda = 0;
+	ccw->count = 1;
+}
+
+/*
+ * Start a read or a write request
+ */
+static void raw3215_start_io(struct raw3215_info *raw)
+{
+	struct raw3215_req *req;
+	int res;
+
+	req = raw->queued_read;
+	if (req != NULL &&
+	    !(raw->flags & (RAW3215_WORKING | RAW3215_THROTTLED))) {
+		/* dequeue request */
+		raw->queued_read = NULL;
+		res = ccw_device_start(raw->cdev, req->ccws,
+				       (unsigned long) req, 0, 0);
+		if (res != 0) {
+			/* do_IO failed, put request back to queue */
+			raw->queued_read = req;
+		} else {
+			raw->flags |= RAW3215_WORKING;
+		}
+	}
+	req = raw->queued_write;
+	if (req != NULL &&
+	    !(raw->flags & (RAW3215_WORKING | RAW3215_STOPPED))) {
+		/* dequeue request */
+		raw->queued_write = NULL;
+		res = ccw_device_start(raw->cdev, req->ccws,
+				       (unsigned long) req, 0, 0);
+		if (res != 0) {
+			/* do_IO failed, put request back to queue */
+			raw->queued_write = req;
+		} else {
+			raw->flags |= RAW3215_WORKING;
+		}
+	}
+}
+
+/*
+ * Function to start a delayed output after RAW3215_TIMEOUT seconds
+ */
+static void raw3215_timeout(unsigned long __data)
+{
+	struct raw3215_info *raw = (struct raw3215_info *) __data;
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	raw->flags &= ~RAW3215_TIMER_RUNS;
+	if (!tty_port_suspended(&raw->port)) {
+		raw3215_mk_write_req(raw);
+		raw3215_start_io(raw);
+		if ((raw->queued_read || raw->queued_write) &&
+		    !(raw->flags & RAW3215_WORKING) &&
+		    !(raw->flags & RAW3215_TIMER_RUNS)) {
+			raw->timer.expires = RAW3215_TIMEOUT + jiffies;
+			add_timer(&raw->timer);
+			raw->flags |= RAW3215_TIMER_RUNS;
+		}
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+}
+
+/*
+ * Function to conditionally start an IO. A read is started immediately,
+ * a write is only started immediately if the flush flag is on or the
+ * amount of data is bigger than RAW3215_MIN_WRITE. If a write is not
+ * done immediately a timer is started with a delay of RAW3215_TIMEOUT.
+ */
+static inline void raw3215_try_io(struct raw3215_info *raw)
+{
+	if (!tty_port_initialized(&raw->port) || tty_port_suspended(&raw->port))
+		return;
+	if (raw->queued_read != NULL)
+		raw3215_start_io(raw);
+	else if (raw->queued_write != NULL) {
+		if ((raw->queued_write->delayable == 0) ||
+		    (raw->flags & RAW3215_FLUSHING)) {
+			/* execute write requests bigger than minimum size */
+			raw3215_start_io(raw);
+		}
+	}
+	if ((raw->queued_read || raw->queued_write) &&
+	    !(raw->flags & RAW3215_WORKING) &&
+	    !(raw->flags & RAW3215_TIMER_RUNS)) {
+		raw->timer.expires = RAW3215_TIMEOUT + jiffies;
+		add_timer(&raw->timer);
+		raw->flags |= RAW3215_TIMER_RUNS;
+	}
+}
+
+/*
+ * Call tty_wakeup from tasklet context
+ */
+static void raw3215_wakeup(unsigned long data)
+{
+	struct raw3215_info *raw = (struct raw3215_info *) data;
+	struct tty_struct *tty;
+
+	tty = tty_port_tty_get(&raw->port);
+	if (tty) {
+		tty_wakeup(tty);
+		tty_kref_put(tty);
+	}
+}
+
+/*
+ * Try to start the next IO and wake up processes waiting on the tty.
+ */
+static void raw3215_next_io(struct raw3215_info *raw, struct tty_struct *tty)
+{
+	raw3215_mk_write_req(raw);
+	raw3215_try_io(raw);
+	if (tty && RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE)
+		tasklet_schedule(&raw->tlet);
+}
+
+/*
+ * Interrupt routine, called from common io layer
+ */
+static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
+			struct irb *irb)
+{
+	struct raw3215_info *raw;
+	struct raw3215_req *req;
+	struct tty_struct *tty;
+	int cstat, dstat;
+	int count;
+
+	raw = dev_get_drvdata(&cdev->dev);
+	req = (struct raw3215_req *) intparm;
+	tty = tty_port_tty_get(&raw->port);
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+	if (cstat != 0)
+		raw3215_next_io(raw, tty);
+	if (dstat & 0x01) { /* we got a unit exception */
+		dstat &= ~0x01;	 /* we can ignore it */
+	}
+	switch (dstat) {
+	case 0x80:
+		if (cstat != 0)
+			break;
+		/* Attention interrupt, someone hit the enter key */
+		raw3215_mk_read_req(raw);
+		raw3215_next_io(raw, tty);
+		break;
+	case 0x08:
+	case 0x0C:
+		/* Channel end interrupt. */
+		if ((raw = req->info) == NULL)
+			goto put_tty;	     /* That shouldn't happen ... */
+		if (req->type == RAW3215_READ) {
+			/* store residual count, then wait for device end */
+			req->residual = irb->scsw.cmd.count;
+		}
+		if (dstat == 0x08)
+			break;
+	case 0x04:
+		/* Device end interrupt. */
+		if ((raw = req->info) == NULL)
+			goto put_tty;	     /* That shouldn't happen ... */
+		if (req->type == RAW3215_READ && tty != NULL) {
+			unsigned int cchar;
+
+			count = 160 - req->residual;
+			EBCASC(raw->inbuf, count);
+			cchar = ctrlchar_handle(raw->inbuf, count, tty);
+			switch (cchar & CTRLCHAR_MASK) {
+			case CTRLCHAR_SYSRQ:
+				break;
+
+			case CTRLCHAR_CTRL:
+				tty_insert_flip_char(&raw->port, cchar,
+						TTY_NORMAL);
+				tty_flip_buffer_push(&raw->port);
+				break;
+
+			case CTRLCHAR_NONE:
+				if (count < 2 ||
+				    (strncmp(raw->inbuf+count-2, "\252n", 2) &&
+				     strncmp(raw->inbuf+count-2, "^n", 2)) ) {
+					/* add the auto \n */
+					raw->inbuf[count] = '\n';
+					count++;
+				} else
+					count -= 2;
+				tty_insert_flip_string(&raw->port, raw->inbuf,
+						count);
+				tty_flip_buffer_push(&raw->port);
+				break;
+			}
+		} else if (req->type == RAW3215_WRITE) {
+			raw->count -= req->len;
+			raw->written -= req->len;
+		}
+		raw->flags &= ~RAW3215_WORKING;
+		raw3215_free_req(req);
+		/* check for empty wait */
+		if (waitqueue_active(&raw->empty_wait) &&
+		    raw->queued_write == NULL &&
+		    raw->queued_read == NULL) {
+			wake_up_interruptible(&raw->empty_wait);
+		}
+		raw3215_next_io(raw, tty);
+		break;
+	default:
+		/* Strange interrupt, I'll do my best to clean up */
+		if (req != NULL && req->type != RAW3215_FREE) {
+			if (req->type == RAW3215_WRITE) {
+				raw->count -= req->len;
+				raw->written -= req->len;
+			}
+			raw->flags &= ~RAW3215_WORKING;
+			raw3215_free_req(req);
+		}
+		raw3215_next_io(raw, tty);
+	}
+put_tty:
+	tty_kref_put(tty);
+}
+
+/*
+ * Drop the oldest line from the output buffer.
+ */
+static void raw3215_drop_line(struct raw3215_info *raw)
+{
+	int ix;
+	char ch;
+
+	BUG_ON(raw->written != 0);
+	ix = (raw->head - raw->count) & (RAW3215_BUFFER_SIZE - 1);
+	while (raw->count > 0) {
+		ch = raw->buffer[ix];
+		ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1);
+		raw->count--;
+		if (ch == 0x15)
+			break;
+	}
+	raw->head = ix;
+}
+
+/*
+ * Wait until length bytes are available in the output buffer.
+ * Has to be called with the s390irq lock held. Can be called
+ * disabled.
+ */
+static void raw3215_make_room(struct raw3215_info *raw, unsigned int length)
+{
+	while (RAW3215_BUFFER_SIZE - raw->count < length) {
+		/* While console is frozen for suspend we have no other
+		 * choice but to drop message from the buffer to make
+		 * room for even more messages. */
+		if (tty_port_suspended(&raw->port)) {
+			raw3215_drop_line(raw);
+			continue;
+		}
+		/* there might be a request pending */
+		raw->flags |= RAW3215_FLUSHING;
+		raw3215_mk_write_req(raw);
+		raw3215_try_io(raw);
+		raw->flags &= ~RAW3215_FLUSHING;
+#ifdef CONFIG_TN3215_CONSOLE
+		ccw_device_wait_idle(raw->cdev);
+#endif
+		/* Enough room freed up ? */
+		if (RAW3215_BUFFER_SIZE - raw->count >= length)
+			break;
+		/* there might be another cpu waiting for the lock */
+		spin_unlock(get_ccwdev_lock(raw->cdev));
+		udelay(100);
+		spin_lock(get_ccwdev_lock(raw->cdev));
+	}
+}
+
+/*
+ * String write routine for 3215 devices
+ */
+static void raw3215_write(struct raw3215_info *raw, const char *str,
+			  unsigned int length)
+{
+	unsigned long flags;
+	int c, count;
+
+	while (length > 0) {
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+		count = (length > RAW3215_BUFFER_SIZE) ?
+					     RAW3215_BUFFER_SIZE : length;
+		length -= count;
+
+		raw3215_make_room(raw, count);
+
+		/* copy string to output buffer and convert it to EBCDIC */
+		while (1) {
+			c = min_t(int, count,
+				  min(RAW3215_BUFFER_SIZE - raw->count,
+				      RAW3215_BUFFER_SIZE - raw->head));
+			if (c <= 0)
+				break;
+			memcpy(raw->buffer + raw->head, str, c);
+			ASCEBC(raw->buffer + raw->head, c);
+			raw->head = (raw->head + c) & (RAW3215_BUFFER_SIZE - 1);
+			raw->count += c;
+			raw->line_pos += c;
+			str += c;
+			count -= c;
+		}
+		if (!(raw->flags & RAW3215_WORKING)) {
+			raw3215_mk_write_req(raw);
+			/* start or queue request */
+			raw3215_try_io(raw);
+		}
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	}
+}
+
+/*
+ * Put character routine for 3215 devices
+ */
+static void raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
+{
+	unsigned long flags;
+	unsigned int length, i;
+
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	if (ch == '\t') {
+		length = TAB_STOP_SIZE - (raw->line_pos%TAB_STOP_SIZE);
+		raw->line_pos += length;
+		ch = ' ';
+	} else if (ch == '\n') {
+		length = 1;
+		raw->line_pos = 0;
+	} else {
+		length = 1;
+		raw->line_pos++;
+	}
+	raw3215_make_room(raw, length);
+
+	for (i = 0; i < length; i++) {
+		raw->buffer[raw->head] = (char) _ascebc[(int) ch];
+		raw->head = (raw->head + 1) & (RAW3215_BUFFER_SIZE - 1);
+		raw->count++;
+	}
+	if (!(raw->flags & RAW3215_WORKING)) {
+		raw3215_mk_write_req(raw);
+		/* start or queue request */
+		raw3215_try_io(raw);
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+}
+
+/*
+ * Flush routine, it simply sets the flush flag and tries to start
+ * pending IO.
+ */
+static void raw3215_flush_buffer(struct raw3215_info *raw)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	if (raw->count > 0) {
+		raw->flags |= RAW3215_FLUSHING;
+		raw3215_try_io(raw);
+		raw->flags &= ~RAW3215_FLUSHING;
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+}
+
+/*
+ * Fire up a 3215 device.
+ */
+static int raw3215_startup(struct raw3215_info *raw)
+{
+	unsigned long flags;
+
+	if (tty_port_initialized(&raw->port))
+		return 0;
+	raw->line_pos = 0;
+	tty_port_set_initialized(&raw->port, 1);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	raw3215_try_io(raw);
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+
+	return 0;
+}
+
+/*
+ * Shutdown a 3215 device.
+ */
+static void raw3215_shutdown(struct raw3215_info *raw)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long flags;
+
+	if (!tty_port_initialized(&raw->port) || (raw->flags & RAW3215_FIXED))
+		return;
+	/* Wait for outstanding requests, then free irq */
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	if ((raw->flags & RAW3215_WORKING) ||
+	    raw->queued_write != NULL ||
+	    raw->queued_read != NULL) {
+		add_wait_queue(&raw->empty_wait, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+		schedule();
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+		remove_wait_queue(&raw->empty_wait, &wait);
+		set_current_state(TASK_RUNNING);
+		tty_port_set_initialized(&raw->port, 1);
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+}
+
+static struct raw3215_info *raw3215_alloc_info(void)
+{
+	struct raw3215_info *info;
+
+	info = kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA);
+	if (!info)
+		return NULL;
+
+	info->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
+	info->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
+	if (!info->buffer || !info->inbuf) {
+		kfree(info->inbuf);
+		kfree(info->buffer);
+		kfree(info);
+		return NULL;
+	}
+
+	setup_timer(&info->timer, raw3215_timeout, (unsigned long)info);
+	init_waitqueue_head(&info->empty_wait);
+	tasklet_init(&info->tlet, raw3215_wakeup, (unsigned long)info);
+	tty_port_init(&info->port);
+
+	return info;
+}
+
+static void raw3215_free_info(struct raw3215_info *raw)
+{
+	kfree(raw->inbuf);
+	kfree(raw->buffer);
+	tty_port_destroy(&raw->port);
+	kfree(raw);
+}
+
+static int raw3215_probe (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+	int line;
+
+	/* Console is special. */
+	if (raw3215[0] && (raw3215[0] == dev_get_drvdata(&cdev->dev)))
+		return 0;
+
+	raw = raw3215_alloc_info();
+	if (raw == NULL)
+		return -ENOMEM;
+
+	raw->cdev = cdev;
+	dev_set_drvdata(&cdev->dev, raw);
+	cdev->handler = raw3215_irq;
+
+	spin_lock(&raw3215_device_lock);
+	for (line = 0; line < NR_3215; line++) {
+		if (!raw3215[line]) {
+			raw3215[line] = raw;
+			break;
+		}
+	}
+	spin_unlock(&raw3215_device_lock);
+	if (line == NR_3215) {
+		raw3215_free_info(raw);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void raw3215_remove (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+	unsigned int line;
+
+	ccw_device_set_offline(cdev);
+	raw = dev_get_drvdata(&cdev->dev);
+	if (raw) {
+		spin_lock(&raw3215_device_lock);
+		for (line = 0; line < NR_3215; line++)
+			if (raw3215[line] == raw)
+				break;
+		raw3215[line] = NULL;
+		spin_unlock(&raw3215_device_lock);
+		dev_set_drvdata(&cdev->dev, NULL);
+		raw3215_free_info(raw);
+	}
+}
+
+static int raw3215_set_online (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+
+	raw = dev_get_drvdata(&cdev->dev);
+	if (!raw)
+		return -ENODEV;
+
+	return raw3215_startup(raw);
+}
+
+static int raw3215_set_offline (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+
+	raw = dev_get_drvdata(&cdev->dev);
+	if (!raw)
+		return -ENODEV;
+
+	raw3215_shutdown(raw);
+
+	return 0;
+}
+
+static int raw3215_pm_stop(struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	/* Empty the output buffer, then prevent new I/O. */
+	raw = dev_get_drvdata(&cdev->dev);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
+	tty_port_set_suspended(&raw->port, 1);
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	return 0;
+}
+
+static int raw3215_pm_start(struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	/* Allow I/O again and flush output buffer. */
+	raw = dev_get_drvdata(&cdev->dev);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	tty_port_set_suspended(&raw->port, 0);
+	raw->flags |= RAW3215_FLUSHING;
+	raw3215_try_io(raw);
+	raw->flags &= ~RAW3215_FLUSHING;
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	return 0;
+}
+
+static struct ccw_device_id raw3215_id[] = {
+	{ CCW_DEVICE(0x3215, 0) },
+	{ /* end of list */ },
+};
+
+static struct ccw_driver raw3215_ccw_driver = {
+	.driver = {
+		.name	= "3215",
+		.owner	= THIS_MODULE,
+	},
+	.ids		= raw3215_id,
+	.probe		= &raw3215_probe,
+	.remove		= &raw3215_remove,
+	.set_online	= &raw3215_set_online,
+	.set_offline	= &raw3215_set_offline,
+	.freeze		= &raw3215_pm_stop,
+	.thaw		= &raw3215_pm_start,
+	.restore	= &raw3215_pm_start,
+	.int_class	= IRQIO_C15,
+};
+
+#ifdef CONFIG_TN3215_CONSOLE
+/*
+ * Write a string to the 3215 console
+ */
+static void con3215_write(struct console *co, const char *str,
+			  unsigned int count)
+{
+	struct raw3215_info *raw;
+	int i;
+
+	if (count <= 0)
+		return;
+	raw = raw3215[0];	/* console 3215 is the first one */
+	while (count > 0) {
+		for (i = 0; i < count; i++)
+			if (str[i] == '\t' || str[i] == '\n')
+				break;
+		raw3215_write(raw, str, i);
+		count -= i;
+		str += i;
+		if (count > 0) {
+			raw3215_putchar(raw, *str);
+			count--;
+			str++;
+		}
+	}
+}
+
+static struct tty_driver *con3215_device(struct console *c, int *index)
+{
+	*index = c->index;
+	return tty3215_driver;
+}
+
+/*
+ * panic() calls con3215_flush through a panic_notifier
+ * before the system enters a disabled, endless loop.
+ */
+static void con3215_flush(void)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	raw = raw3215[0];  /* console 3215 is the first one */
+	if (tty_port_suspended(&raw->port))
+		/* The console is still frozen for suspend. */
+		if (ccw_device_force_console(raw->cdev))
+			/* Forcing didn't work, no panic message .. */
+			return;
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+}
+
+static int con3215_notify(struct notifier_block *self,
+			  unsigned long event, void *data)
+{
+	con3215_flush();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+	.notifier_call = con3215_notify,
+	.priority = 0,
+};
+
+static struct notifier_block on_reboot_nb = {
+	.notifier_call = con3215_notify,
+	.priority = 0,
+};
+
+/*
+ *  The console structure for the 3215 console
+ */
+static struct console con3215 = {
+	.name	 = "ttyS",
+	.write	 = con3215_write,
+	.device	 = con3215_device,
+	.flags	 = CON_PRINTBUFFER,
+};
+
+/*
+ * 3215 console initialization code called from console_init().
+ */
+static int __init con3215_init(void)
+{
+	struct ccw_device *cdev;
+	struct raw3215_info *raw;
+	struct raw3215_req *req;
+	int i;
+
+	/* Check if 3215 is to be the console */
+	if (!CONSOLE_IS_3215)
+		return -ENODEV;
+
+	/* Set the console mode for VM */
+	if (MACHINE_IS_VM) {
+		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
+		cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
+	}
+
+	/* allocate 3215 request structures */
+	raw3215_freelist = NULL;
+	spin_lock_init(&raw3215_freelist_lock);
+	for (i = 0; i < NR_3215_REQ; i++) {
+		req = kzalloc(sizeof(struct raw3215_req), GFP_KERNEL | GFP_DMA);
+		if (!req)
+			return -ENOMEM;
+		req->next = raw3215_freelist;
+		raw3215_freelist = req;
+	}
+
+	cdev = ccw_device_create_console(&raw3215_ccw_driver);
+	if (IS_ERR(cdev))
+		return -ENODEV;
+
+	raw3215[0] = raw = raw3215_alloc_info();
+	raw->cdev = cdev;
+	dev_set_drvdata(&cdev->dev, raw);
+	cdev->handler = raw3215_irq;
+
+	raw->flags |= RAW3215_FIXED;
+	if (ccw_device_enable_console(cdev)) {
+		ccw_device_destroy_console(cdev);
+		raw3215_free_info(raw);
+		raw3215[0] = NULL;
+		return -ENODEV;
+	}
+
+	/* Request the console irq */
+	if (raw3215_startup(raw) != 0) {
+		raw3215_free_info(raw);
+		raw3215[0] = NULL;
+		return -ENODEV;
+	}
+	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+	register_reboot_notifier(&on_reboot_nb);
+	register_console(&con3215);
+	return 0;
+}
+console_initcall(con3215_init);
+#endif
+
+static int tty3215_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = raw3215[tty->index];
+	if (raw == NULL)
+		return -ENODEV;
+
+	tty->driver_data = raw;
+
+	return tty_port_install(&raw->port, driver, tty);
+}
+
+/*
+ * tty3215_open
+ *
+ * This routine is called whenever a 3215 tty is opened.
+ */
+static int tty3215_open(struct tty_struct *tty, struct file * filp)
+{
+	struct raw3215_info *raw = tty->driver_data;
+	int retval;
+
+	tty_port_tty_set(&raw->port, tty);
+
+	raw->port.low_latency = 0; /* don't use bottom half for pushing chars */
+	/*
+	 * Start up 3215 device
+	 */
+	retval = raw3215_startup(raw);
+	if (retval)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * tty3215_close()
+ *
+ * This routine is called when the 3215 tty is closed. We wait
+ * for the remaining request to be completed. Then we clean up.
+ */
+static void tty3215_close(struct tty_struct *tty, struct file * filp)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	if (raw == NULL || tty->count > 1)
+		return;
+	tty->closing = 1;
+	/* Shutdown the terminal */
+	raw3215_shutdown(raw);
+	tasklet_kill(&raw->tlet);
+	tty->closing = 0;
+	tty_port_tty_set(&raw->port, NULL);
+}
+
+/*
+ * Returns the amount of free space in the output buffer.
+ */
+static int tty3215_write_room(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+
+	/* Subtract TAB_STOP_SIZE to allow for a tab, 8 <<< 64K */
+	if ((RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE) >= 0)
+		return RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE;
+	else
+		return 0;
+}
+
+/*
+ * String write routine for 3215 ttys
+ */
+static int tty3215_write(struct tty_struct * tty,
+			 const unsigned char *buf, int count)
+{
+	struct raw3215_info *raw;
+	int i, written;
+
+	if (!tty)
+		return 0;
+	raw = (struct raw3215_info *) tty->driver_data;
+	written = count;
+	while (count > 0) {
+		for (i = 0; i < count; i++)
+			if (buf[i] == '\t' || buf[i] == '\n')
+				break;
+		raw3215_write(raw, buf, i);
+		count -= i;
+		buf += i;
+		if (count > 0) {
+			raw3215_putchar(raw, *buf);
+			count--;
+			buf++;
+		}
+	}
+	return written;
+}
+
+/*
+ * Put character routine for 3215 ttys
+ */
+static int tty3215_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	struct raw3215_info *raw;
+
+	if (!tty)
+		return 0;
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw3215_putchar(raw, ch);
+	return 1;
+}
+
+static void tty3215_flush_chars(struct tty_struct *tty)
+{
+}
+
+/*
+ * Returns the number of characters in the output buffer
+ */
+static int tty3215_chars_in_buffer(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	return raw->count;
+}
+
+static void tty3215_flush_buffer(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw3215_flush_buffer(raw);
+	tty_wakeup(tty);
+}
+
+/*
+ * Disable reading from a 3215 tty
+ */
+static void tty3215_throttle(struct tty_struct * tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw->flags |= RAW3215_THROTTLED;
+}
+
+/*
+ * Enable reading from a 3215 tty
+ */
+static void tty3215_unthrottle(struct tty_struct * tty)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	if (raw->flags & RAW3215_THROTTLED) {
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+		raw->flags &= ~RAW3215_THROTTLED;
+		raw3215_try_io(raw);
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	}
+}
+
+/*
+ * Disable writing to a 3215 tty
+ */
+static void tty3215_stop(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw->flags |= RAW3215_STOPPED;
+}
+
+/*
+ * Enable writing to a 3215 tty
+ */
+static void tty3215_start(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	if (raw->flags & RAW3215_STOPPED) {
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+		raw->flags &= ~RAW3215_STOPPED;
+		raw3215_try_io(raw);
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	}
+}
+
+static const struct tty_operations tty3215_ops = {
+	.install = tty3215_install,
+	.open = tty3215_open,
+	.close = tty3215_close,
+	.write = tty3215_write,
+	.put_char = tty3215_put_char,
+	.flush_chars = tty3215_flush_chars,
+	.write_room = tty3215_write_room,
+	.chars_in_buffer = tty3215_chars_in_buffer,
+	.flush_buffer = tty3215_flush_buffer,
+	.throttle = tty3215_throttle,
+	.unthrottle = tty3215_unthrottle,
+	.stop = tty3215_stop,
+	.start = tty3215_start,
+};
+
+/*
+ * 3215 tty registration code called from tty_init().
+ * Most kernel services (incl. kmalloc) are available at this poimt.
+ */
+static int __init tty3215_init(void)
+{
+	struct tty_driver *driver;
+	int ret;
+
+	if (!CONSOLE_IS_3215)
+		return 0;
+
+	driver = alloc_tty_driver(NR_3215);
+	if (!driver)
+		return -ENOMEM;
+
+	ret = ccw_driver_register(&raw3215_ccw_driver);
+	if (ret) {
+		put_tty_driver(driver);
+		return ret;
+	}
+	/*
+	 * Initialize the tty_driver structure
+	 * Entries in tty3215_driver that are NOT initialized:
+	 * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
+	 */
+
+	driver->driver_name = "tty3215";
+	driver->name = "ttyS";
+	driver->major = TTY_MAJOR;
+	driver->minor_start = 64;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	driver->init_termios.c_iflag = IGNBRK | IGNPAR;
+	driver->init_termios.c_oflag = ONLCR;
+	driver->init_termios.c_lflag = ISIG;
+	driver->flags = TTY_DRIVER_REAL_RAW;
+	tty_set_operations(driver, &tty3215_ops);
+	ret = tty_register_driver(driver);
+	if (ret) {
+		put_tty_driver(driver);
+		return ret;
+	}
+	tty3215_driver = driver;
+	return 0;
+}
+device_initcall(tty3215_init);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/con3270.c b/src/kernel/linux/v4.14/drivers/s390/char/con3270.c
new file mode 100644
index 0000000..1868ff8
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/con3270.c
@@ -0,0 +1,649 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IBM/3270 Driver - console view.
+ *
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
+ */
+
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/reboot.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/cpcmd.h>
+#include <asm/ebcdic.h>
+
+#include "raw3270.h"
+#include "tty3270.h"
+#include "ctrlchar.h"
+
+#define CON3270_OUTPUT_BUFFER_SIZE 1024
+#define CON3270_STRING_PAGES 4
+
+static struct raw3270_fn con3270_fn;
+
+static bool auto_update = true;
+module_param(auto_update, bool, 0);
+
+/*
+ * Main 3270 console view data structure.
+ */
+struct con3270 {
+	struct raw3270_view view;
+	struct list_head freemem;	/* list of free memory for strings. */
+
+	/* Output stuff. */
+	struct list_head lines;		/* list of lines. */
+	struct list_head update;	/* list of lines to update. */
+	int line_nr;			/* line number for next update. */
+	int nr_lines;			/* # lines in list. */
+	int nr_up;			/* # lines up in history. */
+	unsigned long update_flags;	/* Update indication bits. */
+	struct string *cline;		/* current output line. */
+	struct string *status;		/* last line of display. */
+	struct raw3270_request *write;	/* single write request. */
+	struct timer_list timer;
+
+	/* Input stuff. */
+	struct string *input;		/* input string for read request. */
+	struct raw3270_request *read;	/* single read request. */
+	struct raw3270_request *kreset;	/* single keyboard reset request. */
+	struct tasklet_struct readlet;	/* tasklet to issue read request. */
+};
+
+static struct con3270 *condev;
+
+/* con3270->update_flags. See con3270_update for details. */
+#define CON_UPDATE_ERASE	1	/* Use EWRITEA instead of WRITE. */
+#define CON_UPDATE_LIST		2	/* Update lines in tty3270->update. */
+#define CON_UPDATE_STATUS	4	/* Update status line. */
+#define CON_UPDATE_ALL		8	/* Recreate screen. */
+
+static void con3270_update(struct con3270 *);
+
+/*
+ * Setup timeout for a device. On timeout trigger an update.
+ */
+static void con3270_set_timer(struct con3270 *cp, int expires)
+{
+	if (expires == 0)
+		del_timer(&cp->timer);
+	else
+		mod_timer(&cp->timer, jiffies + expires);
+}
+
+/*
+ * The status line is the last line of the screen. It shows the string
+ * "console view" in the lower left corner and "Running"/"More..."/"Holding"
+ * in the lower right corner of the screen.
+ */
+static void
+con3270_update_status(struct con3270 *cp)
+{
+	char *str;
+
+	str = (cp->nr_up != 0) ? "History" : "Running";
+	memcpy(cp->status->string + 24, str, 7);
+	codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
+	cp->update_flags |= CON_UPDATE_STATUS;
+}
+
+static void
+con3270_create_status(struct con3270 *cp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, TO_SF,TF_LOG,TO_SA,TAT_COLOR, TAC_GREEN,
+		  'c','o','n','s','o','l','e',' ','v','i','e','w',
+		  TO_RA,0,0,0,'R','u','n','n','i','n','g',TO_SF,TF_LOG };
+
+	cp->status = alloc_string(&cp->freemem, sizeof(blueprint));
+	/* Copy blueprint to status line */
+	memcpy(cp->status->string, blueprint, sizeof(blueprint));
+	/* Set TO_RA addresses. */
+	raw3270_buffer_address(cp->view.dev, cp->status->string + 1,
+			       cp->view.cols * (cp->view.rows - 1));
+	raw3270_buffer_address(cp->view.dev, cp->status->string + 21,
+			       cp->view.cols * cp->view.rows - 8);
+	/* Convert strings to ebcdic. */
+	codepage_convert(cp->view.ascebc, cp->status->string + 8, 12);
+	codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
+}
+
+/*
+ * Set output offsets to 3270 datastream fragment of a console string.
+ */
+static void
+con3270_update_string(struct con3270 *cp, struct string *s, int nr)
+{
+	if (s->len < 4) {
+		/* This indicates a bug, but printing a warning would
+		 * cause a deadlock. */
+		return;
+	}
+	if (s->string[s->len - 4] != TO_RA)
+		return;
+	raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
+			       cp->view.cols * (nr + 1));
+}
+
+/*
+ * Rebuild update list to print all lines.
+ */
+static void
+con3270_rebuild_update(struct con3270 *cp)
+{
+	struct string *s, *n;
+	int nr;
+
+	/* 
+	 * Throw away update list and create a new one,
+	 * containing all lines that will fit on the screen.
+	 */
+	list_for_each_entry_safe(s, n, &cp->update, update)
+		list_del_init(&s->update);
+	nr = cp->view.rows - 2 + cp->nr_up;
+	list_for_each_entry_reverse(s, &cp->lines, list) {
+		if (nr < cp->view.rows - 1)
+			list_add(&s->update, &cp->update);
+		if (--nr < 0)
+			break;
+	}
+	cp->line_nr = 0;
+	cp->update_flags |= CON_UPDATE_LIST;
+}
+
+/*
+ * Alloc string for size bytes. Free strings from history if necessary.
+ */
+static struct string *
+con3270_alloc_string(struct con3270 *cp, size_t size)
+{
+	struct string *s, *n;
+
+	s = alloc_string(&cp->freemem, size);
+	if (s)
+		return s;
+	list_for_each_entry_safe(s, n, &cp->lines, list) {
+		list_del(&s->list);
+		if (!list_empty(&s->update))
+			list_del(&s->update);
+		cp->nr_lines--;
+		if (free_string(&cp->freemem, s) >= size)
+			break;
+	}
+	s = alloc_string(&cp->freemem, size);
+	BUG_ON(!s);
+	if (cp->nr_up != 0 && cp->nr_up + cp->view.rows > cp->nr_lines) {
+		cp->nr_up = cp->nr_lines - cp->view.rows + 1;
+		con3270_rebuild_update(cp);
+		con3270_update_status(cp);
+	}
+	return s;
+}
+
+/*
+ * Write completion callback.
+ */
+static void
+con3270_write_callback(struct raw3270_request *rq, void *data)
+{
+	raw3270_request_reset(rq);
+	xchg(&((struct con3270 *) rq->view)->write, rq);
+}
+
+/*
+ * Update console display.
+ */
+static void
+con3270_update(struct con3270 *cp)
+{
+	struct raw3270_request *wrq;
+	char wcc, prolog[6];
+	unsigned long flags;
+	unsigned long updated;
+	struct string *s, *n;
+	int rc;
+
+	if (!auto_update && !raw3270_view_active(&cp->view))
+		return;
+	if (cp->view.dev)
+		raw3270_activate_view(&cp->view);
+
+	wrq = xchg(&cp->write, 0);
+	if (!wrq) {
+		con3270_set_timer(cp, 1);
+		return;
+	}
+
+	spin_lock_irqsave(&cp->view.lock, flags);
+	updated = 0;
+	if (cp->update_flags & CON_UPDATE_ALL) {
+		con3270_rebuild_update(cp);
+		con3270_update_status(cp);
+		cp->update_flags = CON_UPDATE_ERASE | CON_UPDATE_LIST |
+			CON_UPDATE_STATUS;
+	}
+	if (cp->update_flags & CON_UPDATE_ERASE) {
+		/* Use erase write alternate to initialize display. */
+		raw3270_request_set_cmd(wrq, TC_EWRITEA);
+		updated |= CON_UPDATE_ERASE;
+	} else
+		raw3270_request_set_cmd(wrq, TC_WRITE);
+
+	wcc = TW_NONE;
+	raw3270_request_add_data(wrq, &wcc, 1);
+
+	/*
+	 * Update status line.
+	 */
+	if (cp->update_flags & CON_UPDATE_STATUS)
+		if (raw3270_request_add_data(wrq, cp->status->string,
+					     cp->status->len) == 0)
+			updated |= CON_UPDATE_STATUS;
+
+	if (cp->update_flags & CON_UPDATE_LIST) {
+		prolog[0] = TO_SBA;
+		prolog[3] = TO_SA;
+		prolog[4] = TAT_COLOR;
+		prolog[5] = TAC_TURQ;
+		raw3270_buffer_address(cp->view.dev, prolog + 1,
+				       cp->view.cols * cp->line_nr);
+		raw3270_request_add_data(wrq, prolog, 6);
+		/* Write strings in the update list to the screen. */
+		list_for_each_entry_safe(s, n, &cp->update, update) {
+			if (s != cp->cline)
+				con3270_update_string(cp, s, cp->line_nr);
+			if (raw3270_request_add_data(wrq, s->string,
+						     s->len) != 0)
+				break;
+			list_del_init(&s->update);
+			if (s != cp->cline)
+				cp->line_nr++;
+		}
+		if (list_empty(&cp->update))
+			updated |= CON_UPDATE_LIST;
+	}
+	wrq->callback = con3270_write_callback;
+	rc = raw3270_start(&cp->view, wrq);
+	if (rc == 0) {
+		cp->update_flags &= ~updated;
+		if (cp->update_flags)
+			con3270_set_timer(cp, 1);
+	} else {
+		raw3270_request_reset(wrq);
+		xchg(&cp->write, wrq);
+	}
+	spin_unlock_irqrestore(&cp->view.lock, flags);
+}
+
+/*
+ * Read tasklet.
+ */
+static void
+con3270_read_tasklet(struct raw3270_request *rrq)
+{
+	static char kreset_data = TW_KR;
+	struct con3270 *cp;
+	unsigned long flags;
+	int nr_up, deactivate;
+
+	cp = (struct con3270 *) rrq->view;
+	spin_lock_irqsave(&cp->view.lock, flags);
+	nr_up = cp->nr_up;
+	deactivate = 0;
+	/* Check aid byte. */
+	switch (cp->input->string[0]) {
+	case 0x7d:	/* enter: jump to bottom. */
+		nr_up = 0;
+		break;
+	case 0xf3:	/* PF3: deactivate the console view. */
+		deactivate = 1;
+		break;
+	case 0x6d:	/* clear: start from scratch. */
+		cp->update_flags = CON_UPDATE_ALL;
+		con3270_set_timer(cp, 1);
+		break;
+	case 0xf7:	/* PF7: do a page up in the console log. */
+		nr_up += cp->view.rows - 2;
+		if (nr_up + cp->view.rows - 1 > cp->nr_lines) {
+			nr_up = cp->nr_lines - cp->view.rows + 1;
+			if (nr_up < 0)
+				nr_up = 0;
+		}
+		break;
+	case 0xf8:	/* PF8: do a page down in the console log. */
+		nr_up -= cp->view.rows - 2;
+		if (nr_up < 0)
+			nr_up = 0;
+		break;
+	}
+	if (nr_up != cp->nr_up) {
+		cp->nr_up = nr_up;
+		con3270_rebuild_update(cp);
+		con3270_update_status(cp);
+		con3270_set_timer(cp, 1);
+	}
+	spin_unlock_irqrestore(&cp->view.lock, flags);
+
+	/* Start keyboard reset command. */
+	raw3270_request_reset(cp->kreset);
+	raw3270_request_set_cmd(cp->kreset, TC_WRITE);
+	raw3270_request_add_data(cp->kreset, &kreset_data, 1);
+	raw3270_start(&cp->view, cp->kreset);
+
+	if (deactivate)
+		raw3270_deactivate_view(&cp->view);
+
+	raw3270_request_reset(rrq);
+	xchg(&cp->read, rrq);
+	raw3270_put_view(&cp->view);
+}
+
+/*
+ * Read request completion callback.
+ */
+static void
+con3270_read_callback(struct raw3270_request *rq, void *data)
+{
+	raw3270_get_view(rq->view);
+	/* Schedule tasklet to pass input to tty. */
+	tasklet_schedule(&((struct con3270 *) rq->view)->readlet);
+}
+
+/*
+ * Issue a read request. Called only from interrupt function.
+ */
+static void
+con3270_issue_read(struct con3270 *cp)
+{
+	struct raw3270_request *rrq;
+	int rc;
+
+	rrq = xchg(&cp->read, 0);
+	if (!rrq)
+		/* Read already scheduled. */
+		return;
+	rrq->callback = con3270_read_callback;
+	rrq->callback_data = cp;
+	raw3270_request_set_cmd(rrq, TC_READMOD);
+	raw3270_request_set_data(rrq, cp->input->string, cp->input->len);
+	/* Issue the read modified request. */
+	rc = raw3270_start_irq(&cp->view, rrq);
+	if (rc)
+		raw3270_request_reset(rrq);
+}
+
+/*
+ * Switch to the console view.
+ */
+static int
+con3270_activate(struct raw3270_view *view)
+{
+	struct con3270 *cp;
+
+	cp = (struct con3270 *) view;
+	cp->update_flags = CON_UPDATE_ALL;
+	con3270_set_timer(cp, 1);
+	return 0;
+}
+
+static void
+con3270_deactivate(struct raw3270_view *view)
+{
+	struct con3270 *cp;
+
+	cp = (struct con3270 *) view;
+	del_timer(&cp->timer);
+}
+
+static void
+con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
+{
+	/* Handle ATTN. Schedule tasklet to read aid. */
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
+		con3270_issue_read(cp);
+
+	if (rq) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+			rq->rc = -EIO;
+		else
+			/* Normal end. Copy residual count. */
+			rq->rescnt = irb->scsw.cmd.count;
+	} else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+		/* Interrupt without an outstanding request -> update all */
+		cp->update_flags = CON_UPDATE_ALL;
+		con3270_set_timer(cp, 1);
+	}
+}
+
+/* Console view to a 3270 device. */
+static struct raw3270_fn con3270_fn = {
+	.activate = con3270_activate,
+	.deactivate = con3270_deactivate,
+	.intv = (void *) con3270_irq
+};
+
+static inline void
+con3270_cline_add(struct con3270 *cp)
+{
+	if (!list_empty(&cp->cline->list))
+		/* Already added. */
+		return;
+	list_add_tail(&cp->cline->list, &cp->lines);
+	cp->nr_lines++;
+	con3270_rebuild_update(cp);
+}
+
+static inline void
+con3270_cline_insert(struct con3270 *cp, unsigned char c)
+{
+	cp->cline->string[cp->cline->len++] = 
+		cp->view.ascebc[(c < ' ') ? ' ' : c];
+	if (list_empty(&cp->cline->update)) {
+		list_add_tail(&cp->cline->update, &cp->update);
+		cp->update_flags |= CON_UPDATE_LIST;
+	}
+}
+
+static inline void
+con3270_cline_end(struct con3270 *cp)
+{
+	struct string *s;
+	unsigned int size;
+
+	/* Copy cline. */
+	size = (cp->cline->len < cp->view.cols - 5) ?
+		cp->cline->len + 4 : cp->view.cols;
+	s = con3270_alloc_string(cp, size);
+	memcpy(s->string, cp->cline->string, cp->cline->len);
+	if (cp->cline->len < cp->view.cols - 5) {
+		s->string[s->len - 4] = TO_RA;
+		s->string[s->len - 1] = 0;
+	} else {
+		while (--size >= cp->cline->len)
+			s->string[size] = cp->view.ascebc[' '];
+	}
+	/* Replace cline with allocated line s and reset cline. */
+	list_add(&s->list, &cp->cline->list);
+	list_del_init(&cp->cline->list);
+	if (!list_empty(&cp->cline->update)) {
+		list_add(&s->update, &cp->cline->update);
+		list_del_init(&cp->cline->update);
+	}
+	cp->cline->len = 0;
+}
+
+/*
+ * Write a string to the 3270 console
+ */
+static void
+con3270_write(struct console *co, const char *str, unsigned int count)
+{
+	struct con3270 *cp;
+	unsigned long flags;
+	unsigned char c;
+
+	cp = condev;
+	spin_lock_irqsave(&cp->view.lock, flags);
+	while (count-- > 0) {
+		c = *str++;
+		if (cp->cline->len == 0)
+			con3270_cline_add(cp);
+		if (c != '\n')
+			con3270_cline_insert(cp, c);
+		if (c == '\n' || cp->cline->len >= cp->view.cols)
+			con3270_cline_end(cp);
+	}
+	/* Setup timer to output current console buffer after 1/10 second */
+	cp->nr_up = 0;
+	if (cp->view.dev && !timer_pending(&cp->timer))
+		con3270_set_timer(cp, HZ/10);
+	spin_unlock_irqrestore(&cp->view.lock,flags);
+}
+
+static struct tty_driver *
+con3270_device(struct console *c, int *index)
+{
+	*index = c->index;
+	return tty3270_driver;
+}
+
+/*
+ * Wait for end of write request.
+ */
+static void
+con3270_wait_write(struct con3270 *cp)
+{
+	while (!cp->write) {
+		raw3270_wait_cons_dev(cp->view.dev);
+		barrier();
+	}
+}
+
+/*
+ * panic() calls con3270_flush through a panic_notifier
+ * before the system enters a disabled, endless loop.
+ */
+static void
+con3270_flush(void)
+{
+	struct con3270 *cp;
+	unsigned long flags;
+
+	cp = condev;
+	if (!cp->view.dev)
+		return;
+	raw3270_pm_unfreeze(&cp->view);
+	raw3270_activate_view(&cp->view);
+	spin_lock_irqsave(&cp->view.lock, flags);
+	con3270_wait_write(cp);
+	cp->nr_up = 0;
+	con3270_rebuild_update(cp);
+	con3270_update_status(cp);
+	while (cp->update_flags != 0) {
+		spin_unlock_irqrestore(&cp->view.lock, flags);
+		con3270_update(cp);
+		spin_lock_irqsave(&cp->view.lock, flags);
+		con3270_wait_write(cp);
+	}
+	spin_unlock_irqrestore(&cp->view.lock, flags);
+}
+
+static int con3270_notify(struct notifier_block *self,
+			  unsigned long event, void *data)
+{
+	con3270_flush();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+	.notifier_call = con3270_notify,
+	.priority = 0,
+};
+
+static struct notifier_block on_reboot_nb = {
+	.notifier_call = con3270_notify,
+	.priority = 0,
+};
+
+/*
+ *  The console structure for the 3270 console
+ */
+static struct console con3270 = {
+	.name	 = "tty3270",
+	.write	 = con3270_write,
+	.device	 = con3270_device,
+	.flags	 = CON_PRINTBUFFER,
+};
+
+/*
+ * 3270 console initialization code called from console_init().
+ */
+static int __init
+con3270_init(void)
+{
+	struct raw3270 *rp;
+	void *cbuf;
+	int i;
+
+	/* Check if 3270 is to be the console */
+	if (!CONSOLE_IS_3270)
+		return -ENODEV;
+
+	/* Set the console mode for VM */
+	if (MACHINE_IS_VM) {
+		cpcmd("TERM CONMODE 3270", NULL, 0, NULL);
+		cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
+	}
+
+	rp = raw3270_setup_console();
+	if (IS_ERR(rp))
+		return PTR_ERR(rp);
+
+	condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA);
+	if (!condev)
+		return -ENOMEM;
+	condev->view.dev = rp;
+
+	condev->read = raw3270_request_alloc(0);
+	condev->read->callback = con3270_read_callback;
+	condev->read->callback_data = condev;
+	condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE);
+	condev->kreset = raw3270_request_alloc(1);
+
+	INIT_LIST_HEAD(&condev->lines);
+	INIT_LIST_HEAD(&condev->update);
+	setup_timer(&condev->timer, (void (*)(unsigned long)) con3270_update,
+		    (unsigned long) condev);
+	tasklet_init(&condev->readlet, 
+		     (void (*)(unsigned long)) con3270_read_tasklet,
+		     (unsigned long) condev->read);
+
+	raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ);
+
+	INIT_LIST_HEAD(&condev->freemem);
+	for (i = 0; i < CON3270_STRING_PAGES; i++) {
+		cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
+	}
+	condev->cline = alloc_string(&condev->freemem, condev->view.cols);
+	condev->cline->len = 0;
+	con3270_create_status(condev);
+	condev->input = alloc_string(&condev->freemem, 80);
+	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+	register_reboot_notifier(&on_reboot_nb);
+	register_console(&con3270);
+	return 0;
+}
+
+console_initcall(con3270_init);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/ctrlchar.c b/src/kernel/linux/v4.14/drivers/s390/char/ctrlchar.c
new file mode 100644
index 0000000..e1686a6
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/ctrlchar.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Unified handling of special chars.
+ *
+ *    Copyright IBM Corp. 2001
+ *    Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
+ *
+ */
+
+#include <linux/stddef.h>
+#include <asm/errno.h>
+#include <linux/sysrq.h>
+#include <linux/ctype.h>
+
+#include "ctrlchar.h"
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static struct sysrq_work ctrlchar_sysrq;
+
+static void
+ctrlchar_handle_sysrq(struct work_struct *work)
+{
+	struct sysrq_work *sysrq = container_of(work, struct sysrq_work, work);
+
+	handle_sysrq(sysrq->key);
+}
+
+void schedule_sysrq_work(struct sysrq_work *sw)
+{
+	INIT_WORK(&sw->work, ctrlchar_handle_sysrq);
+	schedule_work(&sw->work);
+}
+#endif
+
+
+/**
+ * Check for special chars at start of input.
+ *
+ * @param buf Console input buffer.
+ * @param len Length of valid data in buffer.
+ * @param tty The tty struct for this console.
+ * @return CTRLCHAR_NONE, if nothing matched,
+ *         CTRLCHAR_SYSRQ, if sysrq was encountered
+ *         otherwise char to be inserted logically or'ed
+ *         with CTRLCHAR_CTRL
+ */
+unsigned int
+ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
+{
+	if ((len < 2) || (len > 3))
+		return CTRLCHAR_NONE;
+
+	/* hat is 0xb1 in codepage 037 (US etc.) and thus */
+	/* converted to 0x5e in ascii ('^') */
+	if ((buf[0] != '^') && (buf[0] != '\252'))
+		return CTRLCHAR_NONE;
+
+#ifdef CONFIG_MAGIC_SYSRQ
+	/* racy */
+	if (len == 3 && buf[1] == '-') {
+		ctrlchar_sysrq.key = buf[2];
+		schedule_sysrq_work(&ctrlchar_sysrq);
+		return CTRLCHAR_SYSRQ;
+	}
+#endif
+
+	if (len != 2)
+		return CTRLCHAR_NONE;
+
+	switch (tolower(buf[1])) {
+	case 'c':
+		return INTR_CHAR(tty) | CTRLCHAR_CTRL;
+	case 'd':
+		return EOF_CHAR(tty)  | CTRLCHAR_CTRL;
+	case 'z':
+		return SUSP_CHAR(tty) | CTRLCHAR_CTRL;
+	}
+	return CTRLCHAR_NONE;
+}
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/ctrlchar.h b/src/kernel/linux/v4.14/drivers/s390/char/ctrlchar.h
new file mode 100644
index 0000000..e52afa3
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/ctrlchar.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Unified handling of special chars.
+ *
+ *    Copyright IBM Corp. 2001
+ *    Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
+ *
+ */
+
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/workqueue.h>
+
+extern unsigned int
+ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty);
+
+
+#define CTRLCHAR_NONE  (1 << 8)
+#define CTRLCHAR_CTRL  (2 << 8)
+#define CTRLCHAR_SYSRQ (3 << 8)
+
+#define CTRLCHAR_MASK (~0xffu)
+
+
+#ifdef CONFIG_MAGIC_SYSRQ
+struct sysrq_work {
+	int key;
+	struct work_struct work;
+};
+
+void schedule_sysrq_work(struct sysrq_work *sw);
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/defkeymap.c b/src/kernel/linux/v4.14/drivers/s390/char/defkeymap.c
new file mode 100644
index 0000000..98a5c45
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/defkeymap.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Do not edit this file! It was automatically generated by   */
+/*    loadkeys --mktable defkeymap.map > defkeymap.c          */
+
+#include <linux/types.h>
+#include <linux/keyboard.h>
+#include <linux/kd.h>
+#include <linux/kbd_kern.h>
+#include <linux/kbd_diacr.h>
+
+u_short plain_map[NR_KEYS] = {
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf020,	0xf000,	0xf0e2,	0xf0e4,	0xf0e0,	0xf0e1,	0xf0e3,	0xf0e5,
+	0xf0e7,	0xf0f1,	0xf0a2,	0xf02e,	0xf03c,	0xf028,	0xf02b,	0xf07c,
+	0xf026,	0xf0e9,	0xf0e2,	0xf0eb,	0xf0e8,	0xf0ed,	0xf0ee,	0xf0ef,
+	0xf0ec,	0xf0df,	0xf021,	0xf024,	0xf02a,	0xf029,	0xf03b,	0xf0ac,
+	0xf02d,	0xf02f,	0xf0c2,	0xf0c4,	0xf0c0,	0xf0c1,	0xf0c3,	0xf0c5,
+	0xf0c7,	0xf0d1,	0xf0a6,	0xf02c,	0xf025,	0xf05f,	0xf03e,	0xf03f,
+	0xf0f8,	0xf0c9,	0xf0ca,	0xf0cb,	0xf0c8,	0xf0cd,	0xf0ce,	0xf0cf,
+	0xf0cc,	0xf060,	0xf03a,	0xf023,	0xf040,	0xf027,	0xf03d,	0xf022,
+};
+
+static u_short shift_map[NR_KEYS] = {
+	0xf0d8,	0xf061,	0xf062,	0xf063,	0xf064,	0xf065,	0xf066,	0xf067,
+	0xf068,	0xf069,	0xf0ab,	0xf0bb,	0xf0f0,	0xf0fd,	0xf0fe,	0xf0b1,
+	0xf0b0,	0xf06a,	0xf06b,	0xf06c,	0xf06d,	0xf06e,	0xf06f,	0xf070,
+	0xf071,	0xf072,	0xf000,	0xf000,	0xf0e6,	0xf0b8,	0xf0c6,	0xf0a4,
+	0xf0b5,	0xf07e,	0xf073,	0xf074,	0xf075,	0xf076,	0xf077,	0xf078,
+	0xf079,	0xf07a,	0xf0a1,	0xf0bf,	0xf0d0,	0xf0dd,	0xf0de,	0xf0ae,
+	0xf402,	0xf0a3,	0xf0a5,	0xf0b7,	0xf0a9,	0xf0a7,	0xf0b6,	0xf0bc,
+	0xf0bd,	0xf0be,	0xf05b,	0xf05d,	0xf000,	0xf0a8,	0xf0b4,	0xf0d7,
+	0xf07b,	0xf041,	0xf042,	0xf043,	0xf044,	0xf045,	0xf046,	0xf047,
+	0xf048,	0xf049,	0xf000,	0xf0f4,	0xf0f6,	0xf0f2,	0xf0f3,	0xf0f5,
+	0xf07d,	0xf04a,	0xf04b,	0xf04c,	0xf04d,	0xf04e,	0xf04f,	0xf050,
+	0xf051,	0xf052,	0xf0b9,	0xf0fb,	0xf0fc,	0xf0f9,	0xf0fa,	0xf0ff,
+	0xf05c,	0xf0f7,	0xf053,	0xf054,	0xf055,	0xf056,	0xf057,	0xf058,
+	0xf059,	0xf05a,	0xf0b2,	0xf0d4,	0xf0d6,	0xf0d2,	0xf0d3,	0xf0d5,
+	0xf030,	0xf031,	0xf032,	0xf033,	0xf034,	0xf035,	0xf036,	0xf037,
+	0xf038,	0xf039,	0xf0b3,	0xf0db,	0xf0dc,	0xf0d9,	0xf0da,	0xf000,
+};
+
+static u_short ctrl_map[NR_KEYS] = {
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf11f,	0xf120,	0xf121,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf01a,	0xf003,	0xf212,	0xf004,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf109,	0xf10a,	0xf206,	0xf00a,	0xf200,	0xf200,
+};
+
+static u_short shift_ctrl_map[NR_KEYS] = {
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf10c,	0xf10d,	0xf10e,	0xf10f,	0xf110,	0xf111,	0xf112,
+	0xf113,	0xf11e,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf100,	0xf101,	0xf211,	0xf103,	0xf104,	0xf105,	0xf20b,
+	0xf20a,	0xf108,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+};
+
+ushort *key_maps[MAX_NR_KEYMAPS] = {
+	plain_map, shift_map, NULL, NULL,
+	ctrl_map, shift_ctrl_map, NULL,
+};
+
+unsigned int keymap_count = 4;
+
+
+/*
+ * Philosophy: most people do not define more strings, but they who do
+ * often want quite a lot of string space. So, we statically allocate
+ * the default and allocate dynamically in chunks of 512 bytes.
+ */
+
+char func_buf[] = {
+	'\033', '[', '[', 'A', 0, 
+	'\033', '[', '[', 'B', 0, 
+	'\033', '[', '[', 'C', 0, 
+	'\033', '[', '[', 'D', 0, 
+	'\033', '[', '[', 'E', 0, 
+	'\033', '[', '1', '7', '~', 0, 
+	'\033', '[', '1', '8', '~', 0, 
+	'\033', '[', '1', '9', '~', 0, 
+	'\033', '[', '2', '0', '~', 0, 
+	'\033', '[', '2', '1', '~', 0, 
+	'\033', '[', '2', '3', '~', 0, 
+	'\033', '[', '2', '4', '~', 0, 
+	'\033', '[', '2', '5', '~', 0, 
+	'\033', '[', '2', '6', '~', 0, 
+	'\033', '[', '2', '8', '~', 0, 
+	'\033', '[', '2', '9', '~', 0, 
+	'\033', '[', '3', '1', '~', 0, 
+	'\033', '[', '3', '2', '~', 0, 
+	'\033', '[', '3', '3', '~', 0, 
+	'\033', '[', '3', '4', '~', 0, 
+};
+
+
+char *funcbufptr = func_buf;
+int funcbufsize = sizeof(func_buf);
+int funcbufleft = 0;          /* space left */
+
+char *func_table[MAX_NR_FUNC] = {
+	func_buf + 0,
+	func_buf + 5,
+	func_buf + 10,
+	func_buf + 15,
+	func_buf + 20,
+	func_buf + 25,
+	func_buf + 31,
+	func_buf + 37,
+	func_buf + 43,
+	func_buf + 49,
+	func_buf + 55,
+	func_buf + 61,
+	func_buf + 67,
+	func_buf + 73,
+	func_buf + 79,
+	func_buf + 85,
+	func_buf + 91,
+	func_buf + 97,
+	func_buf + 103,
+	func_buf + 109,
+	NULL,
+};
+
+struct kbdiacruc accent_table[MAX_DIACR] = {
+	{'^', 'c', 0003},	{'^', 'd', 0004},
+	{'^', 'z', 0032},	{'^', 0012, 0000},
+};
+
+unsigned int accent_table_size = 4;
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/defkeymap.map b/src/kernel/linux/v4.14/drivers/s390/char/defkeymap.map
new file mode 100644
index 0000000..353b3f2
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/defkeymap.map
@@ -0,0 +1,191 @@
+# Default keymap for 3270 (ebcdic codepage 037).
+keymaps 0-1,4-5
+
+keycode   0 = nul		Oslash
+keycode   1 = nul		a
+keycode   2 = nul		b
+keycode   3 = nul		c
+keycode   4 = nul		d
+keycode   5 = nul		e
+keycode   6 = nul		f
+keycode   7 = nul		g
+keycode   8 = nul		h
+keycode   9 = nul		i
+keycode  10 = nul		guillemotleft
+keycode  11 = nul		guillemotright
+keycode  12 = nul		eth
+keycode  13 = nul		yacute
+keycode  14 = nul		thorn
+keycode  15 = nul		plusminus
+keycode  16 = nul		degree
+keycode  17 = nul		j
+keycode  18 = nul		k
+keycode  19 = nul		l
+keycode  20 = nul		m
+keycode  21 = nul		n
+keycode  22 = nul		o
+keycode  23 = nul		p
+keycode  24 = nul		q
+keycode  25 = nul		r
+keycode  26 = nul		nul
+keycode  27 = nul		nul
+keycode  28 = nul		ae
+keycode  29 = nul		cedilla
+keycode  30 = nul		AE
+keycode  31 = nul		currency
+keycode  32 = nul		mu
+keycode  33 = nul		tilde
+keycode  34 = nul		s
+keycode  35 = nul		t
+keycode  36 = nul		u
+keycode  37 = nul		v
+keycode  38 = nul		w
+keycode  39 = nul		x
+keycode  40 = nul		y
+keycode  41 = nul		z
+keycode  42 = nul		exclamdown
+keycode  43 = nul		questiondown
+keycode  44 = nul		ETH
+keycode  45 = nul		Yacute
+keycode  46 = nul		THORN
+keycode  47 = nul		registered
+keycode  48 = nul		dead_circumflex
+keycode  49 = nul		sterling
+keycode  50 = nul		yen
+keycode  51 = nul		periodcentered
+keycode  52 = nul		copyright
+keycode  53 = nul		section
+keycode  54 = nul		paragraph
+keycode  55 = nul		onequarter
+keycode  56 = nul		onehalf
+keycode  57 = nul		threequarters
+keycode  58 = nul		bracketleft
+keycode  59 = nul		bracketright
+keycode  60 = nul		nul
+keycode  61 = nul		diaeresis
+keycode  62 = nul		acute
+keycode  63 = nul		multiply
+keycode  64 = space		braceleft
+keycode  65 = nul		A
+keycode  66 = acircumflex	B
+keycode  67 = adiaeresis	C
+keycode  68 = agrave		D
+keycode  69 = aacute		E
+keycode  70 = atilde		F
+keycode  71 = aring		G
+keycode  72 = ccedilla		H
+keycode  73 = ntilde		I
+keycode  74 = cent		nul
+keycode  75 = period		ocircumflex
+keycode  76 = less		odiaeresis
+keycode  77 = parenleft		ograve
+keycode  78 = plus		oacute
+keycode  79 = bar		otilde
+keycode  80 = ampersand		braceright
+keycode  81 = eacute		J
+keycode  82 = acircumflex	K
+keycode  83 = ediaeresis	L
+keycode  84 = egrave		M
+keycode  85 = iacute		N
+keycode  86 = icircumflex	O
+keycode  87 = idiaeresis	P
+keycode  88 = igrave		Q
+keycode  89 = ssharp		R
+keycode  90 = exclam		onesuperior
+keycode  91 = dollar		ucircumflex
+keycode  92 = asterisk		udiaeresis
+keycode  93 = parenright	ugrave
+keycode  94 = semicolon		uacute
+keycode  95 = notsign		ydiaeresis
+keycode  96 = minus		backslash
+keycode  97 = slash		division
+keycode  98 = Acircumflex	S
+keycode  99 = Adiaeresis	T
+keycode 100 = Agrave		U
+keycode 101 = Aacute		V
+keycode 102 = Atilde		W
+keycode 103 = Aring		X
+keycode 104 = Ccedilla		Y
+keycode 105 = Ntilde		Z
+keycode 106 = brokenbar		twosuperior
+keycode 107 = comma		Ocircumflex
+keycode 108 = percent		Odiaeresis
+keycode 109 = underscore	Ograve
+keycode 110 = greater		Oacute
+keycode 111 = question		Otilde
+keycode 112 = oslash		zero
+keycode 113 = Eacute		one
+keycode 114 = Ecircumflex	two
+keycode 115 = Ediaeresis	three
+keycode 116 = Egrave		four
+keycode 117 = Iacute		five
+keycode 118 = Icircumflex	six
+keycode 119 = Idiaeresis	seven
+keycode 120 = Igrave		eight
+keycode 121 = grave		nine
+keycode 122 = colon		threesuperior
+keycode 123 = numbersign	Ucircumflex
+keycode 124 = at		Udiaeresis
+keycode 125 = apostrophe	Ugrave
+keycode 126 = equal		Uacute
+keycode 127 = quotedbl		nul
+
+# AID keys
+control keycode  74 = F22
+control keycode  75 = F23
+control keycode  76 = F24
+control keycode 107 = Control_z		# PA3
+control keycode 108 = Control_c		# PA1
+control keycode 109 = KeyboardSignal	# Clear
+control keycode 110 = Control_d		# PA2
+control keycode 122 = F10
+control keycode 123 = F11		# F11
+control keycode 124 = Last_Console	# F12
+control keycode 125 = Linefeed
+shift control keycode  65 = F13
+shift control keycode  66 = F14
+shift control keycode  67 = F15
+shift control keycode  68 = F16
+shift control keycode  69 = F17
+shift control keycode  70 = F18
+shift control keycode  71 = F19
+shift control keycode  72 = F20
+shift control keycode  73 = F21
+shift control keycode 113 = F1
+shift control keycode 114 = F2
+shift control keycode 115 = Incr_Console
+shift control keycode 116 = F4
+shift control keycode 117 = F5
+shift control keycode 118 = F6
+shift control keycode 119 = Scroll_Backward
+shift control keycode 120 = Scroll_Forward
+shift control keycode 121 = F9
+
+string F1 = "\033[[A"
+string F2 = "\033[[B"
+string F3 = "\033[[C"
+string F4 = "\033[[D"
+string F5 = "\033[[E"
+string F6 = "\033[17~"
+string F7 = "\033[18~"
+string F8 = "\033[19~"
+string F9 = "\033[20~"
+string F10 = "\033[21~"
+string F11 = "\033[23~"
+string F12 = "\033[24~"
+string F13 = "\033[25~"
+string F14 = "\033[26~"
+string F15 = "\033[28~"
+string F16 = "\033[29~"
+string F17 = "\033[31~"
+string F18 = "\033[32~"
+string F19 = "\033[33~"
+string F20 = "\033[34~"
+# string F21 ??
+# string F22 ??
+# string F23 ??
+# string F24 ??
+compose '^' 'c' to Control_c
+compose '^' 'd' to Control_d
+compose '^' 'z' to Control_z
+compose '^' '\012' to nul
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/diag_ftp.c b/src/kernel/linux/v4.14/drivers/s390/char/diag_ftp.c
new file mode 100644
index 0000000..6bf1058
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/diag_ftp.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    DIAGNOSE X'2C4' instruction based HMC FTP services, usable on z/VM
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ *
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/wait.h>
+#include <linux/string.h>
+#include <asm/ctl_reg.h>
+#include <asm/diag.h>
+
+#include "hmcdrv_ftp.h"
+#include "diag_ftp.h"
+
+/* DIAGNOSE X'2C4' return codes in Ry */
+#define DIAG_FTP_RET_OK	0 /* HMC FTP started successfully */
+#define DIAG_FTP_RET_EBUSY	4 /* HMC FTP service currently busy */
+#define DIAG_FTP_RET_EIO	8 /* HMC FTP service I/O error */
+/* and an artificial extension */
+#define DIAG_FTP_RET_EPERM	2 /* HMC FTP service privilege error */
+
+/* FTP service status codes (after INTR at guest real location 133) */
+#define DIAG_FTP_STAT_OK	0U /* request completed successfully */
+#define DIAG_FTP_STAT_PGCC	4U /* program check condition */
+#define DIAG_FTP_STAT_PGIOE	8U /* paging I/O error */
+#define DIAG_FTP_STAT_TIMEOUT	12U /* timeout */
+#define DIAG_FTP_STAT_EBASE	16U /* base of error codes from SCLP */
+#define DIAG_FTP_STAT_LDFAIL	(DIAG_FTP_STAT_EBASE + 1U) /* failed */
+#define DIAG_FTP_STAT_LDNPERM	(DIAG_FTP_STAT_EBASE + 2U) /* not allowed */
+#define DIAG_FTP_STAT_LDRUNS	(DIAG_FTP_STAT_EBASE + 3U) /* runs */
+#define DIAG_FTP_STAT_LDNRUNS	(DIAG_FTP_STAT_EBASE + 4U) /* not runs */
+
+/**
+ * struct diag_ftp_ldfpl - load file FTP parameter list (LDFPL)
+ * @bufaddr: real buffer address (at 4k boundary)
+ * @buflen: length of buffer
+ * @offset: dir/file offset
+ * @intparm: interruption parameter (unused)
+ * @transferred: bytes transferred
+ * @fsize: file size, filled on GET
+ * @failaddr: failing address
+ * @spare: padding
+ * @fident: file name - ASCII
+ */
+struct diag_ftp_ldfpl {
+	u64 bufaddr;
+	u64 buflen;
+	u64 offset;
+	u64 intparm;
+	u64 transferred;
+	u64 fsize;
+	u64 failaddr;
+	u64 spare;
+	u8 fident[HMCDRV_FTP_FIDENT_MAX];
+} __packed;
+
+static DECLARE_COMPLETION(diag_ftp_rx_complete);
+static int diag_ftp_subcode;
+
+/**
+ * diag_ftp_handler() - FTP services IRQ handler
+ * @extirq: external interrupt (sub-) code
+ * @param32: 32-bit interruption parameter from &struct diag_ftp_ldfpl
+ * @param64: unused (for 64-bit interrupt parameters)
+ */
+static void diag_ftp_handler(struct ext_code extirq,
+			     unsigned int param32,
+			     unsigned long param64)
+{
+	/* The upper byte of the subcode selects the service class;
+	 * only class 8 (FTP services) belongs to this driver. */
+	if ((extirq.subcode >> 8) != 8)
+		return; /* not a FTP services sub-code */
+
+	inc_irq_stat(IRQEXT_FTP);
+	/* The lower byte carries the completion status; it is consumed
+	 * by diag_ftp_cmd() after the completion below fires. */
+	diag_ftp_subcode = extirq.subcode & 0xffU;
+	complete(&diag_ftp_rx_complete);
+}
+
+/**
+ * diag_ftp_2c4() - DIAGNOSE X'2C4' service call
+ * @fpl: pointer to prepared LDFPL
+ * @cmd: FTP command to be executed
+ *
+ * Performs a DIAGNOSE X'2C4' call with (input/output) FTP parameter list
+ * @fpl and FTP function code @cmd. In case of an error the function does
+ * nothing and returns an (negative) error code.
+ *
+ * Notes:
+ * 1. This function only initiates a transfer, so the caller must wait
+ *    for completion (asynchronous execution).
+ * 2. The FTP parameter list @fpl must be aligned to a double-word boundary.
+ * 3. fpl->bufaddr must be a real address, 4k aligned
+ */
+static int diag_ftp_2c4(struct diag_ftp_ldfpl *fpl,
+			enum hmcdrv_ftp_cmdid cmd)
+{
+	int rc;
+
+	diag_stat_inc(DIAG_STAT_X2C4);
+	/*
+	 * If the DIAG instruction faults (e.g. the guest lacks the
+	 * required privilege), the exception table entry resumes
+	 * execution at label 1, which loads the artificial
+	 * DIAG_FTP_RET_EPERM code into rc.
+	 */
+	asm volatile(
+		"	diag	%[addr],%[cmd],0x2c4\n"
+		"0:	j	2f\n"
+		"1:	la	%[rc],%[err]\n"
+		"2:\n"
+		EX_TABLE(0b, 1b)
+		: [rc] "=d" (rc), "+m" (*fpl)
+		: [cmd] "0" (cmd), [addr] "d" (virt_to_phys(fpl)),
+		  [err] "i" (DIAG_FTP_RET_EPERM)
+		: "cc");
+
+	/* Translate the diagnose return code into a kernel errno. */
+	switch (rc) {
+	case DIAG_FTP_RET_OK:
+		return 0;
+	case DIAG_FTP_RET_EBUSY:
+		return -EBUSY;
+	case DIAG_FTP_RET_EPERM:
+		return -EPERM;
+	case DIAG_FTP_RET_EIO:
+	default:
+		return -EIO;
+	}
+}
+
+/**
+ * diag_ftp_cmd() - executes a DIAG X'2C4' FTP command, targeting a HMC
+ * @ftp: pointer to FTP command specification
+ * @fsize: return of file size (or NULL if undesirable)
+ *
+ * Attention: Notice that this function is not reentrant - so the caller
+ * must ensure locking.
+ *
+ * Return: number of bytes read/written or a (negative) error code
+ */
+ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
+{
+	struct diag_ftp_ldfpl *ldfpl;
+	ssize_t len;
+#ifdef DEBUG
+	unsigned long start_jiffies;
+
+	pr_debug("starting DIAG X'2C4' on '%s', requesting %zd bytes\n",
+		 ftp->fname, ftp->len);
+	start_jiffies = jiffies;
+#endif
+	init_completion(&diag_ftp_rx_complete);
+
+	/* NOTE(review): a whole zeroed page satisfies the LDFPL alignment
+	 * requirement; GFP_DMA presumably keeps it addressable by the
+	 * diagnose (real address) - confirm against the DIAG X'2C4' spec. */
+	ldfpl = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!ldfpl) {
+		len = -ENOMEM;
+		goto out;
+	}
+
+	/* strlcpy returns the source length; >= FIDENT_MAX means the
+	 * file name was truncated and the request must be rejected. */
+	len = strlcpy(ldfpl->fident, ftp->fname, sizeof(ldfpl->fident));
+	if (len >= HMCDRV_FTP_FIDENT_MAX) {
+		len = -EINVAL;
+		goto out_free;
+	}
+
+	ldfpl->transferred = 0;
+	ldfpl->fsize = 0;
+	ldfpl->offset = ftp->ofs;
+	ldfpl->buflen = ftp->len;
+	ldfpl->bufaddr = virt_to_phys(ftp->buf);
+
+	len = diag_ftp_2c4(ldfpl, ftp->id);
+	if (len)
+		goto out_free;
+
+	/*
+	 * There is no way to cancel the running diag X'2C4', the code
+	 * needs to wait unconditionally until the transfer is complete.
+	 */
+	wait_for_completion(&diag_ftp_rx_complete);
+
+#ifdef DEBUG
+	pr_debug("completed DIAG X'2C4' after %lu ms\n",
+		 (jiffies - start_jiffies) * 1000 / HZ);
+	pr_debug("status of DIAG X'2C4' is %u, with %lld/%lld bytes\n",
+		 diag_ftp_subcode, ldfpl->transferred, ldfpl->fsize);
+#endif
+
+	/* diag_ftp_subcode was stored by diag_ftp_handler() right before
+	 * the completion was signalled. */
+	switch (diag_ftp_subcode) {
+	case DIAG_FTP_STAT_OK: /* success */
+		len = ldfpl->transferred;
+		if (fsize)
+			*fsize = ldfpl->fsize;
+		break;
+	case DIAG_FTP_STAT_LDNPERM:
+		len = -EPERM;
+		break;
+	case DIAG_FTP_STAT_LDRUNS:
+		len = -EBUSY;
+		break;
+	case DIAG_FTP_STAT_LDFAIL:
+		len = -ENOENT; /* no such file or media */
+		break;
+	default:
+		len = -EIO;
+		break;
+	}
+
+out_free:
+	free_page((unsigned long) ldfpl);
+out:
+	return len;
+}
+
+/**
+ * diag_ftp_startup() - startup of FTP services, when running on z/VM
+ *
+ * Return: 0 on success, else an (negative) error code
+ */
+int diag_ftp_startup(void)
+{
+	int rc;
+
+	rc = register_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);
+	if (rc)
+		return rc;
+
+	/* Enable the service-signal external interrupt subclass so the
+	 * handler registered above can actually be driven. */
+	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
+	return 0;
+}
+
+/**
+ * diag_ftp_shutdown() - shutdown of FTP services, when running on z/VM
+ */
+void diag_ftp_shutdown(void)
+{
+	/* Tear down in reverse order of diag_ftp_startup(). */
+	irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
+	unregister_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);
+}
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/diag_ftp.h b/src/kernel/linux/v4.14/drivers/s390/char/diag_ftp.h
new file mode 100644
index 0000000..5d036ba
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/diag_ftp.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    DIAGNOSE X'2C4' instruction based SE/HMC FTP Services, usable on z/VM
+ *
+ *    Notice that all functions exported here are not reentrant.
+ *    So usage should be exclusive, ensured by the caller (e.g. using a
+ *    mutex).
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef __DIAG_FTP_H__
+#define __DIAG_FTP_H__
+
+#include "hmcdrv_ftp.h"
+
+int diag_ftp_startup(void);
+void diag_ftp_shutdown(void);
+ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize);
+
+#endif	 /* __DIAG_FTP_H__ */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/fs3270.c b/src/kernel/linux/v4.14/drivers/s390/char/fs3270.c
new file mode 100644
index 0000000..4f73a38
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/fs3270.c
@@ -0,0 +1,576 @@
+/*
+ * IBM/3270 Driver - fullscreen driver.
+ *
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
+ */
+
+#include <linux/bootmem.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+#include <linux/sched/signal.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/compat.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/ebcdic.h>
+#include <asm/idals.h>
+
+#include "raw3270.h"
+#include "ctrlchar.h"
+
+static struct raw3270_fn fs3270_fn;
+
+struct fs3270 {
+	struct raw3270_view view;
+	struct pid *fs_pid;		/* Pid of controlling program. */
+	int read_command;		/* ccw command to use for reads. */
+	int write_command;		/* ccw command to use for writes. */
+	int attention;			/* Got attention. */
+	int active;			/* Fullscreen view is active. */
+	struct raw3270_request *init;	/* single init request. */
+	wait_queue_head_t wait;		/* Init & attention wait queue. */
+	struct idal_buffer *rdbuf;	/* full-screen-deactivate buffer */
+	size_t rdbuf_size;		/* size of data returned by RDBUF */
+};
+
+static DEFINE_MUTEX(fs3270_mutex);
+
+static void
+fs3270_wake_up(struct raw3270_request *rq, void *data)
+{
+	/* Request-completion callback: data is the view's wait queue head
+	 * (set up in fs3270_do_io). */
+	wake_up((wait_queue_head_t *) data);
+}
+
+static inline int
+fs3270_working(struct fs3270 *fp)
+{
+	/*
+	 * The fullscreen view is in working order if the view
+	 * has been activated AND the initial request is finished.
+	 * Used as the wait condition in fs3270_do_io().
+	 */
+	return fp->active && raw3270_request_final(fp->init);
+}
+
+static int
+fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
+{
+	struct fs3270 *fp;
+	int rc;
+
+	fp = (struct fs3270 *) view;
+	rq->callback = fs3270_wake_up;
+	rq->callback_data = &fp->wait;
+
+	/* NOTE(review): -EACCES from raw3270_start() apparently means the
+	 * view is not (or no longer) the active one - loop back, wait for
+	 * it to become ready again and retry the request. */
+	do {
+		if (!fs3270_working(fp)) {
+			/* Fullscreen view isn't ready yet. */
+			rc = wait_event_interruptible(fp->wait,
+						      fs3270_working(fp));
+			if (rc != 0)
+				break;
+		}
+		rc = raw3270_start(view, rq);
+		if (rc == 0) {
+			/* Started successfully. Now wait for completion. */
+			wait_event(fp->wait, raw3270_request_final(rq));
+		}
+	} while (rc == -EACCES);
+	return rc;
+}
+
+/*
+ * Switch to the fullscreen view.
+ */
+static void
+fs3270_reset_callback(struct raw3270_request *rq, void *data)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) rq->view;
+	/* Make the init request reusable and release any waiters. */
+	raw3270_request_reset(rq);
+	wake_up(&fp->wait);
+}
+
+static void
+fs3270_restore_callback(struct raw3270_request *rq, void *data)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) rq->view;
+	/* If restoring the saved screen failed (I/O error or short
+	 * transfer), the application's display state is lost - tell it
+	 * with a SIGHUP. */
+	if (rq->rc != 0 || rq->rescnt != 0) {
+		if (fp->fs_pid)
+			kill_pid(fp->fs_pid, SIGHUP, 1);
+	}
+	/* The saved buffer has been consumed (or is invalid) either way. */
+	fp->rdbuf_size = 0;
+	raw3270_request_reset(rq);
+	wake_up(&fp->wait);
+}
+
+static int
+fs3270_activate(struct raw3270_view *view)
+{
+	struct fs3270 *fp;
+	char *cp;
+	int rc;
+
+	fp = (struct fs3270 *) view;
+
+	/* If an old init command is still running just return. */
+	if (!raw3270_request_final(fp->init))
+		return 0;
+
+	if (fp->rdbuf_size == 0) {
+		/* No saved buffer. Just clear the screen. */
+		raw3270_request_set_cmd(fp->init, TC_EWRITEA);
+		fp->init->callback = fs3270_reset_callback;
+	} else {
+		/* Restore fullscreen buffer saved by fs3270_deactivate. */
+		raw3270_request_set_cmd(fp->init, TC_EWRITEA);
+		raw3270_request_set_idal(fp->init, fp->rdbuf);
+		fp->init->ccw.count = fp->rdbuf_size;
+		cp = fp->rdbuf->data[0];
+		/* Fill the 5 header bytes reserved by fs3270_deactivate()
+		 * with a TW_KR/TO_SBA/<addr>/<addr>/TO_IC sequence.
+		 * NOTE(review): bytes 6/7 appear to hold the read-back
+		 * cursor address, reused here for the SBA order before
+		 * being blanked (0x40 is EBCDIC space). */
+		cp[0] = TW_KR;
+		cp[1] = TO_SBA;
+		cp[2] = cp[6];
+		cp[3] = cp[7];
+		cp[4] = TO_IC;
+		cp[5] = TO_SBA;
+		cp[6] = 0x40;
+		cp[7] = 0x40;
+		fp->init->rescnt = 0;
+		fp->init->callback = fs3270_restore_callback;
+	}
+	/* On start failure, invoke the callback by hand so waiters are
+	 * released and the request is reset. */
+	rc = fp->init->rc = raw3270_start_locked(view, fp->init);
+	if (rc)
+		fp->init->callback(fp->init, NULL);
+	else
+		fp->active = 1;
+	return rc;
+}
+
+/*
+ * Shutdown fullscreen view.
+ */
+static void
+fs3270_save_callback(struct raw3270_request *rq, void *data)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) rq->view;
+
+	/* Correct idal buffer element 0 address. */
+	fp->rdbuf->data[0] -= 5;
+	fp->rdbuf->size += 5;
+
+	/*
+	 * If the rdbuf command failed or the idal buffer is
+	 * too small for the amount of data returned by the
+	 * rdbuf command, then we have no choice but to send
+	 * a SIGHUP to the application.
+	 * (rescnt == 0 means the buffer was filled completely,
+	 * i.e. the screen contents may have been truncated.)
+	 */
+	if (rq->rc != 0 || rq->rescnt == 0) {
+		if (fp->fs_pid)
+			kill_pid(fp->fs_pid, SIGHUP, 1);
+		fp->rdbuf_size = 0;
+	} else
+		fp->rdbuf_size = fp->rdbuf->size - rq->rescnt;
+	raw3270_request_reset(rq);
+	wake_up(&fp->wait);
+}
+
+static void
+fs3270_deactivate(struct raw3270_view *view)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) view;
+	fp->active = 0;
+
+	/* If an old init command is still running just return. */
+	if (!raw3270_request_final(fp->init))
+		return;
+
+	/* Prepare read-buffer request. */
+	raw3270_request_set_cmd(fp->init, TC_RDBUF);
+	/*
+	 * Hackish: skip first 5 bytes of the idal buffer to make
+	 * room for the TW_KR/TO_SBA/<address>/<address>/TO_IC sequence
+	 * in the activation command.
+	 */
+	fp->rdbuf->data[0] += 5;
+	fp->rdbuf->size -= 5;
+	raw3270_request_set_idal(fp->init, fp->rdbuf);
+	fp->init->rescnt = 0;
+	fp->init->callback = fs3270_save_callback;
+
+	/* Start I/O to read in the 3270 buffer.  Completion (and the
+	 * un-skipping of the 5 header bytes) happens asynchronously in
+	 * fs3270_save_callback(). */
+	fp->init->rc = raw3270_start_locked(view, fp->init);
+	if (fp->init->rc)
+		fp->init->callback(fp->init, NULL);
+}
+
+static void
+fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
+{
+	/* Interrupt routine, wired up as raw3270_fn.intv via a cast. */
+
+	/* Handle ATTN. Set indication and wake waiters for attention. */
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		fp->attention = 1;
+		wake_up(&fp->wait);
+	}
+
+	if (rq) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+			rq->rc = -EIO;
+		else
+			/* Normal end. Copy residual count. */
+			rq->rescnt = irb->scsw.cmd.count;
+	}
+}
+
+/*
+ * Process reads from fullscreen 3270.
+ */
+static ssize_t
+fs3270_read(struct file *filp, char __user *data, size_t count, loff_t *off)
+{
+	struct fs3270 *fp;
+	struct raw3270_request *rq;
+	struct idal_buffer *ib;
+	ssize_t rc;
+	
+	/* NOTE(review): 65535 presumably reflects the 16-bit CCW count
+	 * limit - confirm against the channel-program constraints. */
+	if (count == 0 || count > 65535)
+		return -EINVAL;
+	fp = filp->private_data;
+	if (!fp)
+		return -ENODEV;
+	ib = idal_buffer_alloc(count, 0);
+	if (IS_ERR(ib))
+		return -ENOMEM;
+	rq = raw3270_request_alloc(0);
+	if (!IS_ERR(rq)) {
+		/* NOTE(review): magic ccw command codes (6, fallback 2) -
+		 * verify against the TC_* definitions in raw3270.h. */
+		if (fp->read_command == 0 && fp->write_command != 0)
+			fp->read_command = 6;
+		raw3270_request_set_cmd(rq, fp->read_command ? : 2);
+		raw3270_request_set_idal(rq, ib);
+		/* Block (interruptibly) until the terminal raises an
+		 * attention interrupt (see fs3270_irq). */
+		rc = wait_event_interruptible(fp->wait, fp->attention);
+		fp->attention = 0;
+		if (rc == 0) {
+			rc = fs3270_do_io(&fp->view, rq);
+			if (rc == 0) {
+				/* rescnt is the residual count, so the
+				 * actual transfer length is count - rescnt. */
+				count -= rq->rescnt;
+				if (idal_buffer_to_user(ib, data, count) != 0)
+					rc = -EFAULT;
+				else
+					rc = count;
+
+			}
+		}
+		raw3270_request_free(rq);
+	} else
+		rc = PTR_ERR(rq);
+	idal_buffer_free(ib);
+	return rc;
+}
+
+/*
+ * Process writes to fullscreen 3270.
+ */
+static ssize_t
+fs3270_write(struct file *filp, const char __user *data, size_t count, loff_t *off)
+{
+	struct fs3270 *fp;
+	struct raw3270_request *rq;
+	struct idal_buffer *ib;
+	int write_command;
+	ssize_t rc;
+
+	fp = filp->private_data;
+	if (!fp)
+		return -ENODEV;
+	ib = idal_buffer_alloc(count, 0);
+	if (IS_ERR(ib))
+		return -ENOMEM;
+	rq = raw3270_request_alloc(0);
+	if (!IS_ERR(rq)) {
+		if (idal_buffer_from_user(ib, data, count) == 0) {
+			/* NOTE(review): default write command is 1;
+			 * command 5 is remapped to 13 - confirm these
+			 * opcodes against the TC_* values in raw3270.h. */
+			write_command = fp->write_command ? : 1;
+			if (write_command == 5)
+				write_command = 13;
+			raw3270_request_set_cmd(rq, write_command);
+			raw3270_request_set_idal(rq, ib);
+			rc = fs3270_do_io(&fp->view, rq);
+			if (rc == 0)
+				/* Bytes actually written = count minus
+				 * residual count. */
+				rc = count - rq->rescnt;
+		} else
+			rc = -EFAULT;
+		raw3270_request_free(rq);
+	} else
+		rc = PTR_ERR(rq);
+	idal_buffer_free(ib);
+	return rc;
+}
+
+/*
+ * process ioctl commands for the tube driver
+ */
+static long
+fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	char __user *argp;
+	struct fs3270 *fp;
+	struct raw3270_iocb iocb;
+	int rc;
+
+	fp = filp->private_data;
+	if (!fp)
+		return -ENODEV;
+	/* Same handler serves native and compat ioctl paths. */
+	if (is_compat_task())
+		argp = compat_ptr(arg);
+	else
+		argp = (char __user *)arg;
+	rc = 0;
+	mutex_lock(&fs3270_mutex);
+	switch (cmd) {
+	case TUBICMD:
+		/* Set the ccw command used by fs3270_read(). */
+		fp->read_command = arg;
+		break;
+	case TUBOCMD:
+		/* Set the ccw command used by fs3270_write(). */
+		fp->write_command = arg;
+		break;
+	case TUBGETI:
+		rc = put_user(fp->read_command, argp);
+		break;
+	case TUBGETO:
+		rc = put_user(fp->write_command, argp);
+		break;
+	case TUBGETMOD:
+		/* Report terminal model and geometry to user space. */
+		iocb.model = fp->view.model;
+		iocb.line_cnt = fp->view.rows;
+		iocb.col_cnt = fp->view.cols;
+		iocb.pf_cnt = 24;
+		iocb.re_cnt = 20;
+		iocb.map = 0;
+		if (copy_to_user(argp, &iocb, sizeof(struct raw3270_iocb)))
+			rc = -EFAULT;
+		break;
+	}
+	/* NOTE(review): unrecognized commands fall through with rc == 0. */
+	mutex_unlock(&fs3270_mutex);
+	return rc;
+}
+
+/*
+ * Allocate fs3270 structure.
+ */
+static struct fs3270 *
+fs3270_alloc_view(void)
+{
+	struct fs3270 *fp;
+
+	fp = kzalloc(sizeof(struct fs3270),GFP_KERNEL);
+	if (!fp)
+		return ERR_PTR(-ENOMEM);
+	/* Pre-allocate the single init/deactivate request up front so
+	 * later activation cannot fail on memory. */
+	fp->init = raw3270_request_alloc(0);
+	if (IS_ERR(fp->init)) {
+		kfree(fp);
+		return ERR_PTR(-ENOMEM);
+	}
+	return fp;
+}
+
+/*
+ * Free fs3270 structure.
+ */
+static void
+fs3270_free_view(struct raw3270_view *view)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) view;
+	/* rdbuf is only allocated after a successful raw3270_add_view()
+	 * in fs3270_open(), so it may legitimately still be NULL here. */
+	if (fp->rdbuf)
+		idal_buffer_free(fp->rdbuf);
+	raw3270_request_free(((struct fs3270 *) view)->init);
+	kfree(view);
+}
+
+/*
+ * Unlink fs3270 data structure from filp.
+ */
+static void
+fs3270_release(struct raw3270_view *view)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) view;
+	/* View release callback (raw3270_fn.release): hang up the
+	 * controlling program, if any. */
+	if (fp->fs_pid)
+		kill_pid(fp->fs_pid, SIGHUP, 1);
+}
+
+/* View to a 3270 device. Can be console, tty or fullscreen. */
+static struct raw3270_fn fs3270_fn = {
+	.activate = fs3270_activate,
+	.deactivate = fs3270_deactivate,
+	.intv = (void *) fs3270_irq,
+	.release = fs3270_release,
+	.free = fs3270_free_view
+};
+
+/*
+ * This routine is called whenever a 3270 fullscreen device is opened.
+ */
+static int
+fs3270_open(struct inode *inode, struct file *filp)
+{
+	struct fs3270 *fp;
+	struct idal_buffer *ib;
+	int minor, rc = 0;
+
+	if (imajor(file_inode(filp)) != IBM_FS3270_MAJOR)
+		return -ENODEV;
+	minor = iminor(file_inode(filp));
+	/* Check for minor 0 multiplexer: it maps to the opening
+	 * process's own 3270 tty, if it has one. */
+	if (minor == 0) {
+		struct tty_struct *tty = get_current_tty();
+		if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) {
+			tty_kref_put(tty);
+			return -ENODEV;
+		}
+		minor = tty->index;
+		tty_kref_put(tty);
+	}
+	mutex_lock(&fs3270_mutex);
+	/* Check if some other program is already using fullscreen mode. */
+	fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
+	if (!IS_ERR(fp)) {
+		raw3270_put_view(&fp->view);
+		rc = -EBUSY;
+		goto out;
+	}
+	/* Allocate fullscreen view structure. */
+	fp = fs3270_alloc_view();
+	if (IS_ERR(fp)) {
+		rc = PTR_ERR(fp);
+		goto out;
+	}
+
+	init_waitqueue_head(&fp->wait);
+	fp->fs_pid = get_pid(task_pid(current));
+	rc = raw3270_add_view(&fp->view, &fs3270_fn, minor,
+			      RAW3270_VIEW_LOCK_BH);
+	if (rc) {
+		fs3270_free_view(&fp->view);
+		goto out;
+	}
+
+	/* Allocate idal-buffer.
+	 * NOTE(review): presumably 2 bytes per screen position; the
+	 * extra 5 bytes match the header skipped in fs3270_deactivate(). */
+	ib = idal_buffer_alloc(2*fp->view.rows*fp->view.cols + 5, 0);
+	if (IS_ERR(ib)) {
+		raw3270_put_view(&fp->view);
+		raw3270_del_view(&fp->view);
+		rc = PTR_ERR(ib);
+		goto out;
+	}
+	fp->rdbuf = ib;
+
+	rc = raw3270_activate_view(&fp->view);
+	if (rc) {
+		raw3270_put_view(&fp->view);
+		raw3270_del_view(&fp->view);
+		goto out;
+	}
+	nonseekable_open(inode, filp);
+	filp->private_data = fp;
+out:
+	mutex_unlock(&fs3270_mutex);
+	return rc;
+}
+
+/*
+ * This routine is called when the 3270 tty is closed. We wait
+ * for the remaining request to be completed. Then we clean up.
+ */
+static int
+fs3270_close(struct inode *inode, struct file *filp)
+{
+	struct fs3270 *fp;
+
+	fp = filp->private_data;
+	filp->private_data = NULL;
+	if (fp) {
+		/* Drop the controlling-pid reference before tearing the
+		 * view down (fs3270_release must not signal a dead pid). */
+		put_pid(fp->fs_pid);
+		fp->fs_pid = NULL;
+		raw3270_reset(&fp->view);
+		raw3270_put_view(&fp->view);
+		raw3270_del_view(&fp->view);
+	}
+	return 0;
+}
+
+static const struct file_operations fs3270_fops = {
+	.owner		 = THIS_MODULE,		/* owner */
+	.read		 = fs3270_read,		/* read */
+	.write		 = fs3270_write,	/* write */
+	.unlocked_ioctl	 = fs3270_ioctl,	/* ioctl */
+	.compat_ioctl	 = fs3270_ioctl,	/* ioctl */
+	.open		 = fs3270_open,		/* open */
+	.release	 = fs3270_close,	/* release */
+	.llseek		= no_llseek,
+};
+
+static void fs3270_create_cb(int minor)
+{
+	/* Notifier callback: a 3270 device appeared - register its minor
+	 * and create the /dev/3270/tub<minor> node. */
+	__register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
+	device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
+		      NULL, "3270/tub%d", minor);
+}
+
+static void fs3270_destroy_cb(int minor)
+{
+	/* Notifier callback: device went away - undo fs3270_create_cb(). */
+	device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
+	__unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
+}
+
+static struct raw3270_notifier fs3270_notifier =
+{
+	.create = fs3270_create_cb,
+	.destroy = fs3270_destroy_cb,
+};
+
+/*
+ * 3270 fullscreen driver initialization.
+ */
+static int __init
+fs3270_init(void)
+{
+	int rc;
+
+	rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops);
+	if (rc)
+		return rc;
+	/* Minor 0 is the "current tty" multiplexer node (see fs3270_open). */
+	device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0),
+		      NULL, "3270/tub");
+	raw3270_register_notifier(&fs3270_notifier);
+	return 0;
+}
+
+static void __exit
+fs3270_exit(void)
+{
+	/* Reverse order of fs3270_init(). */
+	raw3270_unregister_notifier(&fs3270_notifier);
+	device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0));
+	__unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
+}
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(IBM_FS3270_MAJOR);
+
+module_init(fs3270_init);
+module_exit(fs3270_exit);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_cache.c b/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_cache.c
new file mode 100644
index 0000000..1f5bdb2
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_cache.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    SE/HMC Drive (Read) Cache Functions
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ *
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/jiffies.h>
+
+#include "hmcdrv_ftp.h"
+#include "hmcdrv_cache.h"
+
+#define HMCDRV_CACHE_TIMEOUT		30 /* aging timeout in seconds */
+
+/**
+ * struct hmcdrv_cache_entry - file cache (only used on read/dir)
+ * @id: FTP command ID
+ * @content: kernel-space buffer, 4k aligned
+ * @len: size of @content cache (0 if caching disabled)
+ * @ofs: start of content within file (-1 if no cached content)
+ * @fname: file name
+ * @fsize: file size
+ * @timeout: cache timeout in jiffies
+ *
+ * Notice that the first three members (id, fname, fsize) are cached on all
+ * read/dir requests. But content is cached only under some preconditions.
+ * Uncached content is signalled by a negative value of @ofs.
+ */
+struct hmcdrv_cache_entry {
+	enum hmcdrv_ftp_cmdid id;
+	char fname[HMCDRV_FTP_FIDENT_MAX];
+	size_t fsize;
+	loff_t ofs;
+	unsigned long timeout;
+	void *content;
+	size_t len;
+};
+
+static int hmcdrv_cache_order; /* cache allocated page order */
+
+static struct hmcdrv_cache_entry hmcdrv_cache_file = {
+	.fsize = SIZE_MAX,
+	.ofs = -1,
+	.len = 0,
+	.fname = {'\0'}
+};
+
+/**
+ * hmcdrv_cache_get() - looks for file data/content in read cache
+ * @ftp: pointer to FTP command specification
+ *
+ * Return: number of bytes read from cache or a negative number if nothing
+ * in content cache (for the file/cmd specified in @ftp)
+ */
+static ssize_t hmcdrv_cache_get(const struct hmcdrv_ftp_cmdspec *ftp)
+{
+	loff_t pos; /* position in cache (signed) */
+	ssize_t len;
+
+	/* cache describes a different command or a different file */
+	if ((ftp->id != hmcdrv_cache_file.id) ||
+	    strcmp(hmcdrv_cache_file.fname, ftp->fname))
+		return -1;
+
+	if (ftp->ofs >= hmcdrv_cache_file.fsize) /* EOF ? */
+		return 0;
+
+	/* negative ofs means "no cached content"; also honor aging timeout */
+	if ((hmcdrv_cache_file.ofs < 0) || /* has content? */
+	    time_after(jiffies, hmcdrv_cache_file.timeout))
+		return -1;
+
+	/* there seems to be cached content - calculate the maximum number
+	 * of bytes that can be returned (regarding file size and offset)
+	 */
+	len = hmcdrv_cache_file.fsize - ftp->ofs;
+
+	if (len > ftp->len)
+		len = ftp->len;
+
+	/* check if the requested chunk falls into our cache (which starts
+	 * at offset 'hmcdrv_cache_file.ofs' in the file of interest)
+	 */
+	pos = ftp->ofs - hmcdrv_cache_file.ofs;
+
+	if ((pos >= 0) &&
+	    ((pos + len) <= hmcdrv_cache_file.len)) {
+
+		memcpy(ftp->buf,
+		       hmcdrv_cache_file.content + pos,
+		       len);
+		pr_debug("using cached content of '%s', returning %zd/%zd bytes\n",
+			 hmcdrv_cache_file.fname, len,
+			 hmcdrv_cache_file.fsize);
+
+		return len;
+	}
+
+	/* requested chunk only partially (or not at all) in cache */
+	return -1;
+}
+
+/**
+ * hmcdrv_cache_do() - do a HMC drive CD/DVD transfer with cache update
+ * @ftp: pointer to FTP command specification
+ * @func: FTP transfer function to be used
+ *
+ * Performs the transfer via @func and, if possible, refills the content
+ * cache from the result.  File meta data (cmd ID, name, size) is cached
+ * unconditionally on success.
+ *
+ * Return: number of bytes read/written or a (negative) error code
+ */
+static ssize_t hmcdrv_cache_do(const struct hmcdrv_ftp_cmdspec *ftp,
+			       hmcdrv_cache_ftpfunc func)
+{
+	ssize_t len;
+
+	/* only cache content if the read/dir cache really exists
+	 * (hmcdrv_cache_file.len > 0), is large enough to handle the
+	 * request (hmcdrv_cache_file.len >= ftp->len) and there is a need
+	 * to do so (ftp->len > 0)
+	 */
+	if ((ftp->len > 0) && (hmcdrv_cache_file.len >= ftp->len)) {
+
+		/* because the cache is not located at ftp->buf, we have to
+		 * assemble a new HMC drive FTP cmd specification (pointing
+		 * to our cache, and using the increased size)
+		 */
+		struct hmcdrv_ftp_cmdspec cftp = *ftp; /* make a copy */
+		cftp.buf = hmcdrv_cache_file.content;  /* and update */
+		cftp.len = hmcdrv_cache_file.len;      /* buffer data */
+
+		len = func(&cftp, &hmcdrv_cache_file.fsize); /* now do */
+
+		if (len > 0) {
+			pr_debug("caching %zd bytes content for '%s'\n",
+				 len, ftp->fname);
+
+			/* never return more than the caller asked for */
+			if (len > ftp->len)
+				len = ftp->len;
+
+			hmcdrv_cache_file.ofs = ftp->ofs;
+			hmcdrv_cache_file.timeout = jiffies +
+				HMCDRV_CACHE_TIMEOUT * HZ;
+			memcpy(ftp->buf, hmcdrv_cache_file.content, len);
+		}
+	} else {
+		/* request does not fit into the cache: transfer directly */
+		len = func(ftp, &hmcdrv_cache_file.fsize);
+		hmcdrv_cache_file.ofs = -1; /* invalidate content */
+	}
+
+	if (len > 0) {
+		/* cache some file info (FTP command, file name and file
+		 * size) unconditionally
+		 */
+		strlcpy(hmcdrv_cache_file.fname, ftp->fname,
+			HMCDRV_FTP_FIDENT_MAX);
+		hmcdrv_cache_file.id = ftp->id;
+		pr_debug("caching cmd %d, file size %zu for '%s'\n",
+			 ftp->id, hmcdrv_cache_file.fsize, ftp->fname);
+	}
+
+	return len;
+}
+
+/**
+ * hmcdrv_cache_cmd() - perform a cached HMC drive CD/DVD transfer
+ * @ftp: pointer to FTP command specification
+ * @func: FTP transfer function to be used
+ *
+ * Attention: Notice that this function is not reentrant - so the caller
+ * must ensure exclusive execution.
+ *
+ * Return: number of bytes read/written or a (negative) error code
+ */
+ssize_t hmcdrv_cache_cmd(const struct hmcdrv_ftp_cmdspec *ftp,
+			 hmcdrv_cache_ftpfunc func)
+{
+	ssize_t len;
+
+	if ((ftp->id == HMCDRV_FTP_DIR) || /* read cache */
+	    (ftp->id == HMCDRV_FTP_NLIST) ||
+	    (ftp->id == HMCDRV_FTP_GET)) {
+
+		len = hmcdrv_cache_get(ftp);
+
+		if (len >= 0) /* got it from cache ? */
+			return len; /* yes */
+
+		len = hmcdrv_cache_do(ftp, func);
+
+		if (len >= 0)
+			return len;
+
+	} else {
+		len = func(ftp, NULL); /* simply do original command */
+	}
+
+	/* invalidate the (read) cache in case there was a write operation
+	 * or an error on read/dir
+	 */
+	hmcdrv_cache_file.id = HMCDRV_FTP_NOOP;
+	/* NOTE(review): fsize is a size_t; the static initializer uses
+	 * SIZE_MAX while invalidation here uses LLONG_MAX - both act as a
+	 * "huge/invalid" marker, but the inconsistency is worth confirming.
+	 */
+	hmcdrv_cache_file.fsize = LLONG_MAX;
+	hmcdrv_cache_file.ofs = -1;
+
+	return len;
+}
+
+/**
+ * hmcdrv_cache_startup() - startup of HMC drive cache
+ * @cachesize: cache size in bytes (0 disables content caching)
+ *
+ * Allocates the content cache buffer (rounded up to a power-of-two page
+ * order).  With @cachesize == 0 the function only records "no cache".
+ *
+ * Return: 0 on success, else a (negative) error code
+ */
+int hmcdrv_cache_startup(size_t cachesize)
+{
+	if (cachesize > 0) { /* perform caching ? */
+		hmcdrv_cache_order = get_order(cachesize);
+		hmcdrv_cache_file.content =
+			(void *) __get_free_pages(GFP_KERNEL | GFP_DMA,
+						  hmcdrv_cache_order);
+
+		if (!hmcdrv_cache_file.content) {
+			pr_err("Allocating the requested cache size of %zu bytes failed\n",
+			       cachesize);
+			return -ENOMEM;
+		}
+
+		pr_debug("content cache enabled, size is %zu bytes\n",
+			 cachesize);
+	}
+
+	hmcdrv_cache_file.len = cachesize;
+	return 0;
+}
+
+/**
+ * hmcdrv_cache_shutdown() - shutdown of HMC drive cache
+ *
+ * Frees the content buffer (if any) and resets all cached file meta data
+ * to the "invalid" state.
+ */
+void hmcdrv_cache_shutdown(void)
+{
+	if (hmcdrv_cache_file.content) {
+		free_pages((unsigned long) hmcdrv_cache_file.content,
+			   hmcdrv_cache_order);
+		hmcdrv_cache_file.content = NULL;
+	}
+
+	hmcdrv_cache_file.id = HMCDRV_FTP_NOOP;
+	hmcdrv_cache_file.fsize = LLONG_MAX;
+	hmcdrv_cache_file.ofs = -1;
+	hmcdrv_cache_file.len = 0; /* no cache */
+}
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_cache.h b/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_cache.h
new file mode 100644
index 0000000..d69f9fe
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_cache.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    SE/HMC Drive (Read) Cache Functions
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef __HMCDRV_CACHE_H__
+#define __HMCDRV_CACHE_H__
+
+#include <linux/mmzone.h>
+#include "hmcdrv_ftp.h"
+
+#define HMCDRV_CACHE_SIZE_DFLT	(MAX_ORDER_NR_PAGES * PAGE_SIZE / 2UL)
+
+typedef ssize_t (*hmcdrv_cache_ftpfunc)(const struct hmcdrv_ftp_cmdspec *ftp,
+					size_t *fsize);
+
+ssize_t hmcdrv_cache_cmd(const struct hmcdrv_ftp_cmdspec *ftp,
+			 hmcdrv_cache_ftpfunc func);
+int hmcdrv_cache_startup(size_t cachesize);
+void hmcdrv_cache_shutdown(void);
+
+#endif	 /* __HMCDRV_CACHE_H__ */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_dev.c b/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_dev.c
new file mode 100644
index 0000000..20e9cd5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_dev.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    HMC Drive CD/DVD Device
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ *
+ *    This file provides a Linux "misc" character device for access to an
+ *    assigned HMC drive CD/DVD-ROM. It works as follows: First create the
+ *    device by calling hmcdrv_dev_init(). After open() a lseek(fd, 0,
+ *    SEEK_END) indicates that a new FTP command follows (not needed on the
+ *    first command after open). Then write() the FTP command ASCII string
+ *    to it, e.g. "dir /" or "nls <directory>" or "get <filename>". At the
+ *    end read() the response.
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/capability.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include "hmcdrv_dev.h"
+#include "hmcdrv_ftp.h"
+
+/* If the following macro is defined, then the HMC device creates its own
+ * separate device class (and dynamically assigns a major number). If not
+ * defined then the HMC device is assigned to the "misc" class devices.
+ *
+#define HMCDRV_DEV_CLASS "hmcftp"
+ */
+
+#define HMCDRV_DEV_NAME  "hmcdrv"
+#define HMCDRV_DEV_BUSY_DELAY	 500 /* delay between -EBUSY trials in ms */
+#define HMCDRV_DEV_BUSY_RETRIES  3   /* number of retries on -EBUSY */
+
+/* Device bookkeeping: either a plain cdev (own class) or a misc device. */
+struct hmcdrv_dev_node {
+
+#ifdef HMCDRV_DEV_CLASS
+	struct cdev dev; /* character device structure */
+	umode_t mode;	 /* mode of device node (unused, zero) */
+#else
+	struct miscdevice dev; /* "misc" device structure */
+#endif
+
+};
+
+static int hmcdrv_dev_open(struct inode *inode, struct file *fp);
+static int hmcdrv_dev_release(struct inode *inode, struct file *fp);
+static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence);
+static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
+			       size_t len, loff_t *pos);
+static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
+				size_t len, loff_t *pos);
+static ssize_t hmcdrv_dev_transfer(char __kernel *cmd, loff_t offset,
+				   char __user *buf, size_t len);
+
+/*
+ * device operations
+ */
+static const struct file_operations hmcdrv_dev_fops = {
+	.open = hmcdrv_dev_open,
+	.llseek = hmcdrv_dev_seek,
+	.release = hmcdrv_dev_release,
+	.read = hmcdrv_dev_read,
+	.write = hmcdrv_dev_write,
+};
+
+static struct hmcdrv_dev_node hmcdrv_dev; /* HMC device struct (static) */
+
+#ifdef HMCDRV_DEV_CLASS
+
+static struct class *hmcdrv_dev_class; /* device class pointer */
+static dev_t hmcdrv_dev_no; /* device number (major/minor) */
+
+/**
+ * hmcdrv_dev_name() - provides a naming hint for a device node in /dev
+ * @dev: device for which the naming/mode hint is
+ * @mode: file mode for device node created in /dev
+ *
+ * See: devtmpfs.c, function devtmpfs_create_node()
+ *
+ * Return: recommended device file name in /dev (kmalloc'ed, caller frees),
+ * or NULL if the kernel device has no name or allocation failed
+ */
+static char *hmcdrv_dev_name(struct device *dev, umode_t *mode)
+{
+	char *nodename = NULL;
+	const char *devname = dev_name(dev); /* kernel device name */
+
+	if (devname)
+		nodename = kasprintf(GFP_KERNEL, "%s", devname);
+
+	/* on device destroy (rmmod) the mode pointer may be NULL
+	 */
+	if (mode)
+		*mode = hmcdrv_dev.mode;
+
+	return nodename;
+}
+
+#endif	/* HMCDRV_DEV_CLASS */
+
+/*
+ * open()
+ *
+ * Rejects non-blocking and read-only opens, pins the module and starts
+ * the FTP backend.  On backend failure the module reference is dropped
+ * again and the error is returned to the caller.
+ */
+static int hmcdrv_dev_open(struct inode *inode, struct file *fp)
+{
+	int rc;
+
+	/* check for non-blocking access, which is really unsupported
+	 */
+	if (fp->f_flags & O_NONBLOCK)
+		return -EINVAL;
+
+	/* Because it makes no sense to open this device read-only (then a
+	 * FTP command cannot be emitted), we respond with an error.
+	 */
+	if ((fp->f_flags & O_ACCMODE) == O_RDONLY)
+		return -EINVAL;
+
+	/* prevent unloading this module as long as anyone holds the
+	 * device file open - so increment the reference count here
+	 */
+	if (!try_module_get(THIS_MODULE))
+		return -ENODEV;
+
+	fp->private_data = NULL; /* no command yet */
+	rc = hmcdrv_ftp_startup();
+	if (rc)
+		module_put(THIS_MODULE);
+
+	pr_debug("open file '/dev/%pD' with return code %d\n", fp, rc);
+	return rc;
+}
+
+/*
+ * release()
+ *
+ * Frees the stored FTP command string, shuts down the FTP backend and
+ * drops the module reference taken in open().
+ */
+static int hmcdrv_dev_release(struct inode *inode, struct file *fp)
+{
+	pr_debug("closing file '/dev/%pD'\n", fp);
+	kfree(fp->private_data);
+	fp->private_data = NULL;
+	hmcdrv_ftp_shutdown();
+	module_put(THIS_MODULE);
+	return 0;
+}
+
+/*
+ * lseek()
+ *
+ * SEEK_SET/SEEK_CUR behave as usual; SEEK_END is (ab)used as "set
+ * absolute position and clear the stored FTP command", see the file
+ * header comment.
+ */
+static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence)
+{
+	switch (whence) {
+	case SEEK_CUR: /* relative to current file position */
+		pos += fp->f_pos; /* new position stored in 'pos' */
+		break;
+
+	case SEEK_SET: /* absolute (relative to beginning of file) */
+		break; /* SEEK_SET */
+
+		/* We use SEEK_END as a special indicator for a SEEK_SET
+		 * (set absolute position), combined with a FTP command
+		 * clear.
+		 */
+	case SEEK_END:
+		/* NOTE(review): the NULL check is redundant, kfree(NULL)
+		 * is a no-op */
+		if (fp->private_data) {
+			kfree(fp->private_data);
+			fp->private_data = NULL;
+		}
+
+		break; /* SEEK_END */
+
+	default: /* SEEK_DATA, SEEK_HOLE: unsupported */
+		return -EINVAL;
+	}
+
+	if (pos < 0)
+		return -EINVAL;
+
+	if (fp->f_pos != pos)
+		++fp->f_version;
+
+	fp->f_pos = pos;
+	return pos;
+}
+
+/*
+ * transfer (helper function)
+ *
+ * Runs the stored FTP command via hmcdrv_ftp_cmd(), retrying up to
+ * HMCDRV_DEV_BUSY_RETRIES times (with HMCDRV_DEV_BUSY_DELAY ms pauses)
+ * while the backend reports -EBUSY.
+ */
+static ssize_t hmcdrv_dev_transfer(char __kernel *cmd, loff_t offset,
+				   char __user *buf, size_t len)
+{
+	ssize_t retlen;
+	unsigned trials = HMCDRV_DEV_BUSY_RETRIES;
+
+	do {
+		retlen = hmcdrv_ftp_cmd(cmd, offset, buf, len);
+
+		if (retlen != -EBUSY)
+			break;
+
+		msleep(HMCDRV_DEV_BUSY_DELAY);
+
+	} while (--trials > 0);
+
+	return retlen;
+}
+
+/*
+ * read()
+ *
+ * Executes the FTP command previously written to the device and copies
+ * the response to user space.  Fails with -EBADF if no command was
+ * written yet or the file was opened write-only.
+ */
+static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
+			       size_t len, loff_t *pos)
+{
+	ssize_t retlen;
+
+	if (((fp->f_flags & O_ACCMODE) == O_WRONLY) ||
+	    (fp->private_data == NULL)) { /* no FTP cmd defined ? */
+		return -EBADF;
+	}
+
+	retlen = hmcdrv_dev_transfer((char *) fp->private_data,
+				     *pos, ubuf, len);
+
+	pr_debug("read from file '/dev/%pD' at %lld returns %zd/%zu\n",
+		 fp, (long long) *pos, retlen, len);
+
+	if (retlen > 0)
+		*pos += retlen;
+
+	return retlen;
+}
+
+/*
+ * write()
+ *
+ * The first write after open (or after a SEEK_END) stores the FTP
+ * command string in fp->private_data.  Any subsequent write transfers
+ * data for that command (e.g. "put"/"append") at file position *pos.
+ */
+static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
+				size_t len, loff_t *pos)
+{
+	ssize_t retlen;
+
+	pr_debug("writing file '/dev/%pD' at pos. %lld with length %zd\n",
+		 fp, (long long) *pos, len);
+
+	if (!fp->private_data) { /* first expect a cmd write */
+		fp->private_data = kmalloc(len + 1, GFP_KERNEL);
+
+		if (!fp->private_data)
+			return -ENOMEM;
+
+		/* NUL-terminate the copied command string */
+		if (!copy_from_user(fp->private_data, ubuf, len)) {
+			((char *)fp->private_data)[len] = '\0';
+			return len;
+		}
+
+		kfree(fp->private_data);
+		fp->private_data = NULL;
+		return -EFAULT;
+	}
+
+	retlen = hmcdrv_dev_transfer((char *) fp->private_data,
+				     *pos, (char __user *) ubuf, len);
+	if (retlen > 0)
+		*pos += retlen;
+
+	pr_debug("write to file '/dev/%pD' returned %zd\n", fp, retlen);
+
+	return retlen;
+}
+
+/**
+ * hmcdrv_dev_init() - creates a HMC drive CD/DVD device
+ *
+ * This function creates a HMC drive CD/DVD kernel device and an associated
+ * device under /dev, using a dynamically allocated major number.
+ *
+ * Two build variants exist: with HMCDRV_DEV_CLASS defined a dedicated
+ * device class/cdev is created; otherwise a "misc" device is registered.
+ *
+ * Return: 0 on success, else an error code.
+ */
+int hmcdrv_dev_init(void)
+{
+	int rc;
+
+#ifdef HMCDRV_DEV_CLASS
+	struct device *dev;
+
+	rc = alloc_chrdev_region(&hmcdrv_dev_no, 0, 1, HMCDRV_DEV_NAME);
+
+	if (rc)
+		goto out_err;
+
+	cdev_init(&hmcdrv_dev.dev, &hmcdrv_dev_fops);
+	hmcdrv_dev.dev.owner = THIS_MODULE;
+	rc = cdev_add(&hmcdrv_dev.dev, hmcdrv_dev_no, 1);
+
+	if (rc)
+		goto out_unreg;
+
+	/* At this point the character device exists in the kernel (see
+	 * /proc/devices), but not under /dev nor /sys/devices/virtual. So
+	 * we have to create an associated class (see /sys/class).
+	 */
+	hmcdrv_dev_class = class_create(THIS_MODULE, HMCDRV_DEV_CLASS);
+
+	if (IS_ERR(hmcdrv_dev_class)) {
+		rc = PTR_ERR(hmcdrv_dev_class);
+		goto out_devdel;
+	}
+
+	/* Finally a device node in /dev has to be established (as 'mknod'
+	 * does from the command line). Notice that assignment of a device
+	 * node name/mode function is optional (only for mode != 0600).
+	 */
+	hmcdrv_dev.mode = 0; /* "unset" */
+	hmcdrv_dev_class->devnode = hmcdrv_dev_name;
+
+	dev = device_create(hmcdrv_dev_class, NULL, hmcdrv_dev_no, NULL,
+			    "%s", HMCDRV_DEV_NAME);
+	if (!IS_ERR(dev))
+		return 0;
+
+	/* error: undo everything in reverse order of creation */
+	rc = PTR_ERR(dev);
+	class_destroy(hmcdrv_dev_class);
+	hmcdrv_dev_class = NULL;
+
+out_devdel:
+	cdev_del(&hmcdrv_dev.dev);
+
+out_unreg:
+	unregister_chrdev_region(hmcdrv_dev_no, 1);
+
+out_err:
+
+#else  /* !HMCDRV_DEV_CLASS */
+	hmcdrv_dev.dev.minor = MISC_DYNAMIC_MINOR;
+	hmcdrv_dev.dev.name = HMCDRV_DEV_NAME;
+	hmcdrv_dev.dev.fops = &hmcdrv_dev_fops;
+	hmcdrv_dev.dev.mode = 0; /* finally produces 0600 */
+	rc = misc_register(&hmcdrv_dev.dev);
+#endif	/* HMCDRV_DEV_CLASS */
+
+	return rc;
+}
+
+/**
+ * hmcdrv_dev_exit() - destroys a HMC drive CD/DVD device
+ *
+ * Mirror of hmcdrv_dev_init(): tears down the class/cdev variant or
+ * deregisters the misc device, depending on HMCDRV_DEV_CLASS.
+ */
+void hmcdrv_dev_exit(void)
+{
+#ifdef HMCDRV_DEV_CLASS
+	if (!IS_ERR_OR_NULL(hmcdrv_dev_class)) {
+		device_destroy(hmcdrv_dev_class, hmcdrv_dev_no);
+		class_destroy(hmcdrv_dev_class);
+	}
+
+	cdev_del(&hmcdrv_dev.dev);
+	unregister_chrdev_region(hmcdrv_dev_no, 1);
+#else  /* !HMCDRV_DEV_CLASS */
+	misc_deregister(&hmcdrv_dev.dev);
+#endif	/* HMCDRV_DEV_CLASS */
+}
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_dev.h b/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_dev.h
new file mode 100644
index 0000000..558eba9
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_dev.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    SE/HMC Drive FTP Device
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef __HMCDRV_DEV_H__
+#define __HMCDRV_DEV_H__
+
+int hmcdrv_dev_init(void);
+void hmcdrv_dev_exit(void);
+
+#endif	 /* __HMCDRV_DEV_H__ */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_ftp.c b/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_ftp.c
new file mode 100644
index 0000000..0e70397
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_ftp.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    HMC Drive FTP Services
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+
+#include <linux/ctype.h>
+#include <linux/crc16.h>
+
+#include "hmcdrv_ftp.h"
+#include "hmcdrv_cache.h"
+#include "sclp_ftp.h"
+#include "diag_ftp.h"
+
+/**
+ * struct hmcdrv_ftp_ops - HMC drive FTP operations
+ * @startup: startup function
+ * @shutdown: shutdown function
+ * @transfer: FTP transfer function
+ */
+struct hmcdrv_ftp_ops {
+	int (*startup)(void);
+	void (*shutdown)(void);
+	ssize_t (*transfer)(const struct hmcdrv_ftp_cmdspec *ftp,
+			    size_t *fsize);
+};
+
+static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len);
+static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp);
+
+static const struct hmcdrv_ftp_ops *hmcdrv_ftp_funcs; /* current operations */
+static DEFINE_MUTEX(hmcdrv_ftp_mutex); /* mutex for hmcdrv_ftp_funcs */
+static unsigned hmcdrv_ftp_refcnt; /* start/shutdown reference counter */
+
+/**
+ * hmcdrv_ftp_cmd_getid() - determine FTP command ID from a command string
+ * @cmd: FTP command string (NOT zero-terminated)
+ * @len: length of FTP command string in @cmd
+ *
+ * Uses a CRC16-based perfect hash into a small fixed table.
+ *
+ * Return: the matching command ID, or HMCDRV_FTP_NOOP if unknown/empty
+ */
+static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len)
+{
+	/* HMC FTP command descriptor */
+	struct hmcdrv_ftp_cmd_desc {
+		const char *str;	   /* command string */
+		enum hmcdrv_ftp_cmdid cmd; /* associated command as enum */
+	};
+
+	/* Description of all HMC drive FTP commands
+	 *
+	 * Notes:
+	 * 1. Array size should be a prime number.
+	 * 2. Do not change the order of commands in table (because the
+	 *    index is determined by CRC % ARRAY_SIZE).
+	 * 3. Original command 'nlist' was renamed, else the CRC would
+	 *    collide with 'append' (see point 2).
+	 */
+	static const struct hmcdrv_ftp_cmd_desc ftpcmds[7] = {
+
+		{.str = "get", /* [0] get (CRC = 0x68eb) */
+		 .cmd = HMCDRV_FTP_GET},
+		{.str = "dir", /* [1] dir (CRC = 0x6a9e) */
+		 .cmd = HMCDRV_FTP_DIR},
+		{.str = "delete", /* [2] delete (CRC = 0x53ae) */
+		 .cmd = HMCDRV_FTP_DELETE},
+		{.str = "nls", /* [3] nls (CRC = 0xf87c) */
+		 .cmd = HMCDRV_FTP_NLIST},
+		{.str = "put", /* [4] put (CRC = 0xac56) */
+		 .cmd = HMCDRV_FTP_PUT},
+		{.str = "append", /* [5] append (CRC = 0xf56e) */
+		 .cmd = HMCDRV_FTP_APPEND},
+		{.str = NULL} /* [6] unused */
+	};
+
+	const struct hmcdrv_ftp_cmd_desc *pdesc;
+
+	u16 crc = 0xffffU;
+
+	if (len == 0)
+		return HMCDRV_FTP_NOOP; /* error indicator */
+
+	crc = crc16(crc, cmd, len);
+	pdesc = ftpcmds + (crc % ARRAY_SIZE(ftpcmds));
+	/* NOTE(review): @cmd is not NUL-terminated at the token boundary,
+	 * so this %s may print text beyond the first @len characters (the
+	 * overall command line is terminated by the caller) - debug only.
+	 */
+	pr_debug("FTP command '%s' has CRC 0x%04x, at table pos. %lu\n",
+		 cmd, crc, (crc % ARRAY_SIZE(ftpcmds)));
+
+	if (!pdesc->str || strncmp(pdesc->str, cmd, len))
+		return HMCDRV_FTP_NOOP;
+
+	pr_debug("FTP command '%s' found, with ID %d\n",
+		 pdesc->str, pdesc->cmd);
+
+	return pdesc->cmd;
+}
+
+/**
+ * hmcdrv_ftp_parse() - HMC drive FTP command parser
+ * @cmd: FTP command string "<cmd> <filename>" (modified in place:
+ *	 tokens are NUL-terminated inside the buffer)
+ * @ftp: Pointer to FTP command specification buffer (output); @ftp->fname
+ *	 points into @cmd after return
+ *
+ * Return: 0 on success, else a (negative) error code
+ */
+static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp)
+{
+	char *start;
+	int argc = 0;
+
+	ftp->id = HMCDRV_FTP_NOOP;
+	ftp->fname = NULL;
+
+	while (*cmd != '\0') {
+
+		/* skip leading whitespace before the next token */
+		while (isspace(*cmd))
+			++cmd;
+
+		if (*cmd == '\0')
+			break;
+
+		start = cmd;
+
+		switch (argc) {
+		case 0: /* 1st argument (FTP command) */
+			while ((*cmd != '\0') && !isspace(*cmd))
+				++cmd;
+			ftp->id = hmcdrv_ftp_cmd_getid(start, cmd - start);
+			break;
+		case 1: /* 2nd / last argument (rest of line) */
+			while ((*cmd != '\0') && !iscntrl(*cmd))
+				++cmd;
+			ftp->fname = start;
+			/* fall through */
+		default:
+			*cmd = '\0';
+			break;
+		} /* switch */
+
+		++argc;
+	} /* while */
+
+	/* both a known command and a file name argument are mandatory */
+	if (!ftp->fname || (ftp->id == HMCDRV_FTP_NOOP))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * hmcdrv_ftp_do() - perform a HMC drive FTP, with data from kernel-space
+ * @ftp: pointer to FTP command specification
+ *
+ * Serialized by hmcdrv_ftp_mutex; fails with -ENXIO unless
+ * hmcdrv_ftp_startup() succeeded before.
+ *
+ * Return: number of bytes read/written or a negative error code
+ */
+ssize_t hmcdrv_ftp_do(const struct hmcdrv_ftp_cmdspec *ftp)
+{
+	ssize_t len;
+
+	mutex_lock(&hmcdrv_ftp_mutex);
+
+	if (hmcdrv_ftp_funcs && hmcdrv_ftp_refcnt) {
+		pr_debug("starting transfer, cmd %d for '%s' at %lld with %zd bytes\n",
+			 ftp->id, ftp->fname, (long long) ftp->ofs, ftp->len);
+		len = hmcdrv_cache_cmd(ftp, hmcdrv_ftp_funcs->transfer);
+	} else {
+		len = -ENXIO;
+	}
+
+	mutex_unlock(&hmcdrv_ftp_mutex);
+	return len;
+}
+EXPORT_SYMBOL(hmcdrv_ftp_do);
+
+/**
+ * hmcdrv_ftp_probe() - probe for the HMC drive FTP service
+ *
+ * Issues a NOOP transfer into a scratch page; -ENOENT/-EBUSY from the
+ * backend still count as "service available".
+ *
+ * Return: 0 if service is available, else an (negative) error code
+ */
+int hmcdrv_ftp_probe(void)
+{
+	int rc;
+
+	struct hmcdrv_ftp_cmdspec ftp = {
+		.id = HMCDRV_FTP_NOOP,
+		.ofs = 0,
+		.fname = "",
+		.len = PAGE_SIZE
+	};
+
+	ftp.buf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+
+	if (!ftp.buf)
+		return -ENOMEM;
+
+	rc = hmcdrv_ftp_startup();
+
+	if (rc)
+		goto out;
+
+	rc = hmcdrv_ftp_do(&ftp);
+	hmcdrv_ftp_shutdown();
+
+	switch (rc) {
+	case -ENOENT: /* no such file/media or currently busy, */
+	case -EBUSY:  /* but service seems to be available */
+		rc = 0;
+		break;
+	default: /* leave 'rc' as it is for [0, -EPERM, -E...] */
+		if (rc > 0)
+			rc = 0; /* clear length (success) */
+		break;
+	} /* switch */
+out:
+	free_page((unsigned long) ftp.buf);
+	return rc;
+}
+EXPORT_SYMBOL(hmcdrv_ftp_probe);
+
+/**
+ * hmcdrv_ftp_cmd() - Perform a HMC drive FTP, with data from user-space
+ *
+ * @cmd: FTP command string "<cmd> <filename>"
+ * @offset: file position to read/write
+ * @buf: user-space buffer for read/written directory/file
+ * @len: size of @buf (read/dir) or number of bytes to write
+ *
+ * This function must not be called before hmcdrv_ftp_startup() was called.
+ *
+ * Return: number of bytes read/written or a negative error code
+ */
+ssize_t hmcdrv_ftp_cmd(char __kernel *cmd, loff_t offset,
+		       char __user *buf, size_t len)
+{
+	int order;
+
+	struct hmcdrv_ftp_cmdspec ftp = {.len = len, .ofs = offset};
+	ssize_t retlen = hmcdrv_ftp_parse(cmd, &ftp);
+
+	if (retlen)
+		return retlen;
+
+	/* bounce buffer for the user-space data, sized by the request;
+	 * NOTE(review): behavior of get_order() for len == 0 should be
+	 * confirmed (e.g. a zero-length delete/put request).
+	 */
+	order = get_order(ftp.len);
+	ftp.buf = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, order);
+
+	if (!ftp.buf)
+		return -ENOMEM;
+
+	switch (ftp.id) {
+	case HMCDRV_FTP_DIR:
+	case HMCDRV_FTP_NLIST:
+	case HMCDRV_FTP_GET:
+		retlen = hmcdrv_ftp_do(&ftp);
+
+		if ((retlen >= 0) &&
+		    copy_to_user(buf, ftp.buf, retlen))
+			retlen = -EFAULT;
+		break;
+
+	case HMCDRV_FTP_PUT:
+	case HMCDRV_FTP_APPEND:
+		if (!copy_from_user(ftp.buf, buf, ftp.len))
+			retlen = hmcdrv_ftp_do(&ftp);
+		else
+			retlen = -EFAULT;
+		break;
+
+	case HMCDRV_FTP_DELETE:
+		retlen = hmcdrv_ftp_do(&ftp);
+		break;
+
+	default:
+		retlen = -EOPNOTSUPP;
+		break;
+	}
+
+	free_pages((unsigned long) ftp.buf, order);
+	return retlen;
+}
+
+/**
+ * hmcdrv_ftp_startup() - startup of HMC drive FTP functionality for a
+ * dedicated (owner) instance
+ *
+ * Reference-counted: the backend (DIAG on z/VM, SCLP on LPAR/KVM guest)
+ * is selected and started only on the first call; later calls just bump
+ * the refcount.
+ *
+ * Return: 0 on success, else an (negative) error code
+ */
+int hmcdrv_ftp_startup(void)
+{
+	static const struct hmcdrv_ftp_ops hmcdrv_ftp_zvm = {
+		.startup = diag_ftp_startup,
+		.shutdown = diag_ftp_shutdown,
+		.transfer = diag_ftp_cmd
+	};
+
+	static const struct hmcdrv_ftp_ops hmcdrv_ftp_lpar = {
+		.startup = sclp_ftp_startup,
+		.shutdown = sclp_ftp_shutdown,
+		.transfer = sclp_ftp_cmd
+	};
+
+	int rc = 0;
+
+	mutex_lock(&hmcdrv_ftp_mutex); /* block transfers while start-up */
+
+	if (hmcdrv_ftp_refcnt == 0) {
+		if (MACHINE_IS_VM)
+			hmcdrv_ftp_funcs = &hmcdrv_ftp_zvm;
+		else if (MACHINE_IS_LPAR || MACHINE_IS_KVM)
+			hmcdrv_ftp_funcs = &hmcdrv_ftp_lpar;
+		else
+			rc = -EOPNOTSUPP;
+
+		if (hmcdrv_ftp_funcs)
+			rc = hmcdrv_ftp_funcs->startup();
+	}
+
+	if (!rc)
+		++hmcdrv_ftp_refcnt;
+
+	mutex_unlock(&hmcdrv_ftp_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(hmcdrv_ftp_startup);
+
+/**
+ * hmcdrv_ftp_shutdown() - shutdown of HMC drive FTP functionality for a
+ * dedicated (owner) instance
+ *
+ * Must balance a successful hmcdrv_ftp_startup(); the backend is shut
+ * down when the last reference is dropped.
+ * NOTE(review): an unbalanced call would underflow the (unsigned)
+ * refcount - callers are trusted to pair start-up/shutdown.
+ */
+void hmcdrv_ftp_shutdown(void)
+{
+	mutex_lock(&hmcdrv_ftp_mutex);
+	--hmcdrv_ftp_refcnt;
+
+	if ((hmcdrv_ftp_refcnt == 0) && hmcdrv_ftp_funcs)
+		hmcdrv_ftp_funcs->shutdown();
+
+	mutex_unlock(&hmcdrv_ftp_mutex);
+}
+EXPORT_SYMBOL(hmcdrv_ftp_shutdown);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_ftp.h b/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_ftp.h
new file mode 100644
index 0000000..d12ca12
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_ftp.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    SE/HMC Drive FTP Services
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef __HMCDRV_FTP_H__
+#define __HMCDRV_FTP_H__
+
+#include <linux/types.h> /* size_t, loff_t */
+
+/*
+ * HMC drive FTP Service max. length of path (w/ EOS)
+ */
+#define HMCDRV_FTP_FIDENT_MAX 192
+
+/**
+ * enum hmcdrv_ftp_cmdid - HMC drive FTP commands
+ * @HMCDRV_FTP_NOOP: do nothing (only for probing)
+ * @HMCDRV_FTP_GET: read a file
+ * @HMCDRV_FTP_PUT: (over-) write a file
+ * @HMCDRV_FTP_APPEND: append to a file
+ * @HMCDRV_FTP_DIR: list directory long (ls -l)
+ * @HMCDRV_FTP_NLIST: list files, no directories (name list)
+ * @HMCDRV_FTP_DELETE: delete a file
+ * @HMCDRV_FTP_CANCEL: cancel operation (SCLP/LPAR only)
+ */
+enum hmcdrv_ftp_cmdid {
+	HMCDRV_FTP_NOOP = 0,
+	HMCDRV_FTP_GET = 1,
+	HMCDRV_FTP_PUT = 2,
+	HMCDRV_FTP_APPEND = 3,
+	HMCDRV_FTP_DIR = 4,
+	HMCDRV_FTP_NLIST = 5,
+	HMCDRV_FTP_DELETE = 6,
+	HMCDRV_FTP_CANCEL = 7
+};
+
+/**
+ * struct hmcdrv_ftp_cmdspec - FTP command specification
+ * @id: FTP command ID
+ * @ofs: offset in file
+ * @fname: filename (ASCII), null-terminated
+ * @buf: kernel-space transfer data buffer, 4k aligned
+ * @len: (max) number of bytes to transfer from/to @buf
+ */
+struct hmcdrv_ftp_cmdspec {
+	enum hmcdrv_ftp_cmdid id;
+	loff_t ofs;
+	const char *fname;
+	void __kernel *buf;
+	size_t len;
+};
+
+int hmcdrv_ftp_startup(void);
+void hmcdrv_ftp_shutdown(void);
+int hmcdrv_ftp_probe(void);
+ssize_t hmcdrv_ftp_do(const struct hmcdrv_ftp_cmdspec *ftp);
+ssize_t hmcdrv_ftp_cmd(char __kernel *cmd, loff_t offset,
+		       char __user *buf, size_t len);
+
+#endif	 /* __HMCDRV_FTP_H__ */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_mod.c b/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_mod.c
new file mode 100644
index 0000000..251a318
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/hmcdrv_mod.c
@@ -0,0 +1,63 @@
+/*
+ *    HMC Drive DVD Module
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/stat.h>
+
+#include "hmcdrv_ftp.h"
+#include "hmcdrv_dev.h"
+#include "hmcdrv_cache.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Copyright 2013 IBM Corporation");
+MODULE_DESCRIPTION("HMC drive DVD access");
+
+/*
+ * module parameter 'cachesize'
+ */
+static size_t hmcdrv_mod_cachesize = HMCDRV_CACHE_SIZE_DFLT;
+module_param_named(cachesize, hmcdrv_mod_cachesize, ulong, S_IRUGO);
+
+/**
+ * hmcdrv_mod_init() - module init function
+ *
+ * Probes the FTP service, then brings up the read cache and the
+ * character device; the cache is torn down again if device creation
+ * fails, so a non-zero return leaves no resources behind.
+ *
+ * Return: 0 on success, else a (negative) error code
+ */
+static int __init hmcdrv_mod_init(void)
+{
+	int rc = hmcdrv_ftp_probe(); /* perform w/o cache */
+
+	if (rc)
+		return rc;
+
+	rc = hmcdrv_cache_startup(hmcdrv_mod_cachesize);
+
+	if (rc)
+		return rc;
+
+	rc = hmcdrv_dev_init();
+
+	if (rc)
+		hmcdrv_cache_shutdown();
+
+	return rc;
+}
+
+/**
+ * hmcdrv_mod_exit() - module exit function
+ *
+ * Tears down the device first, then the cache (reverse of init order).
+ */
+static void __exit hmcdrv_mod_exit(void)
+{
+	hmcdrv_dev_exit();
+	hmcdrv_cache_shutdown();
+}
+
+module_init(hmcdrv_mod_init);
+module_exit(hmcdrv_mod_exit);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/keyboard.c b/src/kernel/linux/v4.14/drivers/s390/char/keyboard.c
new file mode 100644
index 0000000..5b505fd
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/keyboard.c
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    ebcdic keycode functions for s390 console drivers
+ *
+ *  S390 version
+ *    Copyright IBM Corp. 2003
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+
+#include <linux/consolemap.h>
+#include <linux/kbd_kern.h>
+#include <linux/kbd_diacr.h>
+#include <linux/uaccess.h>
+
+#include "keyboard.h"
+
+/*
+ * Handler Tables.
+ */
+#define K_HANDLERS\
+	k_self,		k_fn,		k_spec,		k_ignore,\
+	k_dead,		k_ignore,	k_ignore,	k_ignore,\
+	k_ignore,	k_ignore,	k_ignore,	k_ignore,\
+	k_ignore,	k_ignore,	k_ignore,	k_ignore
+
+typedef void (k_handler_fn)(struct kbd_data *, unsigned char);
+static k_handler_fn K_HANDLERS;
+static k_handler_fn *k_handler[16] = { K_HANDLERS };
+
+/* maximum values each key_handler can handle */
+static const int kbd_max_vals[] = {
+	255, ARRAY_SIZE(func_table) - 1, NR_FN_HANDLER - 1, 0,
+	NR_DEAD - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+static const int KBD_NR_TYPES = ARRAY_SIZE(kbd_max_vals);
+
+static unsigned char ret_diacr[NR_DEAD] = {
+	'`', '\'', '^', '~', '"', ','
+};
+
+/*
+ * Alloc/free of kbd_data structures.
+ */
+/*
+ * kbd_alloc() - allocate a kbd_data with private copies of the global
+ * keymap, function-key and accent tables, so each console can be remapped
+ * independently.  Returns NULL on any allocation failure.
+ */
+struct kbd_data *
+kbd_alloc(void) {
+	struct kbd_data *kbd;
+	int i;
+
+	kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL);
+	if (!kbd)
+		goto out;
+	kbd->key_maps = kzalloc(sizeof(key_maps), GFP_KERNEL);
+	if (!kbd->key_maps)
+		goto out_kbd;
+	/* deep-copy only the keymaps that actually exist */
+	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
+		if (key_maps[i]) {
+			kbd->key_maps[i] = kmemdup(key_maps[i],
+						   sizeof(u_short) * NR_KEYS,
+						   GFP_KERNEL);
+			if (!kbd->key_maps[i])
+				goto out_maps;
+		}
+	}
+	kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL);
+	if (!kbd->func_table)
+		goto out_maps;
+	for (i = 0; i < ARRAY_SIZE(func_table); i++) {
+		if (func_table[i]) {
+			kbd->func_table[i] = kstrdup(func_table[i],
+						     GFP_KERNEL);
+			if (!kbd->func_table[i])
+				goto out_func;
+		}
+	}
+	kbd->fn_handler =
+		kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
+	if (!kbd->fn_handler)
+		goto out_func;
+	kbd->accent_table = kmemdup(accent_table,
+				    sizeof(struct kbdiacruc) * MAX_DIACR,
+				    GFP_KERNEL);
+	if (!kbd->accent_table)
+		goto out_fn_handler;
+	kbd->accent_table_size = accent_table_size;
+	return kbd;
+
+/* unwind in reverse allocation order; kfree(NULL) entries are harmless */
+out_fn_handler:
+	kfree(kbd->fn_handler);
+out_func:
+	for (i = 0; i < ARRAY_SIZE(func_table); i++)
+		kfree(kbd->func_table[i]);
+	kfree(kbd->func_table);
+out_maps:
+	for (i = 0; i < ARRAY_SIZE(key_maps); i++)
+		kfree(kbd->key_maps[i]);
+	kfree(kbd->key_maps);
+out_kbd:
+	kfree(kbd);
+out:
+	return NULL;
+}
+
+/* kbd_free() - release a kbd_data and every table it owns */
+void
+kbd_free(struct kbd_data *kbd)
+{
+	int i;
+
+	kfree(kbd->accent_table);
+	kfree(kbd->fn_handler);
+	for (i = 0; i < ARRAY_SIZE(func_table); i++)
+		kfree(kbd->func_table[i]);
+	kfree(kbd->func_table);
+	for (i = 0; i < ARRAY_SIZE(key_maps); i++)
+		kfree(kbd->key_maps[i]);
+	kfree(kbd->key_maps);
+	kfree(kbd);
+}
+
+/*
+ * Generate ascii -> ebcdic translation table from kbd_data.
+ */
+void
+kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
+{
+	unsigned short *keymap, keysym;
+	int i, j, k;
+
+	/* default every entry to 0x40 -- the EBCDIC blank */
+	memset(ascebc, 0x40, 256);
+	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
+		keymap = kbd->key_maps[i];
+		if (!keymap)
+			continue;
+		for (j = 0; j < NR_KEYS; j++) {
+			/* bit 7 of the code encodes the keymap's low bit */
+			k = ((i & 1) << 7) + j;
+			keysym = keymap[j];
+			/* keymap entries carry the 0xf0 type offset (U()) */
+			if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
+			    KTYP(keysym) == (KT_LETTER | 0xf0))
+				ascebc[KVAL(keysym)] = k;
+			else if (KTYP(keysym) == (KT_DEAD | 0xf0))
+				ascebc[ret_diacr[KVAL(keysym)]] = k;
+		}
+	}
+}
+
+#if 0
+/*
+ * Generate ebcdic -> ascii translation table from kbd_data.
+ * (inverse of kbd_ascebc; currently unused and compiled out)
+ */
+void
+kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
+{
+	unsigned short *keymap, keysym;
+	int i, j, k;
+
+	memset(ebcasc, ' ', 256);
+	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
+		keymap = kbd->key_maps[i];
+		if (!keymap)
+			continue;
+		for (j = 0; j < NR_KEYS; j++) {
+			keysym = keymap[j];
+			k = ((i & 1) << 7) + j;
+			if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
+			    KTYP(keysym) == (KT_LETTER | 0xf0))
+				ebcasc[k] = KVAL(keysym);
+			else if (KTYP(keysym) == (KT_DEAD | 0xf0))
+				ebcasc[k] = ret_diacr[KVAL(keysym)];
+		}
+	}
+}
+#endif
+
+/*
+ * We have a combining character DIACR here, followed by the character CH.
+ * If the combination occurs in the table, return the corresponding value.
+ * Otherwise, if CH is a space or equals DIACR, return DIACR.
+ * Otherwise, conclude that DIACR was not combining after all,
+ * queue it and return CH.
+ */
+static unsigned int
+handle_diacr(struct kbd_data *kbd, unsigned int ch)
+{
+	int i, d;
+
+	d = kbd->diacr;
+	kbd->diacr = 0;
+
+	/* look up the (diacritical, base char) pair in the accent table */
+	for (i = 0; i < kbd->accent_table_size; i++) {
+		if (kbd->accent_table[i].diacr == d &&
+		    kbd->accent_table[i].base == ch)
+			return kbd->accent_table[i].result;
+	}
+
+	if (ch == ' ' || ch == d)
+		return d;
+
+	/* not a combination: emit the pending diacritical, then return CH */
+	kbd_put_queue(kbd->port, d);
+	return ch;
+}
+
+/*
+ * Handle dead key.
+ */
+static void
+k_dead(struct kbd_data *kbd, unsigned char value)
+{
+	value = ret_diacr[value];
+	/* two dead keys in a row may themselves combine */
+	kbd->diacr = (kbd->diacr ? handle_diacr(kbd, value) : value);
+}
+
+/*
+ * Normal character handler.
+ */
+static void
+k_self(struct kbd_data *kbd, unsigned char value)
+{
+	if (kbd->diacr)
+		value = handle_diacr(kbd, value);
+	kbd_put_queue(kbd->port, value);
+}
+
+/*
+ * Special key handlers
+ */
+static void
+k_ignore(struct kbd_data *kbd, unsigned char value)
+{
+}
+
+/*
+ * Function key handler: queue the string bound to function key 'value'.
+ */
+static void
+k_fn(struct kbd_data *kbd, unsigned char value)
+{
+	if (kbd->func_table[value])
+		kbd_puts_queue(kbd->port, kbd->func_table[value]);
+}
+
+/* dispatch a KT_SPEC keysym to its registered fn_handler, if any */
+static void
+k_spec(struct kbd_data *kbd, unsigned char value)
+{
+	if (value >= NR_FN_HANDLER)
+		return;
+	if (kbd->fn_handler[value])
+		kbd->fn_handler[value](kbd);
+}
+
+/*
+ * Put utf8 character to tty flip buffer.
+ * UTF-8 is defined for words of up to 31 bits,
+ * but we need only 16 bits here
+ */
+static void
+to_utf8(struct tty_port *port, ushort c)
+{
+	if (c < 0x80)
+		/*  0******* */
+		kbd_put_queue(port, c);
+	else if (c < 0x800) {
+		/* 110***** 10****** */
+		kbd_put_queue(port, 0xc0 | (c >> 6));
+		kbd_put_queue(port, 0x80 | (c & 0x3f));
+	} else {
+		/* 1110**** 10****** 10****** */
+		kbd_put_queue(port, 0xe0 | (c >> 12));
+		kbd_put_queue(port, 0x80 | ((c >> 6) & 0x3f));
+		kbd_put_queue(port, 0x80 | (c & 0x3f));
+	}
+}
+
+/*
+ * Process keycode.
+ */
+void
+kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
+{
+	unsigned short keysym;
+	unsigned char type, value;
+
+	if (!kbd)
+		return;
+
+	/*
+	 * Select the keymap by keycode range; maps 0/1/4/5 are presumably
+	 * the plain/shifted/modifier variants -- confirm against the
+	 * keymap setup of the calling driver.
+	 */
+	if (keycode >= 384)
+		keysym = kbd->key_maps[5][keycode - 384];
+	else if (keycode >= 256)
+		keysym = kbd->key_maps[4][keycode - 256];
+	else if (keycode >= 128)
+		keysym = kbd->key_maps[1][keycode - 128];
+	else
+		keysym = kbd->key_maps[0][keycode];
+
+	type = KTYP(keysym);
+	if (type >= 0xf0) {
+		/* keymap entries carry the 0xf0 type offset (see U()) */
+		type -= 0xf0;
+		if (type == KT_LETTER)
+			type = KT_LATIN;
+		value = KVAL(keysym);
+#ifdef CONFIG_MAGIC_SYSRQ	       /* Handle the SysRq Hack */
+		if (kbd->sysrq) {
+			if (kbd->sysrq == K(KT_LATIN, '-')) {
+				kbd->sysrq = 0;
+				handle_sysrq(value);
+				return;
+			}
+			if (value == '-') {
+				kbd->sysrq = K(KT_LATIN, '-');
+				return;
+			}
+			/* Incomplete sysrq sequence. */
+			(*k_handler[KTYP(kbd->sysrq)])(kbd, KVAL(kbd->sysrq));
+			kbd->sysrq = 0;
+		} else if ((type == KT_LATIN && value == '^') ||
+			   (type == KT_DEAD && ret_diacr[value] == '^')) {
+			/* '^' starts a potential "^-<key>" sysrq sequence */
+			kbd->sysrq = K(type, value);
+			return;
+		}
+#endif
+		(*k_handler[type])(kbd, value);
+	} else
+		to_utf8(kbd->port, keysym);
+}
+
+/*
+ * Ioctl stuff.
+ */
+/*
+ * do_kdsk_ioctl() - get (KDGKBENT) or set (KDSKBENT) a single keymap entry.
+ *
+ * @perm must be non-zero for KDSKBENT.  Setting or clearing the Secure
+ * Attention Key (K_SAK) additionally requires CAP_SYS_ADMIN.
+ * Returns 0 on success or a negative errno.
+ */
+static int
+do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
+	      int cmd, int perm)
+{
+	struct kbentry tmp;
+	ushort *key_map, val, ov;
+
+	if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
+		return -EFAULT;
+#if NR_KEYS < 256
+	if (tmp.kb_index >= NR_KEYS)
+		return -EINVAL;
+#endif
+#if MAX_NR_KEYMAPS < 256
+	if (tmp.kb_table >= MAX_NR_KEYMAPS)
+		return -EINVAL;
+#endif
+
+	switch (cmd) {
+	case KDGKBENT:
+		key_map = kbd->key_maps[tmp.kb_table];
+		if (key_map) {
+		    val = U(key_map[tmp.kb_index]);
+		    if (KTYP(val) >= KBD_NR_TYPES)
+			val = K_HOLE;
+		} else
+		    val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP);
+		return put_user(val, &user_kbe->kb_value);
+	case KDSKBENT:
+		if (!perm)
+			return -EPERM;
+		if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) {
+			/* disallocate map */
+			key_map = kbd->key_maps[tmp.kb_table];
+			if (key_map) {
+			    kbd->key_maps[tmp.kb_table] = NULL;
+			    kfree(key_map);
+			}
+			break;
+		}
+
+		if (KTYP(tmp.kb_value) >= KBD_NR_TYPES)
+			return -EINVAL;
+		if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)])
+			return -EINVAL;
+
+		/* allocate the keymap on first write, filled with K_HOLE */
+		if (!(key_map = kbd->key_maps[tmp.kb_table])) {
+			int j;
+
+			key_map = kmalloc(sizeof(plain_map),
+						     GFP_KERNEL);
+			if (!key_map)
+				return -ENOMEM;
+			kbd->key_maps[tmp.kb_table] = key_map;
+			for (j = 0; j < NR_KEYS; j++)
+				key_map[j] = U(K_HOLE);
+		}
+		ov = U(key_map[tmp.kb_index]);
+		if (tmp.kb_value == ov)
+			break;	/* nothing to do */
+		/*
+		 * Attention Key.
+		 */
+		if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) &&
+		    !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		key_map[tmp.kb_index] = U(tmp.kb_value);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * do_kdgkb_ioctl() - get (KDGKBSENT) or set (KDSKBSENT) a function-key
+ * string.  @perm must be non-zero for KDSKBSENT.  Returns 0 or -errno.
+ */
+static int
+do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
+	       int cmd, int perm)
+{
+	unsigned char kb_func;
+	char *p;
+	int len;
+
+	/* Get u_kbs->kb_func. */
+	if (get_user(kb_func, &u_kbs->kb_func))
+		return -EFAULT;
+#if MAX_NR_FUNC < 256
+	if (kb_func >= MAX_NR_FUNC)
+		return -EINVAL;
+#endif
+
+	switch (cmd) {
+	case KDGKBSENT:
+		p = kbd->func_table[kb_func];
+		if (p) {
+			len = strlen(p);
+			/* truncate to the user buffer, keep room for '\0' */
+			if (len >= sizeof(u_kbs->kb_string))
+				len = sizeof(u_kbs->kb_string) - 1;
+			if (copy_to_user(u_kbs->kb_string, p, len))
+				return -EFAULT;
+		} else
+			len = 0;
+		if (put_user('\0', u_kbs->kb_string + len))
+			return -EFAULT;
+		break;
+	case KDSKBSENT:
+		if (!perm)
+			return -EPERM;
+		p = strndup_user(u_kbs->kb_string, sizeof(u_kbs->kb_string));
+		if (IS_ERR(p))
+			return PTR_ERR(p);
+		/* replace the old binding; kfree(NULL) is a no-op */
+		kfree(kbd->func_table[kb_func]);
+		kbd->func_table[kb_func] = p;
+		break;
+	}
+	return 0;
+}
+
+/*
+ * kbd_ioctl() - keyboard-related subset of the vt ioctls for s390
+ * consoles.  Returns -ENOIOCTLCMD for commands not handled here.
+ */
+int kbd_ioctl(struct kbd_data *kbd, unsigned int cmd, unsigned long arg)
+{
+	struct tty_struct *tty;
+	void __user *argp;
+	unsigned int ct;
+	int perm;
+
+	argp = (void __user *)arg;
+
+	/*
+	 * To have permissions to do most of the vt ioctls, we either have
+	 * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
+	 */
+	tty = tty_port_tty_get(kbd->port);
+	/* FIXME this test is pretty racy */
+	perm = current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG);
+	tty_kref_put(tty);
+	switch (cmd) {
+	case KDGKBTYPE:
+		return put_user(KB_101, (char __user *)argp);
+	case KDGKBENT:
+	case KDSKBENT:
+		return do_kdsk_ioctl(kbd, argp, cmd, perm);
+	case KDGKBSENT:
+	case KDSKBSENT:
+		return do_kdgkb_ioctl(kbd, argp, cmd, perm);
+	case KDGKBDIACR:
+	{
+		/* copy the accent table to user space, entry by entry */
+		struct kbdiacrs __user *a = argp;
+		struct kbdiacr diacr;
+		int i;
+
+		if (put_user(kbd->accent_table_size, &a->kb_cnt))
+			return -EFAULT;
+		for (i = 0; i < kbd->accent_table_size; i++) {
+			diacr.diacr = kbd->accent_table[i].diacr;
+			diacr.base = kbd->accent_table[i].base;
+			diacr.result = kbd->accent_table[i].result;
+			if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr)))
+			return -EFAULT;
+		}
+		return 0;
+	}
+	case KDGKBDIACRUC:
+	{
+		/* unicode variant: table layout matches, bulk copy */
+		struct kbdiacrsuc __user *a = argp;
+
+		ct = kbd->accent_table_size;
+		if (put_user(ct, &a->kb_cnt))
+			return -EFAULT;
+		if (copy_to_user(a->kbdiacruc, kbd->accent_table,
+				 ct * sizeof(struct kbdiacruc)))
+			return -EFAULT;
+		return 0;
+	}
+	case KDSKBDIACR:
+	{
+		struct kbdiacrs __user *a = argp;
+		struct kbdiacr diacr;
+		int i;
+
+		if (!perm)
+			return -EPERM;
+		if (get_user(ct, &a->kb_cnt))
+			return -EFAULT;
+		/* bound the count before touching the fixed-size table */
+		if (ct >= MAX_DIACR)
+			return -EINVAL;
+		kbd->accent_table_size = ct;
+		for (i = 0; i < ct; i++) {
+			if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr)))
+				return -EFAULT;
+			kbd->accent_table[i].diacr = diacr.diacr;
+			kbd->accent_table[i].base = diacr.base;
+			kbd->accent_table[i].result = diacr.result;
+		}
+		return 0;
+	}
+	case KDSKBDIACRUC:
+	{
+		struct kbdiacrsuc __user *a = argp;
+
+		if (!perm)
+			return -EPERM;
+		if (get_user(ct, &a->kb_cnt))
+			return -EFAULT;
+		if (ct >= MAX_DIACR)
+			return -EINVAL;
+		kbd->accent_table_size = ct;
+		if (copy_from_user(kbd->accent_table, a->kbdiacruc,
+				   ct * sizeof(struct kbdiacruc)))
+			return -EFAULT;
+		return 0;
+	}
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+EXPORT_SYMBOL(kbd_ioctl);
+EXPORT_SYMBOL(kbd_ascebc);
+EXPORT_SYMBOL(kbd_free);
+EXPORT_SYMBOL(kbd_alloc);
+EXPORT_SYMBOL(kbd_keycode);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/keyboard.h b/src/kernel/linux/v4.14/drivers/s390/char/keyboard.h
new file mode 100644
index 0000000..a074d97
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/keyboard.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    ebcdic keycode functions for s390 console drivers
+ *
+ *    Copyright IBM Corp. 2003
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/keyboard.h>
+
+#define NR_FN_HANDLER	20
+
+struct kbd_data;
+
+typedef void (fn_handler_fn)(struct kbd_data *);
+
+/*
+ * FIXME: explain key_maps tricks.
+ */
+
+struct kbd_data {
+	struct tty_port *port;		/* flip buffer receiving input */
+	unsigned short **key_maps;	/* private copy of key_maps[] */
+	char **func_table;		/* private copy of func_table[] */
+	fn_handler_fn **fn_handler;	/* KT_SPEC handlers (NR_FN_HANDLER) */
+	struct kbdiacruc *accent_table;	/* dead-key combination table */
+	unsigned int accent_table_size;	/* valid entries in accent_table */
+	unsigned int diacr;		/* pending dead-key character */
+	unsigned short sysrq;		/* in-progress sysrq sequence state */
+};
+
+struct kbd_data *kbd_alloc(void);
+void kbd_free(struct kbd_data *);
+void kbd_ascebc(struct kbd_data *, unsigned char *);
+
+void kbd_keycode(struct kbd_data *, unsigned int);
+int kbd_ioctl(struct kbd_data *, unsigned int, unsigned long);
+
+/*
+ * Helper Functions.
+ */
+/* queue one character on the tty flip buffer and schedule a push */
+static inline void
+kbd_put_queue(struct tty_port *port, int ch)
+{
+	tty_insert_flip_char(port, ch, 0);
+	tty_schedule_flip(port);
+}
+
+/* queue a NUL-terminated string on the tty flip buffer */
+static inline void
+kbd_puts_queue(struct tty_port *port, char *cp)
+{
+	while (*cp)
+		tty_insert_flip_char(port, *cp++, 0);
+	tty_schedule_flip(port);
+}
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/monreader.c b/src/kernel/linux/v4.14/drivers/s390/char/monreader.c
new file mode 100644
index 0000000..027ac6a
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/monreader.c
@@ -0,0 +1,651 @@
+/*
+ * Character device driver for reading z/VM *MONITOR service records.
+ *
+ * Copyright IBM Corp. 2004, 2009
+ *
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "monreader"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <net/iucv/iucv.h>
+#include <linux/uaccess.h>
+#include <asm/ebcdic.h>
+#include <asm/extmem.h>
+
+
+#define MON_COLLECT_SAMPLE 0x80
+#define MON_COLLECT_EVENT  0x40
+#define MON_SERVICE	   "*MONITOR"
+#define MON_IN_USE	   0x01
+#define MON_MSGLIM	   255
+
+static char mon_dcss_name[9] = "MONDCSS\0";
+
+struct mon_msg {
+	u32 pos;		/* current read position (DCSS address) */
+	u32 mca_offset;		/* offset of current MCA within the msg */
+	struct iucv_message msg;
+	char msglim_reached;	/* set when MON_MSGLIM was hit on receive */
+	char replied_msglim;	/* reply was sent while msglim_reached */
+};
+
+struct mon_private {
+	struct iucv_path *path;
+	struct mon_msg *msg_array[MON_MSGLIM];	/* ring of message slots */
+	unsigned int   write_index;	/* next slot filled by IUCV */
+	unsigned int   read_index;	/* next slot consumed by mon_read */
+	atomic_t msglim_count;		/* messages not yet replied to */
+	atomic_t read_ready;		/* messages available for reading */
+	atomic_t iucv_connected;	/* path established */
+	atomic_t iucv_severed;		/* path torn down by the peer */
+};
+
+static unsigned long mon_in_use = 0;
+
+static unsigned long mon_dcss_start;
+static unsigned long mon_dcss_end;
+
+static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue);
+static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue);
+
+static u8 user_data_connect[16] = {
+	/* Version code, must be 0x01 for shared mode */
+	0x01,
+	/* what to collect */
+	MON_COLLECT_SAMPLE | MON_COLLECT_EVENT,
+	/* DCSS name in EBCDIC, 8 bytes padded with blanks */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+};
+
+static u8 user_data_sever[16] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+};
+
+static struct device *monreader_device;
+
+/******************************************************************************
+ *                             helper functions                               *
+ *****************************************************************************/
+/*
+ * Create the 8 bytes EBCDIC DCSS segment name from
+ * an ASCII name, incl. padding
+ */
+static void dcss_mkname(char *ascii_name, char *ebcdic_name)
+{
+	int i;
+
+	/* upper-case, blank-pad to 8 chars, then convert in place */
+	for (i = 0; i < 8; i++) {
+		if (ascii_name[i] == '\0')
+			break;
+		ebcdic_name[i] = toupper(ascii_name[i]);
+	}
+	for (; i < 8; i++)
+		ebcdic_name[i] = ' ';
+	ASCEBC(ebcdic_name, 8);
+}
+
+/* start address of the message control area, from rmmsg bytes 0..3 */
+static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
+{
+	return *(u32 *) &monmsg->msg.rmmsg;
+}
+
+/* end address of the message control area, from rmmsg bytes 4..7 */
+static inline unsigned long mon_mca_end(struct mon_msg *monmsg)
+{
+	return *(u32 *) &monmsg->msg.rmmsg[4];
+}
+
+/* byte 'index' of the current 12-byte MCA entry */
+static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index)
+{
+	return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index);
+}
+
+static inline u32 mon_mca_size(struct mon_msg *monmsg)
+{
+	return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1;
+}
+
+/* record start/end addresses stored at offsets 4 and 8 of the MCA entry */
+static inline u32 mon_rec_start(struct mon_msg *monmsg)
+{
+	return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4));
+}
+
+static inline u32 mon_rec_end(struct mon_msg *monmsg)
+{
+	return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
+}
+
+/*
+ * Sanity-check the current MCA entry: all addresses must lie inside the
+ * loaded DCSS, the MCA must be a multiple of 12 bytes, and the type
+ * bytes must be plausible.  Returns 0 if valid, -EINVAL otherwise.
+ */
+static int mon_check_mca(struct mon_msg *monmsg)
+{
+	if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
+	    (mon_rec_start(monmsg) < mon_dcss_start) ||
+	    (mon_rec_end(monmsg) > mon_dcss_end) ||
+	    (mon_mca_type(monmsg, 0) == 0) ||
+	    (mon_mca_size(monmsg) % 12 != 0) ||
+	    (mon_mca_end(monmsg) <= mon_mca_start(monmsg)) ||
+	    (mon_mca_end(monmsg) > mon_dcss_end) ||
+	    (mon_mca_start(monmsg) < mon_dcss_start) ||
+	    ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
+		return -EINVAL;
+	return 0;
+}
+
+/*
+ * Reply to a fully-read message so z/VM can reuse the buffer, and advance
+ * the read ring -- unless the message limit was reached, in which case
+ * only mark it replied.  Returns 0 or -EIO if the IUCV reply failed.
+ */
+static int mon_send_reply(struct mon_msg *monmsg,
+			  struct mon_private *monpriv)
+{
+	int rc;
+
+	rc = iucv_message_reply(monpriv->path, &monmsg->msg,
+				IUCV_IPRMDATA, NULL, 0);
+	atomic_dec(&monpriv->msglim_count);
+	if (likely(!monmsg->msglim_reached)) {
+		monmsg->pos = 0;
+		monmsg->mca_offset = 0;
+		monpriv->read_index = (monpriv->read_index + 1) %
+				      MON_MSGLIM;
+		atomic_dec(&monpriv->read_ready);
+	} else
+		monmsg->replied_msglim = 1;
+	if (rc) {
+		pr_err("Reading monitor data failed with rc=%i\n", rc);
+		return -EIO;
+	}
+	return 0;
+}
+
+/* free the private data including all message slots (NULLs are fine) */
+static void mon_free_mem(struct mon_private *monpriv)
+{
+	int i;
+
+	for (i = 0; i < MON_MSGLIM; i++)
+		kfree(monpriv->msg_array[i]);
+	kfree(monpriv);
+}
+
+/* allocate the private data with one mon_msg slot per IUCV message */
+static struct mon_private *mon_alloc_mem(void)
+{
+	int i;
+	struct mon_private *monpriv;
+
+	monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
+	if (!monpriv)
+		return NULL;
+	for (i = 0; i < MON_MSGLIM; i++) {
+		monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg),
+						    GFP_KERNEL);
+		if (!monpriv->msg_array[i]) {
+			mon_free_mem(monpriv);
+			return NULL;
+		}
+	}
+	return monpriv;
+}
+
+/* advance to the next 12-byte MCA entry unless this was the last one */
+static inline void mon_next_mca(struct mon_msg *monmsg)
+{
+	if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
+		return;
+	monmsg->mca_offset += 12;
+	monmsg->pos = 0;
+}
+
+/*
+ * Return the next readable message, NULL if none is ready, or
+ * ERR_PTR(-EOVERFLOW) for a slot that was already replied to because the
+ * message limit had been reached (the slot is recycled here).
+ */
+static struct mon_msg *mon_next_message(struct mon_private *monpriv)
+{
+	struct mon_msg *monmsg;
+
+	if (!atomic_read(&monpriv->read_ready))
+		return NULL;
+	monmsg = monpriv->msg_array[monpriv->read_index];
+	if (unlikely(monmsg->replied_msglim)) {
+		monmsg->replied_msglim = 0;
+		monmsg->msglim_reached = 0;
+		monmsg->pos = 0;
+		monmsg->mca_offset = 0;
+		monpriv->read_index = (monpriv->read_index + 1) %
+				      MON_MSGLIM;
+		atomic_dec(&monpriv->read_ready);
+		return ERR_PTR(-EOVERFLOW);
+	}
+	return monmsg;
+}
+
+
+/******************************************************************************
+ *                               IUCV handler                                 *
+ *****************************************************************************/
+/* IUCV callback: connection to *MONITOR confirmed; wake mon_open() */
+static void mon_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
+{
+	struct mon_private *monpriv = path->private;
+
+	atomic_set(&monpriv->iucv_connected, 1);
+	wake_up(&mon_conn_wait_queue);
+}
+
+/* IUCV callback: peer severed the path; flag it and wake all waiters */
+static void mon_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
+{
+	struct mon_private *monpriv = path->private;
+
+	pr_err("z/VM *MONITOR system service disconnected with rc=%i\n",
+	       ipuser[0]);
+	iucv_path_sever(path, NULL);
+	atomic_set(&monpriv->iucv_severed, 1);
+	wake_up(&mon_conn_wait_queue);
+	wake_up_interruptible(&mon_read_wait_queue);
+}
+
+/* IUCV callback: store the new message and mark a slot readable */
+static void mon_iucv_message_pending(struct iucv_path *path,
+				     struct iucv_message *msg)
+{
+	struct mon_private *monpriv = path->private;
+
+	memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
+	       msg, sizeof(*msg));
+	if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
+		pr_warn("The read queue for monitor data is full\n");
+		monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
+	}
+	monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
+	atomic_inc(&monpriv->read_ready);
+	wake_up_interruptible(&mon_read_wait_queue);
+}
+
+static struct iucv_handler monreader_iucv_handler = {
+	.path_complete	 = mon_iucv_path_complete,
+	.path_severed	 = mon_iucv_path_severed,
+	.message_pending = mon_iucv_message_pending,
+};
+
+/******************************************************************************
+ *                               file operations                              *
+ *****************************************************************************/
+static int mon_open(struct inode *inode, struct file *filp)
+{
+	struct mon_private *monpriv;
+	int rc;
+
+	/*
+	 * only one user allowed
+	 */
+	rc = -EBUSY;
+	if (test_and_set_bit(MON_IN_USE, &mon_in_use))
+		goto out;
+
+	rc = -ENOMEM;
+	monpriv = mon_alloc_mem();
+	if (!monpriv)
+		goto out_use;
+
+	/*
+	 * Connect to *MONITOR service
+	 */
+	monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
+	if (!monpriv->path)
+		goto out_priv;	/* rc is still -ENOMEM here */
+	rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
+			       MON_SERVICE, NULL, user_data_connect, monpriv);
+	if (rc) {
+		pr_err("Connecting to the z/VM *MONITOR system service "
+		       "failed with rc=%i\n", rc);
+		rc = -EIO;
+		goto out_path;
+	}
+	/*
+	 * Wait for connection confirmation
+	 */
+	wait_event(mon_conn_wait_queue,
+		   atomic_read(&monpriv->iucv_connected) ||
+		   atomic_read(&monpriv->iucv_severed));
+	if (atomic_read(&monpriv->iucv_severed)) {
+		atomic_set(&monpriv->iucv_severed, 0);
+		atomic_set(&monpriv->iucv_connected, 0);
+		rc = -EIO;
+		goto out_path;
+	}
+	filp->private_data = monpriv;
+	dev_set_drvdata(monreader_device, monpriv);
+	return nonseekable_open(inode, filp);
+
+out_path:
+	iucv_path_free(monpriv->path);
+out_priv:
+	mon_free_mem(monpriv);
+out_use:
+	clear_bit(MON_IN_USE, &mon_in_use);
+out:
+	return rc;
+}
+
+static int mon_close(struct inode *inode, struct file *filp)
+{
+	int rc, i;
+	struct mon_private *monpriv = filp->private_data;
+
+	/*
+	 * Close IUCV connection and unregister
+	 */
+	if (monpriv->path) {
+		rc = iucv_path_sever(monpriv->path, user_data_sever);
+		if (rc)
+			pr_warn("Disconnecting the z/VM *MONITOR system service failed with rc=%i\n",
+				rc);
+		iucv_path_free(monpriv->path);
+	}
+
+	/* reset all state so the next open starts clean */
+	atomic_set(&monpriv->iucv_severed, 0);
+	atomic_set(&monpriv->iucv_connected, 0);
+	atomic_set(&monpriv->read_ready, 0);
+	atomic_set(&monpriv->msglim_count, 0);
+	monpriv->write_index  = 0;
+	monpriv->read_index   = 0;
+	dev_set_drvdata(monreader_device, NULL);
+
+	for (i = 0; i < MON_MSGLIM; i++)
+		kfree(monpriv->msg_array[i]);
+	kfree(monpriv);
+	clear_bit(MON_IN_USE, &mon_in_use);
+	return 0;
+}
+
+static ssize_t mon_read(struct file *filp, char __user *data,
+			size_t count, loff_t *ppos)
+{
+	struct mon_private *monpriv = filp->private_data;
+	struct mon_msg *monmsg;
+	int ret;
+	u32 mce_start;
+
+	monmsg = mon_next_message(monpriv);
+	if (IS_ERR(monmsg))
+		return PTR_ERR(monmsg);
+
+	/* no message ready: block unless O_NONBLOCK was requested */
+	if (!monmsg) {
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+		ret = wait_event_interruptible(mon_read_wait_queue,
+					atomic_read(&monpriv->read_ready) ||
+					atomic_read(&monpriv->iucv_severed));
+		if (ret)
+			return ret;
+		if (unlikely(atomic_read(&monpriv->iucv_severed)))
+			return -EIO;
+		monmsg = monpriv->msg_array[monpriv->read_index];
+	}
+
+	/* pos is an absolute address inside the mapped DCSS */
+	if (!monmsg->pos)
+		monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
+	if (mon_check_mca(monmsg))
+		goto reply;
+
+	/* read monitor control element (12 bytes) first */
+	mce_start = mon_mca_start(monmsg) + monmsg->mca_offset;
+	if ((monmsg->pos >= mce_start) && (monmsg->pos < mce_start + 12)) {
+		count = min(count, (size_t) mce_start + 12 - monmsg->pos);
+		ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
+				   count);
+		if (ret)
+			return -EFAULT;
+		monmsg->pos += count;
+		if (monmsg->pos == mce_start + 12)
+			monmsg->pos = mon_rec_start(monmsg);
+		goto out_copy;
+	}
+
+	/* read records */
+	if (monmsg->pos <= mon_rec_end(monmsg)) {
+		count = min(count, (size_t) mon_rec_end(monmsg) - monmsg->pos
+					    + 1);
+		ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
+				   count);
+		if (ret)
+			return -EFAULT;
+		monmsg->pos += count;
+		if (monmsg->pos > mon_rec_end(monmsg))
+			mon_next_mca(monmsg);
+		goto out_copy;
+	}
+reply:
+	/* message fully consumed (or invalid): hand it back to z/VM */
+	ret = mon_send_reply(monmsg, monpriv);
+	return ret;
+
+out_copy:
+	*ppos += count;
+	return count;
+}
+
+/* poll: readable when a message is queued; POLLERR once the path died */
+static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p)
+{
+	struct mon_private *monpriv = filp->private_data;
+
+	poll_wait(filp, &mon_read_wait_queue, p);
+	if (unlikely(atomic_read(&monpriv->iucv_severed)))
+		return POLLERR;
+	if (atomic_read(&monpriv->read_ready))
+		return POLLIN | POLLRDNORM;
+	return 0;
+}
+
+static const struct file_operations mon_fops = {
+	.owner   = THIS_MODULE,
+	.open    = &mon_open,
+	.release = &mon_close,
+	.read    = &mon_read,
+	.poll    = &mon_poll,
+	.llseek  = noop_llseek,
+};
+
+static struct miscdevice mon_dev = {
+	.name       = "monreader",
+	.fops       = &mon_fops,
+	.minor      = MISC_DYNAMIC_MINOR,
+};
+
+
+/******************************************************************************
+ *				suspend / resume			      *
+ *****************************************************************************/
+/* freeze: sever the IUCV path and reset state; thaw reconnects later */
+static int monreader_freeze(struct device *dev)
+{
+	struct mon_private *monpriv = dev_get_drvdata(dev);
+	int rc;
+
+	if (!monpriv)
+		return 0;
+	if (monpriv->path) {
+		rc = iucv_path_sever(monpriv->path, user_data_sever);
+		if (rc)
+			pr_warn("Disconnecting the z/VM *MONITOR system service failed with rc=%i\n",
+				rc);
+		iucv_path_free(monpriv->path);
+	}
+	atomic_set(&monpriv->iucv_severed, 0);
+	atomic_set(&monpriv->iucv_connected, 0);
+	atomic_set(&monpriv->read_ready, 0);
+	atomic_set(&monpriv->msglim_count, 0);
+	monpriv->write_index  = 0;
+	monpriv->read_index   = 0;
+	monpriv->path = NULL;
+	return 0;
+}
+
+/* thaw: rebuild the IUCV path; marks the path severed on any failure */
+static int monreader_thaw(struct device *dev)
+{
+	struct mon_private *monpriv = dev_get_drvdata(dev);
+	int rc;
+
+	if (!monpriv)
+		return 0;
+	rc = -ENOMEM;
+	monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
+	if (!monpriv->path)
+		goto out;
+	rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
+			       MON_SERVICE, NULL, user_data_connect, monpriv);
+	if (rc) {
+		pr_err("Connecting to the z/VM *MONITOR system service "
+		       "failed with rc=%i\n", rc);
+		goto out_path;
+	}
+	wait_event(mon_conn_wait_queue,
+		   atomic_read(&monpriv->iucv_connected) ||
+		   atomic_read(&monpriv->iucv_severed));
+	if (atomic_read(&monpriv->iucv_severed))
+		goto out_path;
+	return 0;
+out_path:
+	rc = -EIO;
+	iucv_path_free(monpriv->path);
+	monpriv->path = NULL;
+out:
+	/* readers see POLLERR / -EIO instead of blocking forever */
+	atomic_set(&monpriv->iucv_severed, 1);
+	return rc;
+}
+
+/* restore: reload the DCSS (addresses may have changed), then thaw */
+static int monreader_restore(struct device *dev)
+{
+	int rc;
+
+	segment_unload(mon_dcss_name);
+	rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
+			  &mon_dcss_start, &mon_dcss_end);
+	if (rc < 0) {
+		segment_warning(rc, mon_dcss_name);
+		panic("fatal monreader resume error: no monitor dcss\n");
+	}
+	return monreader_thaw(dev);
+}
+
+static const struct dev_pm_ops monreader_pm_ops = {
+	.freeze  = monreader_freeze,
+	.thaw	 = monreader_thaw,
+	.restore = monreader_restore,
+};
+
+static struct device_driver monreader_driver = {
+	.name = "monreader",
+	.bus  = &iucv_bus,
+	.pm   = &monreader_pm_ops,
+};
+
+
+/******************************************************************************
+ *                              module init/exit                              *
+ *****************************************************************************/
+static int __init mon_init(void)
+{
+	int rc;
+
+	if (!MACHINE_IS_VM) {
+		pr_err("The z/VM *MONITOR record device driver cannot be "
+		       "loaded without z/VM\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Register with IUCV and connect to *MONITOR service
+	 */
+	rc = iucv_register(&monreader_iucv_handler, 1);
+	if (rc) {
+		pr_err("The z/VM *MONITOR record device driver failed to "
+		       "register with IUCV\n");
+		return rc;
+	}
+
+	rc = driver_register(&monreader_driver);
+	if (rc)
+		goto out_iucv;
+	monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!monreader_device) {
+		rc = -ENOMEM;
+		goto out_driver;
+	}
+
+	dev_set_name(monreader_device, "monreader-dev");
+	monreader_device->bus = &iucv_bus;
+	monreader_device->parent = iucv_root;
+	monreader_device->driver = &monreader_driver;
+	/* release just frees the kzalloc'ed device structure */
+	monreader_device->release = (void (*)(struct device *))kfree;
+	rc = device_register(monreader_device);
+	if (rc) {
+		/* device_register failure: drop the ref, don't unregister */
+		put_device(monreader_device);
+		goto out_driver;
+	}
+
+	/* the *MONITOR DCSS must exist and be of type SC (shared r/o) */
+	rc = segment_type(mon_dcss_name);
+	if (rc < 0) {
+		segment_warning(rc, mon_dcss_name);
+		goto out_device;
+	}
+	if (rc != SEG_TYPE_SC) {
+		pr_err("The specified *MONITOR DCSS %s does not have the "
+		       "required type SC\n", mon_dcss_name);
+		rc = -EINVAL;
+		goto out_device;
+	}
+
+	rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
+			  &mon_dcss_start, &mon_dcss_end);
+	if (rc < 0) {
+		segment_warning(rc, mon_dcss_name);
+		rc = -EINVAL;
+		goto out_device;
+	}
+	/* patch the EBCDIC DCSS name into the IUCV connect parameter */
+	dcss_mkname(mon_dcss_name, &user_data_connect[8]);
+
+	/*
+	 * misc_register() has to be the last action in module_init(), because
+	 * file operations will be available right after this.
+	 */
+	rc = misc_register(&mon_dev);
+	if (rc < 0 )
+		goto out;
+	return 0;
+
+out:
+	segment_unload(mon_dcss_name);
+out_device:
+	device_unregister(monreader_device);
+out_driver:
+	driver_unregister(&monreader_driver);
+out_iucv:
+	iucv_unregister(&monreader_iucv_handler, 1);
+	return rc;
+}
+
+static void __exit mon_exit(void)
+{
+	/* tear down in reverse order of mon_init() */
+	segment_unload(mon_dcss_name);
+	misc_deregister(&mon_dev);
+	device_unregister(monreader_device);
+	driver_unregister(&monreader_driver);
+	iucv_unregister(&monreader_iucv_handler, 1);
+	return;
+}
+
+
+module_init(mon_init);
+module_exit(mon_exit);
+
+module_param_string(mondcss, mon_dcss_name, 9, 0444);
+MODULE_PARM_DESC(mondcss, "Name of DCSS segment to be used for *MONITOR "
+		 "service, max. 8 chars. Default is MONDCSS");
+
+MODULE_AUTHOR("Gerald Schaefer <geraldsc@de.ibm.com>");
+MODULE_DESCRIPTION("Character device driver for reading z/VM "
+		   "monitor service records.");
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/monwriter.c b/src/kernel/linux/v4.14/drivers/s390/char/monwriter.c
new file mode 100644
index 0000000..571a7e3
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/monwriter.c
@@ -0,0 +1,398 @@
+/*
+ * Character device driver for writing z/VM *MONITOR service records.
+ *
+ * Copyright IBM Corp. 2006, 2009
+ *
+ * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
+ */
+
+#define KMSG_COMPONENT "monwriter"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/ctype.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/appldata.h>
+#include <asm/monwriter.h>
+
+/* Upper bound accepted for monwrite_hdr.datalen (one monitor record). */
+#define MONWRITE_MAX_DATALEN	4010
+
+static int mon_max_bufs = 255;	/* module parameter: max. counted sample buffers */
+static int mon_buf_count;	/* current number of counted sample buffers */
+
+/* One monitor buffer: the header it was created with plus its data area. */
+struct mon_buf {
+	struct list_head list;		/* linked on mon_private.list */
+	struct monwrite_hdr hdr;	/* header supplied at creation time */
+	int diag_done;			/* start diagnose already issued */
+	char *data;			/* record payload, hdr.datalen bytes */
+};
+
+/* All open file instances, used by the freeze/restore callbacks below. */
+static LIST_HEAD(mon_priv_list);
+
+/* Per-open-file state: parser position for the header/data write stream. */
+struct mon_private {
+	struct list_head priv_list;	/* linked on mon_priv_list */
+	struct list_head list;		/* this instance's mon_buf objects */
+	struct monwrite_hdr hdr;	/* header currently being assembled */
+	size_t hdr_to_read;		/* header bytes still expected */
+	size_t data_to_read;		/* data bytes still expected */
+	struct mon_buf *current_buf;	/* buffer the data phase writes into */
+	struct mutex thread_mutex;	/* serializes concurrent write()s */
+};
+
+/*
+ * helper functions
+ */
+
+/*
+ * Pass one monitor record to z/VM via the APPLDATA diagnose.
+ * @myhdr:  header describing the record (applid, record number, datalen...)
+ * @buffer: record payload, myhdr->datalen bytes
+ * @fcn:    APPLDATA function code (start/stop/event)
+ * Returns 0 or a negative appldata_asm() result on success paths,
+ * -EPERM/-EINVAL when the diagnose itself reports a failure.
+ */
+static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
+{
+	struct appldata_product_id id;
+	int rc;
+
+	/*
+	 * prod_nr is a fixed-width field that is deliberately not
+	 * NUL-terminated; use memcpy to make that explicit.  strncpy with a
+	 * bound equal to the source length never terminates and trips
+	 * fortify/stringop-truncation diagnostics.
+	 */
+	memcpy(id.prod_nr, "LNXAPPL", 7);
+	id.prod_fn = myhdr->applid;
+	id.record_nr = myhdr->record_num;
+	id.version_nr = myhdr->version;
+	id.release_nr = myhdr->release;
+	id.mod_lvl = myhdr->mod_level;
+	rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
+	if (rc <= 0)
+		return rc;
+	pr_err("Writing monitor data failed with rc=%i\n", rc);
+	if (rc == 5)
+		return -EPERM;
+	return -EINVAL;
+}
+
+/*
+ * Look up an existing buffer matching the given header.  A header with
+ * mon_function == MONWRITE_STOP_INTERVAL matches any function code as long
+ * as applid/record_num/version/release/mod_level agree.
+ * Returns the matching buffer or NULL.
+ */
+static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
+					 struct monwrite_hdr *monhdr)
+{
+	struct mon_buf *entry;
+
+	/* Plain iteration: nothing is removed from the list here, so the
+	 * _safe variant and its extra cursor are unnecessary. */
+	list_for_each_entry(entry, &monpriv->list, list)
+		if ((entry->hdr.mon_function == monhdr->mon_function ||
+		     monhdr->mon_function == MONWRITE_STOP_INTERVAL) &&
+		    entry->hdr.applid == monhdr->applid &&
+		    entry->hdr.record_num == monhdr->record_num &&
+		    entry->hdr.version == monhdr->version &&
+		    entry->hdr.release == monhdr->release &&
+		    entry->hdr.mod_level == monhdr->mod_level)
+			return entry;
+
+	return NULL;
+}
+
+/*
+ * Process a completely assembled header (monpriv->hdr): validate it, and
+ * either stop-and-free an existing buffer (STOP_INTERVAL), reuse a matching
+ * buffer, or allocate a new one.  Leaves the buffer to receive data in
+ * monpriv->current_buf (NULL after a stop).
+ * Returns 0, -EINVAL on a bad header, -ENOSPC/-ENOMEM on allocation limits,
+ * or the monwrite_diag() result of the stop record.
+ */
+static int monwrite_new_hdr(struct mon_private *monpriv)
+{
+	struct monwrite_hdr *monhdr = &monpriv->hdr;
+	struct mon_buf *monbuf;
+	int rc = 0;
+
+	if (monhdr->datalen > MONWRITE_MAX_DATALEN ||
+	    monhdr->mon_function > MONWRITE_START_CONFIG ||
+	    monhdr->hdrlen != sizeof(struct monwrite_hdr))
+		return -EINVAL;
+	monbuf = NULL;
+	/* GEN_EVENT records always get a fresh buffer, never a reused one. */
+	if (monhdr->mon_function != MONWRITE_GEN_EVENT)
+		monbuf = monwrite_find_hdr(monpriv, monhdr);
+	if (monbuf) {
+		if (monhdr->mon_function == MONWRITE_STOP_INTERVAL) {
+			/* Stop uses the buffer's stored length, not the
+			 * (possibly different) length of the stop header. */
+			monhdr->datalen = monbuf->hdr.datalen;
+			rc = monwrite_diag(monhdr, monbuf->data,
+					   APPLDATA_STOP_REC);
+			list_del(&monbuf->list);
+			mon_buf_count--;
+			kfree(monbuf->data);
+			kfree(monbuf);
+			monbuf = NULL;
+		}
+	} else if (monhdr->mon_function != MONWRITE_STOP_INTERVAL) {
+		if (mon_buf_count >= mon_max_bufs)
+			return -ENOSPC;
+		monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
+		if (!monbuf)
+			return -ENOMEM;
+		/* GFP_DMA: the diagnose needs a 31-bit addressable buffer. */
+		monbuf->data = kzalloc(monhdr->datalen,
+				       GFP_KERNEL | GFP_DMA);
+		if (!monbuf->data) {
+			kfree(monbuf);
+			return -ENOMEM;
+		}
+		monbuf->hdr = *monhdr;
+		list_add_tail(&monbuf->list, &monpriv->list);
+		/* GEN_EVENT buffers are transient and not counted. */
+		if (monhdr->mon_function != MONWRITE_GEN_EVENT)
+			mon_buf_count++;
+	}
+	monpriv->current_buf = monbuf;
+	return rc;
+}
+
+/*
+ * Called once the data portion of the current buffer is complete: issue the
+ * matching APPLDATA diagnose.  Start records are sent only once per buffer
+ * (diag_done); GEN_EVENT buffers are sent and then freed immediately.
+ * Returns the monwrite_diag() result.
+ */
+static int monwrite_new_data(struct mon_private *monpriv)
+{
+	struct monwrite_hdr *monhdr = &monpriv->hdr;
+	struct mon_buf *monbuf = monpriv->current_buf;
+	int rc = 0;
+
+	switch (monhdr->mon_function) {
+	case MONWRITE_START_INTERVAL:
+		if (!monbuf->diag_done) {
+			rc = monwrite_diag(monhdr, monbuf->data,
+					   APPLDATA_START_INTERVAL_REC);
+			monbuf->diag_done = 1;
+		}
+		break;
+	case MONWRITE_START_CONFIG:
+		if (!monbuf->diag_done) {
+			rc = monwrite_diag(monhdr, monbuf->data,
+					   APPLDATA_START_CONFIG_REC);
+			monbuf->diag_done = 1;
+		}
+		break;
+	case MONWRITE_GEN_EVENT:
+		rc = monwrite_diag(monhdr, monbuf->data,
+				   APPLDATA_GEN_EVENT_REC);
+		list_del(&monpriv->current_buf->list);
+		kfree(monpriv->current_buf->data);
+		kfree(monpriv->current_buf);
+		monpriv->current_buf = NULL;
+		break;
+	default:
+		/* monhdr->mon_function is checked in monwrite_new_hdr */
+		BUG();
+	}
+	return rc;
+}
+
+/*
+ * file operations
+ */
+
+/*
+ * open(): allocate per-file parser state and start expecting a header.
+ * NOTE(review): mon_priv_list is modified here without a lock; presumably
+ * safe because the freeze/restore callbacks run with userspace frozen —
+ * confirm against the PM core semantics.
+ */
+static int monwrite_open(struct inode *inode, struct file *filp)
+{
+	struct mon_private *monpriv;
+
+	monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
+	if (!monpriv)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&monpriv->list);
+	monpriv->hdr_to_read = sizeof(monpriv->hdr);
+	mutex_init(&monpriv->thread_mutex);
+	filp->private_data = monpriv;
+	list_add_tail(&monpriv->priv_list, &mon_priv_list);
+	return nonseekable_open(inode, filp);
+}
+
+/*
+ * release(): send a stop record for every interval/config buffer that is
+ * still active, then free all buffers and the per-file state.
+ */
+static int monwrite_close(struct inode *inode, struct file *filp)
+{
+	struct mon_private *monpriv = filp->private_data;
+	struct mon_buf *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &monpriv->list, list) {
+		if (entry->hdr.mon_function != MONWRITE_GEN_EVENT) {
+			monwrite_diag(&entry->hdr, entry->data,
+				      APPLDATA_STOP_REC);
+			/*
+			 * Only non-GEN_EVENT buffers were counted in
+			 * monwrite_new_hdr(); decrementing for a leftover
+			 * GEN_EVENT buffer (write aborted mid-record) would
+			 * underflow mon_buf_count.
+			 */
+			mon_buf_count--;
+		}
+		list_del(&entry->list);
+		kfree(entry->data);
+		kfree(entry);
+	}
+	list_del(&monpriv->priv_list);
+	kfree(monpriv);
+	return 0;
+}
+
+/*
+ * write(): parse the user stream as a sequence of monwrite_hdr structures,
+ * each followed by hdr.datalen bytes of record data.  Both header and data
+ * may arrive split across multiple write() calls; the parser position lives
+ * in monpriv (hdr_to_read/data_to_read).
+ * Returns the number of bytes consumed or a negative error; after an error
+ * the parser is reset to expect a fresh header.
+ */
+static ssize_t monwrite_write(struct file *filp, const char __user *data,
+			      size_t count, loff_t *ppos)
+{
+	struct mon_private *monpriv = filp->private_data;
+	size_t len, written;
+	void *to;
+	int rc;
+
+	mutex_lock(&monpriv->thread_mutex);
+	for (written = 0; written < count; ) {
+		/* Phase 1: assemble the header. */
+		if (monpriv->hdr_to_read) {
+			len = min(count - written, monpriv->hdr_to_read);
+			to = (char *) &monpriv->hdr +
+				sizeof(monpriv->hdr) - monpriv->hdr_to_read;
+			if (copy_from_user(to, data + written, len)) {
+				rc = -EFAULT;
+				goto out_error;
+			}
+			monpriv->hdr_to_read -= len;
+			written += len;
+			if (monpriv->hdr_to_read > 0)
+				continue;
+			rc = monwrite_new_hdr(monpriv);
+			if (rc)
+				goto out_error;
+			monpriv->data_to_read = monpriv->current_buf ?
+				monpriv->current_buf->hdr.datalen : 0;
+		}
+
+		/* Phase 2: copy the record data into the buffer. */
+		if (monpriv->data_to_read) {
+			len = min(count - written, monpriv->data_to_read);
+			/*
+			 * Compute the offset from the buffer's own stored
+			 * datalen, not from the newly received header: when
+			 * an existing buffer is reused, the incoming header
+			 * may carry a different (larger) datalen, and using
+			 * it here would place the write window past the end
+			 * of the allocation (heap overflow).
+			 */
+			to = monpriv->current_buf->data +
+				monpriv->current_buf->hdr.datalen -
+				monpriv->data_to_read;
+			if (copy_from_user(to, data + written, len)) {
+				rc = -EFAULT;
+				goto out_error;
+			}
+			monpriv->data_to_read -= len;
+			written += len;
+			if (monpriv->data_to_read > 0)
+				continue;
+			rc = monwrite_new_data(monpriv);
+			if (rc)
+				goto out_error;
+		}
+		/* Record complete — expect the next header. */
+		monpriv->hdr_to_read = sizeof(monpriv->hdr);
+	}
+	mutex_unlock(&monpriv->thread_mutex);
+	return written;
+
+out_error:
+	monpriv->data_to_read = 0;
+	monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
+	mutex_unlock(&monpriv->thread_mutex);
+	return rc;
+}
+
+/* Character device operations; the device is write-only (no .read). */
+static const struct file_operations monwrite_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = &monwrite_open,
+	.release = &monwrite_close,
+	.write	 = &monwrite_write,
+	.llseek  = noop_llseek,
+};
+
+/* /dev/monwriter, minor assigned dynamically by the misc core. */
+static struct miscdevice mon_dev = {
+	.name	= "monwriter",
+	.fops	= &monwrite_fops,
+	.minor	= MISC_DYNAMIC_MINOR,
+};
+
+/*
+ * suspend/resume
+ */
+
+/*
+ * Hibernation freeze: send a stop record for every active interval/config
+ * buffer so z/VM does not keep sampling while the system is suspended.
+ * Buffers are kept so monwriter_restore() can re-start them.
+ */
+static int monwriter_freeze(struct device *dev)
+{
+	struct mon_private *monpriv;
+	struct mon_buf *monbuf;
+
+	list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
+		list_for_each_entry(monbuf, &monpriv->list, list) {
+			if (monbuf->hdr.mon_function != MONWRITE_GEN_EVENT)
+				monwrite_diag(&monbuf->hdr, monbuf->data,
+					      APPLDATA_STOP_REC);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Hibernation restore: re-issue the start diagnose for every buffer that
+ * was active before the freeze, mirroring monwriter_freeze().
+ */
+static int monwriter_restore(struct device *dev)
+{
+	struct mon_private *priv;
+	struct mon_buf *buf;
+
+	list_for_each_entry(priv, &mon_priv_list, priv_list) {
+		list_for_each_entry(buf, &priv->list, list) {
+			switch (buf->hdr.mon_function) {
+			case MONWRITE_START_INTERVAL:
+				monwrite_diag(&buf->hdr, buf->data,
+					      APPLDATA_START_INTERVAL_REC);
+				break;
+			case MONWRITE_START_CONFIG:
+				monwrite_diag(&buf->hdr, buf->data,
+					      APPLDATA_START_CONFIG_REC);
+				break;
+			default:
+				/* GEN_EVENT buffers need no re-start. */
+				break;
+			}
+		}
+	}
+	return 0;
+}
+
+/* Thaw after a failed hibernation: identical to a restore. */
+static int monwriter_thaw(struct device *dev)
+{
+	return monwriter_restore(dev);
+}
+
+/* Hibernation callbacks; regular suspend/resume is not handled here. */
+static const struct dev_pm_ops monwriter_pm_ops = {
+	.freeze		= monwriter_freeze,
+	.thaw		= monwriter_thaw,
+	.restore	= monwriter_restore,
+};
+
+/* Dummy platform driver/device pair, exists only to receive PM callbacks. */
+static struct platform_driver monwriter_pdrv = {
+	.driver = {
+		.name	= "monwriter",
+		.pm	= &monwriter_pm_ops,
+	},
+};
+
+static struct platform_device *monwriter_pdev;
+
+/*
+ * module init/exit
+ */
+
+/*
+ * Module init: register the PM platform driver/device, then the misc
+ * device.  Only available under z/VM.  On failure the goto chain unwinds
+ * in reverse registration order.
+ */
+static int __init mon_init(void)
+{
+	int rc;
+
+	if (!MACHINE_IS_VM)
+		return -ENODEV;
+
+	rc = platform_driver_register(&monwriter_pdrv);
+	if (rc)
+		return rc;
+
+	monwriter_pdev = platform_device_register_simple("monwriter", -1, NULL,
+							0);
+	if (IS_ERR(monwriter_pdev)) {
+		rc = PTR_ERR(monwriter_pdev);
+		goto out_driver;
+	}
+
+	/*
+	 * misc_register() has to be the last action in module_init(), because
+	 * file operations will be available right after this.
+	 */
+	rc = misc_register(&mon_dev);
+	if (rc)
+		goto out_device;
+	return 0;
+
+out_device:
+	platform_device_unregister(monwriter_pdev);
+out_driver:
+	platform_driver_unregister(&monwriter_pdrv);
+	return rc;
+}
+
+/* Module teardown: reverse order of mon_init(), misc device first. */
+static void __exit mon_exit(void)
+{
+	misc_deregister(&mon_dev);
+	platform_device_unregister(monwriter_pdev);
+	platform_driver_unregister(&monwriter_pdrv);
+}
+
+module_init(mon_init);
+module_exit(mon_exit);
+
+/* max_bufs: limit for concurrently active (counted) sample buffers. */
+module_param_named(max_bufs, mon_max_bufs, int, 0644);
+MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers "
+		 "that can be active at one time");
+
+MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>");
+MODULE_DESCRIPTION("Character device driver for writing z/VM "
+		   "APPLDATA monitor records.");
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/raw3270.c b/src/kernel/linux/v4.14/drivers/s390/char/raw3270.c
new file mode 100644
index 0000000..0f47fec
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/raw3270.c
@@ -0,0 +1,1357 @@
+/*
+ * IBM/3270 Driver - core functions.
+ *
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/ebcdic.h>
+#include <asm/diag.h>
+
+#include "raw3270.h"
+
+#include <linux/major.h>
+#include <linux/kdev_t.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+/* Device class shared by the 3270 drivers (non-static: used elsewhere). */
+struct class *class3270;
+
+/* The main 3270 data structure. */
+struct raw3270 {
+	struct list_head list;
+	struct ccw_device *cdev;
+	int minor;
+
+	short model, rows, cols;
+	unsigned int state;
+	unsigned long flags;
+
+	struct list_head req_queue;	/* Request queue. */
+	struct list_head view_list;	/* List of available views. */
+	struct raw3270_view *view;	/* Active view. */
+
+	struct timer_list timer;	/* Device timer. */
+
+	unsigned char *ascebc;		/* ascii -> ebcdic table */
+
+	/* View and requests used only during device initialization
+	 * (reset / read-partition / read-modified sequence below). */
+	struct raw3270_view init_view;
+	struct raw3270_request init_reset;
+	struct raw3270_request init_readpart;
+	struct raw3270_request init_readmod;
+	unsigned char init_data[256];
+};
+
+/* raw3270->state */
+#define RAW3270_STATE_INIT	0	/* Initial state */
+#define RAW3270_STATE_RESET	1	/* Reset command is pending */
+#define RAW3270_STATE_W4ATTN	2	/* Wait for attention interrupt */
+#define RAW3270_STATE_READMOD	3	/* Read partition is pending */
+#define RAW3270_STATE_READY	4	/* Device is usable by views */
+
+/* raw3270->flags */
+#define RAW3270_FLAGS_14BITADDR	0	/* 14-bit buffer addresses */
+#define RAW3270_FLAGS_BUSY	1	/* Device busy, leave it alone */
+#define RAW3270_FLAGS_CONSOLE	2	/* Device is the console. */
+#define RAW3270_FLAGS_FROZEN	3	/* set if 3270 is frozen for suspend */
+
+/* Semaphore to protect global data of raw3270 (devices, views, etc). */
+static DEFINE_MUTEX(raw3270_mutex);
+
+/* List of 3270 devices. */
+static LIST_HEAD(raw3270_devices);
+
+/*
+ * Flag to indicate if the driver has been registered. Some operations
+ * like waiting for the end of i/o need to be done differently as long
+ * as the kernel is still starting up (console support).
+ */
+static int raw3270_registered;
+
+/* Module parameters */
+static bool tubxcorrect;
+module_param(tubxcorrect, bool, 0);
+
+/*
+ * Wait queue for device init/delete, view delete.
+ */
+DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
+
+static void __raw3270_disconnect(struct raw3270 *rp);
+
+/*
+ * Encode array for 12 bit 3270 addresses.
+ */
+static unsigned char raw3270_ebcgraf[64] =	{
+	0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+	0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+	0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+	0xd8, 0xd9, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+	0x60, 0x61, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
+	0xe8, 0xe9, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+	0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f
+};
+
+/* Device fully initialized and usable by views? */
+static inline int raw3270_state_ready(struct raw3270 *rp)
+{
+	return rp->state == RAW3270_STATE_READY;
+}
+
+/* Device in a settled state (either never initialized or fully ready)? */
+static inline int raw3270_state_final(struct raw3270 *rp)
+{
+	return rp->state == RAW3270_STATE_INIT ||
+		rp->state == RAW3270_STATE_READY;
+}
+
+/*
+ * Encode a 3270 buffer address into its two-byte on-the-wire form at cp.
+ * Devices with 14-bit addressing take the address as a plain 14-bit value;
+ * everything else uses the 12-bit encoding via raw3270_ebcgraf[].
+ */
+void
+raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr)
+{
+	if (!test_bit(RAW3270_FLAGS_14BITADDR, &rp->flags)) {
+		cp[0] = raw3270_ebcgraf[(addr >> 6) & 0x3f];
+		cp[1] = raw3270_ebcgraf[addr & 0x3f];
+		return;
+	}
+	cp[0] = (addr >> 8) & 0x3f;
+	cp[1] = addr & 0xff;
+}
+
+/*
+ * Allocate a new 3270 ccw request
+ */
+struct raw3270_request *
+raw3270_request_alloc(size_t size)
+{
+	struct raw3270_request *rq;
+
+	/* Allocate request structure; GFP_DMA because the channel subsystem
+	 * needs 31-bit addressable memory. */
+	rq = kzalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA);
+	if (!rq)
+		return ERR_PTR(-ENOMEM);
+
+	/* alloc output buffer. */
+	if (size > 0) {
+		rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
+		if (!rq->buffer) {
+			kfree(rq);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+	rq->size = size;
+	INIT_LIST_HEAD(&rq->list);
+
+	/*
+	 * Setup ccw.
+	 */
+	rq->ccw.cda = __pa(rq->buffer);
+	rq->ccw.flags = CCW_FLAG_SLI;
+
+	return rq;
+}
+
+/*
+ * Free 3270 ccw request
+ */
+/* Free a request and its output buffer (kfree(NULL) is a no-op). */
+void
+raw3270_request_free (struct raw3270_request *rq)
+{
+	kfree(rq->buffer);
+	kfree(rq);
+}
+
+/*
+ * Reset request to initial state.
+ */
+/* Reset a (not queued) request to its freshly-allocated state. */
+void
+raw3270_request_reset(struct raw3270_request *rq)
+{
+	BUG_ON(!list_empty(&rq->list));
+	rq->ccw.cmd_code = 0;
+	rq->ccw.count = 0;
+	rq->ccw.cda = __pa(rq->buffer);
+	rq->ccw.flags = CCW_FLAG_SLI;
+	rq->rescnt = 0;
+	rq->rc = 0;
+}
+
+/*
+ * Set command code to ccw of a request.
+ */
+/* Set the channel command code of a request's ccw. */
+void
+raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd)
+{
+	rq->ccw.cmd_code = cmd;
+}
+
+/*
+ * Add data fragment to output buffer.
+ */
+/*
+ * Append a data fragment to the request's output buffer.
+ * Returns 0, or -E2BIG if the fragment does not fit.
+ */
+int
+raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size)
+{
+	/*
+	 * Compare against the remaining space rather than computing
+	 * size + rq->ccw.count: the sum could wrap around for a huge size
+	 * argument and defeat the bounds check.  rq->ccw.count never
+	 * exceeds rq->size, so the subtraction cannot underflow.
+	 */
+	if (size > rq->size - rq->ccw.count)
+		return -E2BIG;
+	memcpy(rq->buffer + rq->ccw.count, data, size);
+	rq->ccw.count += size;
+	return 0;
+}
+
+/*
+ * Set address/length pair to ccw of a request.
+ */
+/* Point the request's ccw directly at an external data/length pair. */
+void
+raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
+{
+	rq->ccw.cda = __pa(data);
+	rq->ccw.count = size;
+}
+
+/*
+ * Set idal buffer to ccw of a request.
+ */
+/* Point the request's ccw at an idal buffer (indirect data addressing). */
+void
+raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
+{
+	rq->ccw.cda = __pa(ib->data);
+	rq->ccw.count = ib->size;
+	rq->ccw.flags |= CCW_FLAG_IDA;
+}
+
+/*
+ * Add the request to the request queue, try to start it if the
+ * 3270 device is idle. Return without waiting for end of i/o.
+ */
+/*
+ * Queue a request and start it immediately if the device is idle.
+ * Must be called with the ccw device lock held.  Takes a view reference
+ * that raw3270_irq() drops when the request completes.
+ */
+static int
+__raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
+		struct raw3270_request *rq)
+{
+	rq->view = view;
+	raw3270_get_view(view);
+	if (list_empty(&rp->req_queue) &&
+	    !test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
+		/* No other requests are on the queue. Start this one. */
+		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
+					       (unsigned long) rq, 0, 0);
+		if (rq->rc) {
+			raw3270_put_view(view);
+			return rq->rc;
+		}
+	}
+	/* Queued even when started: raw3270_irq() dequeues on completion. */
+	list_add_tail(&rq->list, &rp->req_queue);
+	return 0;
+}
+
+/*
+ * Is this view the active one on a usable (non-frozen) device?
+ * Returns non-zero if active.
+ */
+int
+raw3270_view_active(struct raw3270_view *view)
+{
+	struct raw3270 *rp = view->dev;
+
+	if (!rp)
+		return 0;
+	if (rp->view != view)
+		return 0;
+	return !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
+}
+
+/*
+ * Start a request on behalf of a view, taking the ccw device lock.
+ * Returns -EACCES if the view is not active or the device is frozen,
+ * -EBUSY while the device is still initializing.
+ */
+int
+raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
+{
+	unsigned long flags;
+	struct raw3270 *rp;
+	int rc;
+
+	spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
+	rp = view->dev;
+	if (!rp || rp->view != view ||
+	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		rc = -EACCES;
+	else if (!raw3270_state_ready(rp))
+		rc = -EBUSY;
+	else
+		rc =  __raw3270_start(rp, view, rq);
+	spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
+	return rc;
+}
+
+/*
+ * Same as raw3270_start(), but the caller already holds the ccw device
+ * lock.  Returns -EACCES if the view is not active or the device is
+ * frozen, -EBUSY while the device is still initializing.
+ */
+int
+raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
+{
+	struct raw3270 *rp = view->dev;
+
+	if (!rp || rp->view != view ||
+	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		return -EACCES;
+	if (!raw3270_state_ready(rp))
+		return -EBUSY;
+	return __raw3270_start(rp, view, rq);
+}
+
+/*
+ * Queue a request from interrupt context without starting it; the actual
+ * start happens from raw3270_irq()'s queue processing.  No state checks.
+ */
+int
+raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)
+{
+	struct raw3270 *rp;
+
+	rp = view->dev;
+	rq->view = view;
+	raw3270_get_view(view);
+	list_add_tail(&rq->list, &rp->req_queue);
+	return 0;
+}
+
+/*
+ * 3270 interrupt routine, called from the ccw_device layer
+ */
+static void
+raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+{
+	struct raw3270 *rp;
+	struct raw3270_view *view;
+	struct raw3270_request *rq;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	if (!rp)
+		return;
+	/* intparm is the request passed to ccw_device_start(), if any. */
+	rq = (struct raw3270_request *) intparm;
+	view = rq ? rq->view : rp->view;
+
+	if (!IS_ERR(irb)) {
+		/* Handle CE-DE-UE and subsequent UDE */
+		if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END)
+			clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
+		if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END |
+					    DEV_STAT_DEV_END |
+					    DEV_STAT_UNIT_EXCEP))
+			set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
+		/* Handle disconnected devices */
+		if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+		    (irb->ecw[0] & SNS0_INTERVENTION_REQ)) {
+			set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
+			if (rp->state > RAW3270_STATE_RESET)
+				__raw3270_disconnect(rp);
+		}
+		/* Call interrupt handler of the view */
+		if (view)
+			view->fn->intv(view, rq, irb);
+	}
+
+	if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags))
+		/* Device busy, do not start I/O */
+		return;
+
+	if (rq && !list_empty(&rq->list)) {
+		/* The request completed, remove from queue and do callback. */
+		list_del_init(&rq->list);
+		if (rq->callback)
+			rq->callback(rq, rq->callback_data);
+		/* Do put_device for get_device in raw3270_start. */
+		raw3270_put_view(view);
+	}
+
+	/*
+	 * Try to start each request on request queue until one is
+	 * started successful.
+	 */
+	while (!list_empty(&rp->req_queue)) {
+		rq = list_entry(rp->req_queue.next,struct raw3270_request,list);
+		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
+					  (unsigned long) rq, 0, 0);
+		if (rq->rc == 0)
+			break;
+		/* Start failed. Remove request and do callback. */
+		list_del_init(&rq->list);
+		if (rq->callback)
+			rq->callback(rq, rq->callback_data);
+		/* Do put_device for get_device in raw3270_start. */
+		raw3270_put_view(view);
+	}
+}
+
+/*
+ * To determine the size of the 3270 device we need to do:
+ * 1) send a 'read partition' data stream to the device
+ * 2) wait for the attn interrupt that precedes the query reply
+ * 3) do a read modified to get the query reply
+ * To make things worse we have to cope with intervention
+ * required (3270 device switched to 'stand-by') and command
+ * rejects (old devices that can't do 'read partition').
+ */
+struct raw3270_ua {	/* Query Reply structure for Usable Area */
+	struct {	/* Usable Area Query Reply Base */
+		short l;	/* Length of this structured field */
+		char  sfid;	/* 0x81 if Query Reply */
+		char  qcode;	/* 0x81 if Usable Area */
+		char  flags0;
+		char  flags1;
+		short w;	/* Width of usable area */
+		short h;	/* Height of usable area */
+		char  units;	/* 0x00:in; 0x01:mm */
+		int   xr;
+		int   yr;
+		char  aw;
+		char  ah;
+		short buffsz;	/* Character buffer size, bytes */
+		char  xmin;
+		char  ymin;
+		char  xmax;
+		char  ymax;
+	} __attribute__ ((packed)) uab;
+	struct {	/* Alternate Usable Area Self-Defining Parameter */
+		char  l;	/* Length of this Self-Defining Parm */
+		char  sdpid;	/* 0x02 if Alternate Usable Area */
+		char  res;
+		char  auaid;	/* 0x01 is Id for the A U A */
+		short wauai;	/* Width of AUAi */
+		short hauai;	/* Height of AUAi */
+		char  auaunits;	/* 0x00:in, 0x01:mm */
+		int   auaxr;
+		int   auayr;
+		char  awauai;
+		char  ahauai;
+	} __attribute__ ((packed)) aua;
+} __attribute__ ((packed));
+
+/*
+ * Determine terminal geometry under z/VM via diagnose 0x210.  The model
+ * number reported in vrdccrmd selects one of the four standard 3270
+ * geometries; model 2 (24x80) is the fallback if detection fails.
+ */
+static void
+raw3270_size_device_vm(struct raw3270 *rp)
+{
+	/* rows/cols for models 2..5, indexed by model - 2 */
+	static const short vm_rows[] = { 24, 32, 43, 27 };
+	static const short vm_cols[] = { 80, 80, 80, 132 };
+	struct ccw_dev_id dev_id;
+	struct diag210 diag_data;
+	int rc, model;
+
+	ccw_device_get_id(rp->cdev, &dev_id);
+	diag_data.vrdcdvno = dev_id.devno;
+	diag_data.vrdclen = sizeof(struct diag210);
+	rc = diag210(&diag_data);
+	model = diag_data.vrdccrmd;
+	/* Use default model 2 if the size could not be detected */
+	if (rc || model < 2 || model > 5)
+		model = 2;
+	rp->model = model;
+	rp->rows = vm_rows[model - 2];
+	rp->cols = vm_cols[model - 2];
+}
+
+/*
+ * Parse the Usable Area query reply delivered into init_data by the
+ * read-modified channel program and derive model/rows/cols.  Falls back
+ * to model 2 (24x80) when the reply is missing or malformed.
+ */
+static void
+raw3270_size_device(struct raw3270 *rp)
+{
+	struct raw3270_ua *uap;
+
+	/* Got a Query Reply */
+	uap = (struct raw3270_ua *) (rp->init_data + 1);
+	/* Paranoia check. */
+	if (rp->init_readmod.rc || rp->init_data[0] != 0x88 ||
+	    uap->uab.qcode != 0x81) {
+		/* Couldn't detect size. Use default model 2. */
+		rp->model = 2;
+		rp->rows = 24;
+		rp->cols = 80;
+		return;
+	}
+	/* Copy rows/columns of default Usable Area */
+	rp->rows = uap->uab.h;
+	rp->cols = uap->uab.w;
+	/* Check for 14 bit addressing */
+	if ((uap->uab.flags0 & 0x0d) == 0x01)
+		set_bit(RAW3270_FLAGS_14BITADDR, &rp->flags);
+	/* Check for Alternate Usable Area */
+	if (uap->uab.l == sizeof(struct raw3270_ua) &&
+	    uap->aua.sdpid == 0x02) {
+		rp->rows = uap->aua.hauai;
+		rp->cols = uap->aua.wauai;
+	}
+	/* Try to find a model; stays 0 for non-standard geometries. */
+	rp->model = 0;
+	if (rp->rows == 24 && rp->cols == 80)
+		rp->model = 2;
+	if (rp->rows == 32 && rp->cols == 80)
+		rp->model = 3;
+	if (rp->rows == 43 && rp->cols == 80)
+		rp->model = 4;
+	if (rp->rows == 27 && rp->cols == 132)
+		rp->model = 5;
+}
+
+/*
+ * Finish device initialization: mark the device ready, propagate the
+ * detected size to all views and activate the first view that accepts.
+ */
+static void
+raw3270_size_device_done(struct raw3270 *rp)
+{
+	struct raw3270_view *view;
+
+	rp->view = NULL;
+	rp->state = RAW3270_STATE_READY;
+	/* Notify views about new size */
+	list_for_each_entry(view, &rp->view_list, list)
+		if (view->fn->resize)
+			view->fn->resize(view, rp->model, rp->rows, rp->cols);
+	/* Setup processing done, now activate a view */
+	list_for_each_entry(view, &rp->view_list, list) {
+		rp->view = view;
+		if (view->fn->activate(view) == 0)
+			break;
+		rp->view = NULL;
+	}
+}
+
+/* Completion callback of the init read-modified request: reply is in
+ * init_data, so the size can be parsed and initialization finished. */
+static void
+raw3270_read_modified_cb(struct raw3270_request *rq, void *data)
+{
+	struct raw3270 *rp = rq->view->dev;
+
+	raw3270_size_device(rp);
+	raw3270_size_device_done(rp);
+}
+
+/*
+ * Step 3 of the sizing sequence: after the attention interrupt, queue a
+ * read-modified channel program to fetch the query reply into init_data.
+ * Only valid while waiting for the attention (state W4ATTN).
+ */
+static void
+raw3270_read_modified(struct raw3270 *rp)
+{
+	if (rp->state != RAW3270_STATE_W4ATTN)
+		return;
+	/* Use 'read modified' to get the result of a read partition. */
+	memset(&rp->init_readmod, 0, sizeof(rp->init_readmod));
+	memset(&rp->init_data, 0, sizeof(rp->init_data));
+	rp->init_readmod.ccw.cmd_code = TC_READMOD;
+	rp->init_readmod.ccw.flags = CCW_FLAG_SLI;
+	rp->init_readmod.ccw.count = sizeof(rp->init_data);
+	rp->init_readmod.ccw.cda = (__u32) __pa(rp->init_data);
+	rp->init_readmod.callback = raw3270_read_modified_cb;
+	rp->state = RAW3270_STATE_READMOD;
+	raw3270_start_irq(&rp->init_view, &rp->init_readmod);
+}
+
+/*
+ * Step 1 of the sizing sequence: send a 'read partition' structured field
+ * (query) to the device and wait for the attention interrupt that precedes
+ * the reply (state W4ATTN).
+ */
+static void
+raw3270_writesf_readpart(struct raw3270 *rp)
+{
+	/* Read-partition (query) structured field data stream. */
+	static const unsigned char wbuf[] =
+		{ 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
+
+	/* Store 'read partition' data stream to init_data */
+	memset(&rp->init_readpart, 0, sizeof(rp->init_readpart));
+	memset(&rp->init_data, 0, sizeof(rp->init_data));
+	memcpy(&rp->init_data, wbuf, sizeof(wbuf));
+	rp->init_readpart.ccw.cmd_code = TC_WRITESF;
+	rp->init_readpart.ccw.flags = CCW_FLAG_SLI;
+	rp->init_readpart.ccw.count = sizeof(wbuf);
+	rp->init_readpart.ccw.cda = (__u32) __pa(&rp->init_data);
+	rp->state = RAW3270_STATE_W4ATTN;
+	raw3270_start_irq(&rp->init_view, &rp->init_readpart);
+}
+
+/*
+ * Device reset
+ */
+/*
+ * Completion callback of the reset request.  On success, continue device
+ * sizing: under z/VM via diagnose 210, otherwise via the read-partition
+ * query sequence.  Clearing init_reset marks "no reset pending".
+ */
+static void
+raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
+{
+	struct raw3270 *rp = rq->view->dev;
+
+	if (rp->state != RAW3270_STATE_RESET)
+		return;
+	if (rq->rc) {
+		/* Reset command failed. */
+		rp->state = RAW3270_STATE_INIT;
+	} else if (MACHINE_IS_VM) {
+		raw3270_size_device_vm(rp);
+		raw3270_size_device_done(rp);
+	} else
+		raw3270_writesf_readpart(rp);
+	memset(&rp->init_reset, 0, sizeof(rp->init_reset));
+}
+
+/*
+ * Issue an erase-write-alternate (keyboard restore) to reset the device.
+ * Must be called with the ccw device lock held.  A non-NULL init_reset.view
+ * means a reset is already in flight.
+ */
+static int
+__raw3270_reset_device(struct raw3270 *rp)
+{
+	int rc;
+
+	/* Check if reset is already pending */
+	if (rp->init_reset.view)
+		return -EBUSY;
+	/* Store reset data stream to init_data/init_reset */
+	rp->init_data[0] = TW_KR;
+	rp->init_reset.ccw.cmd_code = TC_EWRITEA;
+	rp->init_reset.ccw.flags = CCW_FLAG_SLI;
+	rp->init_reset.ccw.count = 1;
+	rp->init_reset.ccw.cda = (__u32) __pa(rp->init_data);
+	rp->init_reset.callback = raw3270_reset_device_cb;
+	rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset);
+	if (rc == 0 && rp->state == RAW3270_STATE_INIT)
+		rp->state = RAW3270_STATE_RESET;
+	return rc;
+}
+
+/* Locked wrapper around __raw3270_reset_device(). */
+static int
+raw3270_reset_device(struct raw3270 *rp)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	rc = __raw3270_reset_device(rp);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return rc;
+}
+
+/*
+ * Reset the device on behalf of a view.  Returns -EACCES if the view is
+ * not the active one or the device is frozen, -EBUSY while the device is
+ * still initializing, otherwise the raw3270_reset_device() result.
+ */
+int
+raw3270_reset(struct raw3270_view *view)
+{
+	struct raw3270 *rp = view->dev;
+
+	if (!rp || rp->view != view ||
+	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		return -EACCES;
+	if (!raw3270_state_ready(rp))
+		return -EBUSY;
+	return raw3270_reset_device(rp);
+}
+
+/*
+ * Handle a disconnected device (intervention required): fail all queued
+ * requests with -EACCES, fall back to the init view, and restart the
+ * initialization sequence from a device reset.  Called from the interrupt
+ * handler with the ccw device lock held.
+ */
+static void
+__raw3270_disconnect(struct raw3270 *rp)
+{
+	struct raw3270_request *rq;
+	struct raw3270_view *view;
+
+	rp->state = RAW3270_STATE_INIT;
+	rp->view = &rp->init_view;
+	/* Cancel all queued requests */
+	while (!list_empty(&rp->req_queue)) {
+		rq = list_entry(rp->req_queue.next,struct raw3270_request,list);
+		view = rq->view;
+		rq->rc = -EACCES;
+		list_del_init(&rq->list);
+		if (rq->callback)
+			rq->callback(rq, rq->callback_data);
+		raw3270_put_view(view);
+	}
+	/* Start from scratch */
+	__raw3270_reset_device(rp);
+}
+
+/*
+ * Interrupt handler of the init view: record command-reject/IO errors in
+ * the request and, on an attention interrupt, continue the sizing sequence
+ * with a read-modified.
+ */
+static void
+raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
+		 struct irb *irb)
+{
+	struct raw3270 *rp;
+
+	if (rq) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+			if (irb->ecw[0] & SNS0_CMD_REJECT)
+				rq->rc = -EOPNOTSUPP;
+			else
+				rq->rc = -EIO;
+		}
+	}
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		/* Queue read modified after attention interrupt */
+		rp = view->dev;
+		raw3270_read_modified(rp);
+	}
+}
+
+/* Minimal view ops used only during device initialization. */
+static struct raw3270_fn raw3270_init_fn = {
+	.intv = raw3270_init_irq
+};
+
+/*
+ * Setup new 3270 device.
+ */
+/*
+ * Initialize a raw3270 structure for a ccw device: set up the translation
+ * table, default geometry and init view, and allocate the smallest free
+ * minor number.  Returns 0 or -EUSERS when all minors are taken.
+ */
+static int
+raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
+{
+	struct list_head *l;
+	struct raw3270 *tmp;
+	int minor;
+
+	memset(rp, 0, sizeof(struct raw3270));
+	/* Copy ebcdic -> ascii translation table. */
+	memcpy(ascebc, _ascebc, 256);
+	if (tubxcorrect) {
+		/* correct brackets and circumflex */
+		ascebc['['] = 0xad;
+		ascebc[']'] = 0xbd;
+		ascebc['^'] = 0xb0;
+	}
+	rp->ascebc = ascebc;
+
+	/* Set defaults. */
+	rp->rows = 24;
+	rp->cols = 80;
+
+	INIT_LIST_HEAD(&rp->req_queue);
+	INIT_LIST_HEAD(&rp->view_list);
+
+	rp->init_view.dev = rp;
+	rp->init_view.fn = &raw3270_init_fn;
+	rp->view = &rp->init_view;
+
+	/*
+	 * Add device to list and find the smallest unused minor
+	 * number for it. Note: there is no device with minor 0,
+	 * see special case for fs3270.c:fs3270_open().
+	 */
+	mutex_lock(&raw3270_mutex);
+	/* Keep the list sorted. */
+	minor = RAW3270_FIRSTMINOR;
+	rp->minor = -1;
+	list_for_each(l, &raw3270_devices) {
+		tmp = list_entry(l, struct raw3270, list);
+		if (tmp->minor > minor) {
+			/* Gap in the minor sequence found — take it. */
+			rp->minor = minor;
+			__list_add(&rp->list, l->prev, l);
+			break;
+		}
+		minor++;
+	}
+	if (rp->minor == -1 && minor < RAW3270_MAXDEVS + RAW3270_FIRSTMINOR) {
+		rp->minor = minor;
+		list_add_tail(&rp->list, &raw3270_devices);
+	}
+	mutex_unlock(&raw3270_mutex);
+	/* No free minor number? Then give up. */
+	if (rp->minor == -1)
+		return -EUSERS;
+	rp->cdev = cdev;
+	dev_set_drvdata(&cdev->dev, rp);
+	cdev->handler = raw3270_irq;
+	return 0;
+}
+
+#ifdef CONFIG_TN3270_CONSOLE
+/* Tentative definition - see below for actual definition. */
+static struct ccw_driver raw3270_ccw_driver;
+
+/*
+ * Setup 3270 device configured as console.
+ */
+/*
+ * Set up the 3270 device configured as the system console: create the
+ * console ccw device, initialize the raw3270 structure, enable the device
+ * and busy-wait (polling, no interrupts yet) until the reset/sizing
+ * sequence reaches the READY state.
+ * Returns the raw3270 structure or an ERR_PTR.
+ */
+struct raw3270 __init *raw3270_setup_console(void)
+{
+	struct ccw_device *cdev;
+	unsigned long flags;
+	struct raw3270 *rp;
+	char *ascebc;
+	int rc;
+
+	cdev = ccw_device_create_console(&raw3270_ccw_driver);
+	if (IS_ERR(cdev))
+		return ERR_CAST(cdev);
+
+	rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
+	ascebc = kzalloc(256, GFP_KERNEL);
+	/* Both allocations were previously dereferenced unchecked. */
+	if (!rp || !ascebc) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
+	rc = raw3270_setup_device(cdev, rp, ascebc);
+	if (rc)
+		goto out_free;
+	set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
+
+	rc = ccw_device_enable_console(cdev);
+	if (rc) {
+		ccw_device_destroy_console(cdev);
+		return ERR_PTR(rc);
+	}
+
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	do {
+		__raw3270_reset_device(rp);
+		while (!raw3270_state_final(rp)) {
+			ccw_device_wait_idle(rp->cdev);
+			barrier();
+		}
+	} while (rp->state != RAW3270_STATE_READY);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return rp;
+
+out_free:
+	kfree(ascebc);
+	kfree(rp);
+	ccw_device_destroy_console(cdev);
+	return ERR_PTR(rc);
+}
+
+/* Poll the console device until its current I/O has completed. */
+void
+raw3270_wait_cons_dev(struct raw3270 *rp)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	ccw_device_wait_idle(rp->cdev);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+}
+
+#endif
+
+/*
+ * Create a 3270 device structure.
+ */
+/*
+ * Allocate and initialize a raw3270 structure for a regular (non-console)
+ * ccw device.  Returns the structure or an ERR_PTR.
+ */
+static struct raw3270 *
+raw3270_create_device(struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+	char *ascebc;
+	int rc;
+
+	rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
+	if (!rp)
+		return ERR_PTR(-ENOMEM);
+	ascebc = kmalloc(256, GFP_KERNEL);
+	if (!ascebc) {
+		kfree(rp);
+		return ERR_PTR(-ENOMEM);
+	}
+	rc = raw3270_setup_device(cdev, rp, ascebc);
+	if (rc) {
+		kfree(rp->ascebc);
+		kfree(rp);
+		rp = ERR_PTR(rc);
+	}
+	/* Get reference to ccw_device structure. */
+	/* NOTE(review): the reference is taken even on the error path above;
+	 * verify that callers drop it when an ERR_PTR is returned. */
+	get_device(&cdev->dev);
+	return rp;
+}
+
+/*
+ * Activate a view.
+ */
+int
+raw3270_activate_view(struct raw3270_view *view)
+{
+	struct raw3270 *rp;
+	struct raw3270_view *oldview, *nv;
+	unsigned long flags;
+	int rc;
+
+	rp = view->dev;
+	if (!rp)
+		return -ENODEV;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	if (rp->view == view)
+		rc = 0;			/* already active */
+	else if (!raw3270_state_ready(rp))
+		rc = -EBUSY;
+	else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		rc = -EACCES;
+	else {
+		/* Deactivate the current view, then try the new one. */
+		oldview = NULL;
+		if (rp->view && rp->view->fn->deactivate) {
+			oldview = rp->view;
+			oldview->fn->deactivate(oldview);
+		}
+		rp->view = view;
+		rc = view->fn->activate(view);
+		if (rc) {
+			/* Didn't work. Try to reactivate the old view. */
+			rp->view = oldview;
+			if (!oldview || oldview->fn->activate(oldview) != 0) {
+				/* Didn't work as well. Try any other view. */
+				list_for_each_entry(nv, &rp->view_list, list)
+					if (nv != view && nv != oldview) {
+						rp->view = nv;
+						if (nv->fn->activate(nv) == 0)
+							break;
+						rp->view = NULL;
+					}
+			}
+		}
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return rc;
+}
+
+/*
+ * Deactivate current view.
+ * The deactivated view is moved to the end of the view list and the
+ * next activatable view (if any) becomes the new current view.
+ */
+void
+raw3270_deactivate_view(struct raw3270_view *view)
+{
+	unsigned long flags;
+	struct raw3270 *rp;
+
+	rp = view->dev;
+	if (!rp)
+		return;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	if (rp->view == view) {
+		view->fn->deactivate(view);
+		rp->view = NULL;
+		/* Move deactivated view to end of list. */
+		list_del_init(&view->list);
+		list_add_tail(&view->list, &rp->view_list);
+		/* Try to activate another view. */
+		if (raw3270_state_ready(rp) &&
+		    !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
+			/* First view whose activate succeeds wins. */
+			list_for_each_entry(view, &rp->view_list, list) {
+				rp->view = view;
+				if (view->fn->activate(view) == 0)
+					break;
+				rp->view = NULL;
+			}
+		}
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+}
+
+/*
+ * Add view to device with minor "minor".
+ * Returns 0 on success or -ENODEV if no device with that minor
+ * exists.  The view's fields are initialized from the device under
+ * the ccw device lock.
+ */
+int
+raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass)
+{
+	unsigned long flags;
+	struct raw3270 *rp;
+	int rc;
+
+	if (minor <= 0)
+		return -ENODEV;
+	mutex_lock(&raw3270_mutex);
+	rc = -ENODEV;
+	list_for_each_entry(rp, &raw3270_devices, list) {
+		if (rp->minor != minor)
+			continue;
+		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+		/* Two references: one for the list, one for the caller;
+		 * raw3270_del_view drops the caller's and waits for zero. */
+		atomic_set(&view->ref_count, 2);
+		view->dev = rp;
+		view->fn = fn;
+		view->model = rp->model;
+		view->rows = rp->rows;
+		view->cols = rp->cols;
+		view->ascebc = rp->ascebc;
+		spin_lock_init(&view->lock);
+		lockdep_set_subclass(&view->lock, subclass);
+		list_add(&view->list, &rp->view_list);
+		rc = 0;
+		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+		break;
+	}
+	mutex_unlock(&raw3270_mutex);
+	return rc;
+}
+
+/*
+ * Find specific view of device with minor "minor".
+ * On success a reference to the view is taken (drop it with
+ * raw3270_put_view); returns ERR_PTR(-ENODEV) if no matching
+ * device or view is found.
+ */
+struct raw3270_view *
+raw3270_find_view(struct raw3270_fn *fn, int minor)
+{
+	struct raw3270 *rp;
+	struct raw3270_view *view, *tmp;
+	unsigned long flags;
+
+	mutex_lock(&raw3270_mutex);
+	view = ERR_PTR(-ENODEV);
+	list_for_each_entry(rp, &raw3270_devices, list) {
+		if (rp->minor != minor)
+			continue;
+		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+		/* Views are matched by their function table pointer. */
+		list_for_each_entry(tmp, &rp->view_list, list) {
+			if (tmp->fn == fn) {
+				raw3270_get_view(tmp);
+				view = tmp;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+		break;
+	}
+	mutex_unlock(&raw3270_mutex);
+	return view;
+}
+
+/*
+ * Remove view from device and free view structure via call to view->fn->free.
+ * Blocks until all other references to the view have been dropped
+ * (ref_count reaches zero), then calls the view's free callback.
+ */
+void
+raw3270_del_view(struct raw3270_view *view)
+{
+	unsigned long flags;
+	struct raw3270 *rp;
+	struct raw3270_view *nv;
+
+	rp = view->dev;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	if (rp->view == view) {
+		view->fn->deactivate(view);
+		rp->view = NULL;
+	}
+	list_del_init(&view->list);
+	if (!rp->view && raw3270_state_ready(rp) &&
+	    !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
+		/* Try to activate another view. */
+		list_for_each_entry(nv, &rp->view_list, list) {
+			if (nv->fn->activate(nv) == 0) {
+				rp->view = nv;
+				break;
+			}
+		}
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	/* Wait for reference counter to drop to zero. */
+	atomic_dec(&view->ref_count);
+	wait_event(raw3270_wait_queue, atomic_read(&view->ref_count) == 0);
+	if (view->fn->free)
+		view->fn->free(view);
+}
+
+/*
+ * Remove a 3270 device structure.
+ * Unlinks the device from the global list, disconnects it from its
+ * ccw_device (dropping the reference taken in raw3270_create_device)
+ * and frees all memory.
+ */
+static void
+raw3270_delete_device(struct raw3270 *rp)
+{
+	struct ccw_device *cdev;
+
+	/* Remove from device chain. */
+	mutex_lock(&raw3270_mutex);
+	list_del_init(&rp->list);
+	mutex_unlock(&raw3270_mutex);
+
+	/* Disconnect from ccw_device. */
+	cdev = rp->cdev;
+	rp->cdev = NULL;
+	dev_set_drvdata(&cdev->dev, NULL);
+	cdev->handler = NULL;
+
+	/* Put ccw_device structure. */
+	put_device(&cdev->dev);
+
+	/* Now free raw3270 structure. */
+	kfree(rp->ascebc);
+	kfree(rp);
+}
+
+/* Probe callback: all setup is deferred to raw3270_set_online. */
+static int
+raw3270_probe (struct ccw_device *cdev)
+{
+	return 0;
+}
+
+/*
+ * Additional attributes for a 3270 device
+ * (read-only sysfs files: model, rows, columns)
+ */
+static ssize_t
+raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	/* scnprintf returns the number of bytes actually written, which
+	 * is the value a sysfs show function must return (snprintf would
+	 * return the would-be length on truncation). */
+	return scnprintf(buf, PAGE_SIZE, "%i\n",
+			((struct raw3270 *) dev_get_drvdata(dev))->model);
+}
+static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL);
+
+static ssize_t
+raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%i\n",
+			((struct raw3270 *) dev_get_drvdata(dev))->rows);
+}
+static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL);
+
+static ssize_t
+raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%i\n",
+			((struct raw3270 *) dev_get_drvdata(dev))->cols);
+}
+static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL);
+
+static struct attribute * raw3270_attrs[] = {
+	&dev_attr_model.attr,
+	&dev_attr_rows.attr,
+	&dev_attr_columns.attr,
+	NULL,
+};
+
+static const struct attribute_group raw3270_attr_group = {
+	.attrs = raw3270_attrs,
+};
+
+/* Create the sysfs attribute group below the device's kobject. */
+static int raw3270_create_attributes(struct raw3270 *rp)
+{
+	return sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
+}
+
+/*
+ * Notifier for device addition/removal
+ */
+static LIST_HEAD(raw3270_notifier);
+
+/* Register a notifier; its create callback is replayed for every
+ * already-known device so no device is missed. */
+int raw3270_register_notifier(struct raw3270_notifier *notifier)
+{
+	struct raw3270 *rp;
+
+	mutex_lock(&raw3270_mutex);
+	list_add_tail(&notifier->list, &raw3270_notifier);
+	list_for_each_entry(rp, &raw3270_devices, list)
+		notifier->create(rp->minor);
+	mutex_unlock(&raw3270_mutex);
+	return 0;
+}
+
+/* Unregister a notifier; destroy is called for every known device
+ * before the notifier is unlinked. */
+void raw3270_unregister_notifier(struct raw3270_notifier *notifier)
+{
+	struct raw3270 *rp;
+
+	mutex_lock(&raw3270_mutex);
+	list_for_each_entry(rp, &raw3270_devices, list)
+		notifier->destroy(rp->minor);
+	list_del(&notifier->list);
+	mutex_unlock(&raw3270_mutex);
+}
+
+/*
+ * Set 3270 device online.
+ * Creates the raw3270 structure, its sysfs attributes, resets the
+ * device and informs all registered notifiers.
+ */
+static int
+raw3270_set_online (struct ccw_device *cdev)
+{
+	struct raw3270_notifier *np;
+	struct raw3270 *rp;
+	int rc;
+
+	rp = raw3270_create_device(cdev);
+	if (IS_ERR(rp))
+		return PTR_ERR(rp);
+	rc = raw3270_create_attributes(rp);
+	if (rc)
+		goto failure;
+	raw3270_reset_device(rp);
+	mutex_lock(&raw3270_mutex);
+	list_for_each_entry(np, &raw3270_notifier, list)
+		np->create(rp->minor);
+	mutex_unlock(&raw3270_mutex);
+	return 0;
+
+failure:
+	raw3270_delete_device(rp);
+	return rc;
+}
+
+/*
+ * Remove 3270 device structure.
+ * Tears down sysfs attributes, deactivates and releases all views,
+ * notifies listeners, resets the device and frees the structure.
+ */
+static void
+raw3270_remove (struct ccw_device *cdev)
+{
+	unsigned long flags;
+	struct raw3270 *rp;
+	struct raw3270_view *v;
+	struct raw3270_notifier *np;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	/*
+	 * _remove is the opposite of _probe; it's probe that
+	 * should set up rp.  raw3270_remove gets entered for
+	 * devices even if they haven't been varied online.
+	 * Thus, rp may validly be NULL here.
+	 */
+	if (rp == NULL)
+		return;
+
+	sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group);
+
+	/* Deactivate current view and remove all views. */
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	if (rp->view) {
+		if (rp->view->fn->deactivate)
+			rp->view->fn->deactivate(rp->view);
+		rp->view = NULL;
+	}
+	while (!list_empty(&rp->view_list)) {
+		v = list_entry(rp->view_list.next, struct raw3270_view, list);
+		if (v->fn->release)
+			v->fn->release(v);
+		/* raw3270_del_view sleeps (wait_event) and takes the same
+		 * lock itself, so the lock must be dropped around it. */
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+		raw3270_del_view(v);
+		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	mutex_lock(&raw3270_mutex);
+	list_for_each_entry(np, &raw3270_notifier, list)
+		np->destroy(rp->minor);
+	mutex_unlock(&raw3270_mutex);
+
+	/* Reset 3270 device. */
+	raw3270_reset_device(rp);
+	/* And finally remove it. */
+	raw3270_delete_device(rp);
+}
+
+/*
+ * Set 3270 device offline.
+ * The console device must stay online; everything else is torn
+ * down via raw3270_remove.
+ */
+static int
+raw3270_set_offline (struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	if (test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags))
+		return -EBUSY;
+	raw3270_remove(cdev);
+	return 0;
+}
+
+/* Power management freeze callback: deactivate the current view,
+ * release all views of non-console devices and mark the device
+ * frozen so no new view activation happens until thaw/restore. */
+static int raw3270_pm_stop(struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+	struct raw3270_view *view;
+	unsigned long flags;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	if (!rp)
+		return 0;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	if (rp->view && rp->view->fn->deactivate)
+		rp->view->fn->deactivate(rp->view);
+	if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) {
+		/*
+		 * Release tty and fullscreen for all non-console
+		 * devices.
+		 */
+		list_for_each_entry(view, &rp->view_list, list) {
+			if (view->fn->release)
+				view->fn->release(view);
+		}
+	}
+	set_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return 0;
+}
+
+/* Power management thaw/restore callback: clear the frozen flag and
+ * reactivate the view that was current when the device was frozen. */
+static int raw3270_pm_start(struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+	unsigned long flags;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	if (!rp)
+		return 0;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
+	if (rp->view && rp->view->fn->activate)
+		rp->view->fn->activate(rp->view);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return 0;
+}
+
+/* Force the frozen console device back into operation, e.g. to get
+ * panic output to the 3270 console while suspended. */
+void raw3270_pm_unfreeze(struct raw3270_view *view)
+{
+#ifdef CONFIG_TN3270_CONSOLE
+	struct raw3270 *rp;
+
+	rp = view->dev;
+	if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		ccw_device_force_console(rp->cdev);
+#endif
+}
+
+/* Control unit types handled by this driver (327x family and the
+ * 3174 establishment controller). */
+static struct ccw_device_id raw3270_id[] = {
+	{ CCW_DEVICE(0x3270, 0) },
+	{ CCW_DEVICE(0x3271, 0) },
+	{ CCW_DEVICE(0x3272, 0) },
+	{ CCW_DEVICE(0x3273, 0) },
+	{ CCW_DEVICE(0x3274, 0) },
+	{ CCW_DEVICE(0x3275, 0) },
+	{ CCW_DEVICE(0x3276, 0) },
+	{ CCW_DEVICE(0x3277, 0) },
+	{ CCW_DEVICE(0x3278, 0) },
+	{ CCW_DEVICE(0x3279, 0) },
+	{ CCW_DEVICE(0x3174, 0) },
+	{ /* end of list */ },
+};
+
+/* ccw driver operations for the 3270 device family. */
+static struct ccw_driver raw3270_ccw_driver = {
+	.driver = {
+		.name	= "3270",
+		.owner	= THIS_MODULE,
+	},
+	.ids		= raw3270_id,
+	.probe		= &raw3270_probe,
+	.remove		= &raw3270_remove,
+	.set_online	= &raw3270_set_online,
+	.set_offline	= &raw3270_set_offline,
+	.freeze		= &raw3270_pm_stop,
+	.thaw		= &raw3270_pm_start,
+	.restore	= &raw3270_pm_start,
+	.int_class	= IRQIO_C70,
+};
+
+/*
+ * Module/driver initialization: register the ccw driver, create the
+ * "3270" device class and add sysfs attributes for any device that
+ * was set up before init ran (i.e. the console device).
+ */
+static int
+raw3270_init(void)
+{
+	struct raw3270 *rp;
+	int rc;
+
+	/* Guard against double init (console setup may run first). */
+	if (raw3270_registered)
+		return 0;
+	raw3270_registered = 1;
+	rc = ccw_driver_register(&raw3270_ccw_driver);
+	if (rc == 0) {
+		/* Create attributes for early (= console) device. */
+		mutex_lock(&raw3270_mutex);
+		/* NOTE(review): class_create() may return an ERR_PTR which
+		 * is stored unchecked here - confirm against upstream. */
+		class3270 = class_create(THIS_MODULE, "3270");
+		list_for_each_entry(rp, &raw3270_devices, list) {
+			get_device(&rp->cdev->dev);
+			raw3270_create_attributes(rp);
+		}
+		mutex_unlock(&raw3270_mutex);
+	}
+	return rc;
+}
+
+/* Module exit: unregister the ccw driver and destroy the class. */
+static void
+raw3270_exit(void)
+{
+	ccw_driver_unregister(&raw3270_ccw_driver);
+	class_destroy(class3270);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(raw3270_init);
+module_exit(raw3270_exit);
+
+EXPORT_SYMBOL(class3270);
+EXPORT_SYMBOL(raw3270_request_alloc);
+EXPORT_SYMBOL(raw3270_request_free);
+EXPORT_SYMBOL(raw3270_request_reset);
+EXPORT_SYMBOL(raw3270_request_set_cmd);
+EXPORT_SYMBOL(raw3270_request_add_data);
+EXPORT_SYMBOL(raw3270_request_set_data);
+EXPORT_SYMBOL(raw3270_request_set_idal);
+EXPORT_SYMBOL(raw3270_buffer_address);
+EXPORT_SYMBOL(raw3270_add_view);
+EXPORT_SYMBOL(raw3270_del_view);
+EXPORT_SYMBOL(raw3270_find_view);
+EXPORT_SYMBOL(raw3270_activate_view);
+EXPORT_SYMBOL(raw3270_deactivate_view);
+EXPORT_SYMBOL(raw3270_start);
+EXPORT_SYMBOL(raw3270_start_locked);
+EXPORT_SYMBOL(raw3270_start_irq);
+EXPORT_SYMBOL(raw3270_reset);
+EXPORT_SYMBOL(raw3270_register_notifier);
+EXPORT_SYMBOL(raw3270_unregister_notifier);
+EXPORT_SYMBOL(raw3270_wait_queue);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/raw3270.h b/src/kernel/linux/v4.14/drivers/s390/char/raw3270.h
new file mode 100644
index 0000000..3afaa35
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/raw3270.h
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IBM/3270 Driver
+ *
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
+ */
+
+#include <asm/idals.h>
+#include <asm/ioctl.h>
+
+/* ioctls for fullscreen 3270 */
+#define TUBICMD		_IO('3', 3)	/* set ccw command for fs reads. */
+#define TUBOCMD		_IO('3', 4)	/* set ccw command for fs writes. */
+#define TUBGETI		_IO('3', 7)	/* get ccw command for fs reads. */
+#define TUBGETO		_IO('3', 8)	/* get ccw command for fs writes. */
+#define TUBSETMOD	_IO('3',12)	/* FIXME: what does it do ?*/
+#define TUBGETMOD	_IO('3',13)	/* FIXME: what does it do ?*/
+
+/* Local Channel Commands */
+#define TC_WRITE	0x01		/* Write */
+#define TC_RDBUF	0x02		/* Read Buffer */
+#define TC_EWRITE	0x05		/* Erase write */
+#define TC_READMOD	0x06		/* Read modified */
+#define TC_EWRITEA	0x0d		/* Erase write alternate */
+#define TC_WRITESF	0x11		/* Write structured field */
+
+/* Buffer Control Orders */
+#define TO_SF		0x1d		/* Start field */
+#define TO_SBA		0x11		/* Set buffer address */
+#define TO_IC		0x13		/* Insert cursor */
+#define TO_PT		0x05		/* Program tab */
+#define TO_RA		0x3c		/* Repeat to address */
+#define TO_SFE		0x29		/* Start field extended */
+#define TO_EUA		0x12		/* Erase unprotected to address */
+#define TO_MF		0x2c		/* Modify field */
+#define TO_SA		0x28		/* Set attribute */
+
+/* Field Attribute Bytes */
+#define TF_INPUT	0x40		/* Visible input */
+#define TF_INPUTN	0x4c		/* Invisible input */
+#define TF_INMDT	0xc1		/* Visible, Set-MDT */
+#define TF_LOG		0x60
+
+/* Character Attribute Bytes */
+#define TAT_RESET	0x00
+#define TAT_FIELD	0xc0
+#define TAT_EXTHI	0x41
+#define TAT_COLOR	0x42
+#define TAT_CHARS	0x43
+#define TAT_TRANS	0x46
+
+/* Extended-Highlighting Bytes */
+#define TAX_RESET	0x00
+#define TAX_BLINK	0xf1
+#define TAX_REVER	0xf2
+#define TAX_UNDER	0xf4
+
+/* Reset value */
+#define TAR_RESET	0x00
+
+/* Color values */
+#define TAC_RESET	0x00
+#define TAC_BLUE	0xf1
+#define TAC_RED		0xf2
+#define TAC_PINK	0xf3
+#define TAC_GREEN	0xf4
+#define TAC_TURQ	0xf5
+#define TAC_YELLOW	0xf6
+#define TAC_WHITE	0xf7
+#define TAC_DEFAULT	0x00
+
+/* Write Control Characters */
+#define TW_NONE		0x40		/* No particular action */
+#define TW_KR		0xc2		/* Keyboard restore */
+#define TW_PLUSALARM	0x04		/* Add this bit for alarm */
+
+#define RAW3270_FIRSTMINOR	1	/* First minor number */
+#define RAW3270_MAXDEVS		255	/* Max number of 3270 devices */
+
+/* For TUBGETMOD and TUBSETMOD. Should include. */
+struct raw3270_iocb {
+	short model;
+	short line_cnt;
+	short col_cnt;
+	short pf_cnt;
+	short re_cnt;
+	short map;
+};
+
+struct raw3270;
+struct raw3270_view;
+extern struct class *class3270;
+
+/* 3270 CCW request */
+struct raw3270_request {
+	struct list_head list;		/* list head for request queueing. */
+	struct raw3270_view *view;	/* view of this request */
+	struct ccw1 ccw;		/* single ccw. */
+	void *buffer;			/* output buffer. */
+	size_t size;			/* size of output buffer. */
+	int rescnt;			/* residual count from devstat. */
+	int rc;				/* return code for this request. */
+
+	/* Callback for delivering final status. */
+	void (*callback)(struct raw3270_request *, void *);
+	void *callback_data;
+};
+
+struct raw3270_request *raw3270_request_alloc(size_t size);
+struct raw3270_request *raw3270_request_alloc_bootmem(size_t size);
+void raw3270_request_free(struct raw3270_request *);
+void raw3270_request_reset(struct raw3270_request *);
+void raw3270_request_set_cmd(struct raw3270_request *, u8 cmd);
+int  raw3270_request_add_data(struct raw3270_request *, void *, size_t);
+void raw3270_request_set_data(struct raw3270_request *, void *, size_t);
+void raw3270_request_set_idal(struct raw3270_request *, struct idal_buffer *);
+
+/* A request is final once it is no longer on any request queue. */
+static inline int
+raw3270_request_final(struct raw3270_request *rq)
+{
+	return list_empty(&rq->list);
+}
+
+void raw3270_buffer_address(struct raw3270 *, char *, unsigned short);
+
+/*
+ * Functions of a 3270 view.
+ */
+struct raw3270_fn {
+	int  (*activate)(struct raw3270_view *);
+	void (*deactivate)(struct raw3270_view *);
+	void (*intv)(struct raw3270_view *,
+		     struct raw3270_request *, struct irb *);
+	void (*release)(struct raw3270_view *);
+	void (*free)(struct raw3270_view *);
+	void (*resize)(struct raw3270_view *, int, int, int);
+};
+
+/*
+ * View structure chaining. The raw3270_view structure is meant to
+ * be embedded at the start of the real view data structure, e.g.:
+ *   struct example {
+ *     struct raw3270_view view;
+ *     ...
+ *   };
+ */
+struct raw3270_view {
+	struct list_head list;
+	spinlock_t lock;
+#define RAW3270_VIEW_LOCK_IRQ	0
+#define RAW3270_VIEW_LOCK_BH	1
+	atomic_t ref_count;
+	struct raw3270 *dev;
+	struct raw3270_fn *fn;
+	unsigned int model;
+	unsigned int rows, cols;	/* # of rows & colums of the view */
+	unsigned char *ascebc;		/* ascii -> ebcdic table */
+};
+
+int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int);
+int raw3270_activate_view(struct raw3270_view *);
+void raw3270_del_view(struct raw3270_view *);
+void raw3270_deactivate_view(struct raw3270_view *);
+struct raw3270_view *raw3270_find_view(struct raw3270_fn *, int);
+int raw3270_start(struct raw3270_view *, struct raw3270_request *);
+int raw3270_start_locked(struct raw3270_view *, struct raw3270_request *);
+int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *);
+int raw3270_reset(struct raw3270_view *);
+struct raw3270_view *raw3270_view(struct raw3270_view *);
+int raw3270_view_active(struct raw3270_view *);
+
+/* Reference count inliner for view structures. */
+/* Take an additional reference on a view. */
+static inline void
+raw3270_get_view(struct raw3270_view *view)
+{
+	atomic_inc(&view->ref_count);
+}
+
+extern wait_queue_head_t raw3270_wait_queue;
+
+/* Drop a view reference; wake raw3270_del_view waiters on the last put. */
+static inline void
+raw3270_put_view(struct raw3270_view *view)
+{
+	if (atomic_dec_return(&view->ref_count) == 0)
+		wake_up(&raw3270_wait_queue);
+}
+
+struct raw3270 *raw3270_setup_console(void);
+void raw3270_wait_cons_dev(struct raw3270 *);
+
+/* Notifier for device addition/removal */
+struct raw3270_notifier {
+	struct list_head list;
+	void (*create)(int minor);
+	void (*destroy)(int minor);
+};
+
+int raw3270_register_notifier(struct raw3270_notifier *);
+void raw3270_unregister_notifier(struct raw3270_notifier *);
+void raw3270_pm_unfreeze(struct raw3270_view *);
+
+/*
+ * Little memory allocator for string objects. 
+ */
+struct string
+{
+	struct list_head list;
+	struct list_head update;
+	unsigned long size;
+	unsigned long len;
+	char string[0];
+} __attribute__ ((aligned(8)));
+
+/*
+ * Allocate a string object of at least len bytes from the free list.
+ * First-fit: if the chosen free chunk is large enough, a new string
+ * object is carved from its tail (leaving the remainder on the free
+ * list); otherwise the whole chunk is taken.  Returns NULL when no
+ * chunk fits.
+ */
+static inline struct string *
+alloc_string(struct list_head *free_list, unsigned long len)
+{
+	struct string *cs, *tmp;
+	unsigned long size;
+
+	/* Round the request up to a multiple of 8 bytes. */
+	size = (len + 7L) & -8L;
+	list_for_each_entry(cs, free_list, list) {
+		if (cs->size < size)
+			continue;
+		if (cs->size > size + sizeof(struct string)) {
+			/* Split: carve the allocation from the end of cs. */
+			char *endaddr = (char *) (cs + 1) + cs->size;
+			tmp = (struct string *) (endaddr - size) - 1;
+			tmp->size = size;
+			cs->size -= size + sizeof(struct string);
+			cs = tmp;
+		} else
+			list_del(&cs->list);
+		cs->len = len;
+		INIT_LIST_HEAD(&cs->list);
+		INIT_LIST_HEAD(&cs->update);
+		return cs;
+	}
+	return NULL;
+}
+
+/*
+ * Return a string object to the free list, coalescing it with
+ * physically adjacent free neighbours on either side.  The free list
+ * is kept sorted by address.  Returns the size of the resulting
+ * (possibly merged) free chunk.
+ */
+static inline unsigned long
+free_string(struct list_head *free_list, struct string *cs)
+{
+	struct string *tmp;
+	struct list_head *p, *left;
+
+	/* Find out the left neighbour in free memory list. */
+	left = free_list;
+	list_for_each(p, free_list) {
+		if (list_entry(p, struct string, list) > cs)
+			break;
+		left = p;
+	}
+	/* Try to merge with right neighbour = next element from left. */
+	if (left->next != free_list) {
+		tmp = list_entry(left->next, struct string, list);
+		if ((char *) (cs + 1) + cs->size == (char *) tmp) {
+			list_del(&tmp->list);
+			cs->size += tmp->size + sizeof(struct string);
+		}
+	}
+	/* Try to merge with left neighbour. */
+	if (left != free_list) {
+		tmp = list_entry(left, struct string, list);
+		if ((char *) (tmp + 1) + tmp->size == (char *) cs) {
+			/* cs is absorbed; tmp stays on the list. */
+			tmp->size += cs->size + sizeof(struct string);
+			return tmp->size;
+		}
+	}
+	/* No left merge: insert cs after its left neighbour. */
+	__list_add(&cs->list, left, left->next);
+	return cs->size;
+}
+
+/* Donate a raw memory area of the given size to the allocator by
+ * turning it into one free string object. */
+static inline void
+add_string_memory(struct list_head *free_list, void *mem, unsigned long size)
+{
+	struct string *cs;
+
+	cs = (struct string *) mem;
+	cs->size = size - sizeof(struct string);
+	free_string(free_list, cs);
+}
+
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp.c
new file mode 100644
index 0000000..41d8aa9
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp.c
@@ -0,0 +1,1240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * core function to access sclp interface
+ *
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/reboot.h>
+#include <linux/jiffies.h>
+#include <linux/init.h>
+#include <linux/suspend.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <asm/types.h>
+#include <asm/irq.h>
+
+#include "sclp.h"
+
+#define SCLP_HEADER		"sclp: "
+
+/* Lock to protect internal data consistency. */
+static DEFINE_SPINLOCK(sclp_lock);
+
+/* Mask of events that we can send to the sclp interface. */
+static sccb_mask_t sclp_receive_mask;
+
+/* Mask of events that we can receive from the sclp interface. */
+static sccb_mask_t sclp_send_mask;
+
+/* List of registered event listeners and senders. */
+static struct list_head sclp_reg_list;
+
+/* List of queued requests. */
+static struct list_head sclp_req_queue;
+
+/* Data for read and and init requests. */
+static struct sclp_req sclp_read_req;
+static struct sclp_req sclp_init_req;
+static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+
+/* Suspend request */
+static DECLARE_COMPLETION(sclp_request_queue_flushed);
+
+/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
+int sclp_console_pages = SCLP_CONSOLE_PAGES;
+/* Flag to indicate if buffer pages are dropped on buffer full condition */
+int sclp_console_drop = 1;
+/* Number of times the console dropped buffer pages */
+unsigned long sclp_console_full;
+
+/* Callback of the suspend request: signals that the request queue
+ * has been flushed so suspend processing can continue. */
+static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
+{
+	complete(&sclp_request_queue_flushed);
+}
+
+/* Parse the "sclp_con_pages=" kernel parameter; values below the
+ * compile-time minimum are silently ignored. */
+static int __init sclp_setup_console_pages(char *str)
+{
+	int pages, rc;
+
+	rc = kstrtoint(str, 0, &pages);
+	if (!rc && pages >= SCLP_CONSOLE_PAGES)
+		sclp_console_pages = pages;
+	return 1;
+}
+
+__setup("sclp_con_pages=", sclp_setup_console_pages);
+
+/* Parse the "sclp_con_drop=" kernel parameter (buffer-full policy).
+ * NOTE(review): the raw integer is stored as-is rather than being
+ * normalized to 0/1 - confirm callers treat it as a boolean. */
+static int __init sclp_setup_console_drop(char *str)
+{
+	int drop, rc;
+
+	rc = kstrtoint(str, 0, &drop);
+	if (!rc)
+		sclp_console_drop = drop;
+	return 1;
+}
+
+__setup("sclp_con_drop=", sclp_setup_console_drop);
+
+static struct sclp_req sclp_suspend_req;
+
+/* Timer for request retries. */
+static struct timer_list sclp_request_timer;
+
+/* Timer for queued requests. */
+static struct timer_list sclp_queue_timer;
+
+/* Internal state: is a request active at the sclp? */
+static volatile enum sclp_running_state_t {
+	sclp_running_state_idle,
+	sclp_running_state_running,
+	sclp_running_state_reset_pending
+} sclp_running_state = sclp_running_state_idle;
+
+/* Internal state: is a read request pending? */
+static volatile enum sclp_reading_state_t {
+	sclp_reading_state_idle,
+	sclp_reading_state_reading
+} sclp_reading_state = sclp_reading_state_idle;
+
+/* Internal state: is the driver currently serving requests? */
+static volatile enum sclp_activation_state_t {
+	sclp_activation_state_active,
+	sclp_activation_state_deactivating,
+	sclp_activation_state_inactive,
+	sclp_activation_state_activating
+} sclp_activation_state = sclp_activation_state_active;
+
+/* Internal state: is an init mask request pending? */
+static volatile enum sclp_mask_state_t {
+	sclp_mask_state_idle,
+	sclp_mask_state_initializing
+} sclp_mask_state = sclp_mask_state_idle;
+
+/* Internal state: is the driver suspended? */
+static enum sclp_suspend_state_t {
+	sclp_suspend_state_running,
+	sclp_suspend_state_suspended,
+} sclp_suspend_state = sclp_suspend_state_running;
+
+/* Maximum retry counts */
+#define SCLP_INIT_RETRY		3
+#define SCLP_MASK_RETRY		3
+
+/* Timeout intervals in seconds.*/
+#define SCLP_BUSY_INTERVAL	10
+#define SCLP_RETRY_INTERVAL	30
+
+static void sclp_process_queue(void);
+static void __sclp_make_read_req(void);
+static int sclp_init_mask(int calculate);
+static int sclp_init(void);
+
+/* Queue a read event data request at the head of the request queue,
+ * unless one is already pending.  Called while sclp_lock is locked. */
+static void
+__sclp_queue_read_req(void)
+{
+	if (sclp_reading_state == sclp_reading_state_idle) {
+		sclp_reading_state = sclp_reading_state_reading;
+		__sclp_make_read_req();
+		/* Add request to head of queue */
+		list_add(&sclp_read_req.list, &sclp_req_queue);
+	}
+}
+
+/* Set up request retry timer. Called while sclp_lock is locked. */
+static inline void
+__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
+			 unsigned long data)
+{
+	/* Cancel a pending timer before re-arming with new parameters. */
+	del_timer(&sclp_request_timer);
+	sclp_request_timer.function = function;
+	sclp_request_timer.data = data;
+	sclp_request_timer.expires = jiffies + time;
+	add_timer(&sclp_request_timer);
+}
+
+/* Request timeout handler. Restart the request queue. If DATA is non-zero,
+ * force restart of running request. */
+static void
+sclp_request_timeout(unsigned long data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (data) {
+		if (sclp_running_state == sclp_running_state_running) {
+			/* Break running state and queue NOP read event request
+			 * to get a defined interface state. */
+			__sclp_queue_read_req();
+			sclp_running_state = sclp_running_state_idle;
+		}
+	} else {
+		/* Busy retry: just re-arm and try the queue again. */
+		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+					 sclp_request_timeout, 0);
+	}
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	sclp_process_queue();
+}
+
+/*
+ * Returns the expire value in jiffies of the next pending request timeout,
+ * if any. Needs to be called with sclp_lock.
+ * A return of 0 means no queued request has a timeout armed.
+ */
+static unsigned long __sclp_req_queue_find_next_timeout(void)
+{
+	unsigned long expires_next = 0;
+	struct sclp_req *req;
+
+	list_for_each_entry(req, &sclp_req_queue, list) {
+		if (!req->queue_expires)
+			continue;
+		if (!expires_next ||
+		   (time_before(req->queue_expires, expires_next)))
+				expires_next = req->queue_expires;
+	}
+	return expires_next;
+}
+
+/*
+ * Returns expired request, if any, and removes it from the list.
+ * The removed request's status is set to SCLP_REQ_QUEUED_TIMEOUT.
+ */
+static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
+{
+	unsigned long flags, now;
+	struct sclp_req *req;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	now = jiffies;
+	/* Don't need list_for_each_safe because we break out after list_del */
+	list_for_each_entry(req, &sclp_req_queue, list) {
+		if (!req->queue_expires)
+			continue;
+		if (time_before_eq(req->queue_expires, now)) {
+			/* Only still-queued requests can time out here;
+			 * running ones are handled by the request timer. */
+			if (req->status == SCLP_REQ_QUEUED) {
+				req->status = SCLP_REQ_QUEUED_TIMEOUT;
+				list_del(&req->list);
+				goto out;
+			}
+		}
+	}
+	req = NULL;
+out:
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return req;
+}
+
+/*
+ * Timeout handler for queued requests. Removes request from list and
+ * invokes callback. This timer can be set per request in situations where
+ * waiting too long would be harmful to the system, e.g. during SE reboot.
+ */
+static void sclp_req_queue_timeout(unsigned long data)
+{
+	unsigned long flags, expires_next;
+	struct sclp_req *req;
+
+	/* Drain every expired request; callbacks run without sclp_lock. */
+	do {
+		req = __sclp_req_queue_remove_expired_req();
+		if (req && req->callback)
+			req->callback(req, req->callback_data);
+	} while (req);
+
+	/* Re-arm the timer for the next pending per-request timeout. */
+	spin_lock_irqsave(&sclp_lock, flags);
+	expires_next = __sclp_req_queue_find_next_timeout();
+	if (expires_next)
+		mod_timer(&sclp_queue_timer, expires_next);
+	spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
+/* Try to start a request. Return zero if the request was successfully
+ * started or if it will be started at a later time. Return non-zero otherwise.
+ * Called while sclp_lock is locked. */
+static int
+__sclp_start_request(struct sclp_req *req)
+{
+	int rc;
+
+	/* Only one request may be active at the SCLP at a time. */
+	if (sclp_running_state != sclp_running_state_idle)
+		return 0;
+	del_timer(&sclp_request_timer);
+	rc = sclp_service_call(req->command, req->sccb);
+	req->start_count++;
+
+	if (rc == 0) {
+		/* Successfully started request */
+		req->status = SCLP_REQ_RUNNING;
+		sclp_running_state = sclp_running_state_running;
+		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
+					 sclp_request_timeout, 1);
+		return 0;
+	} else if (rc == -EBUSY) {
+		/* Try again later */
+		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+					 sclp_request_timeout, 0);
+		return 0;
+	}
+	/* Request failed */
+	req->status = SCLP_REQ_FAILED;
+	return rc;
+}
+
+/* Try to start queued requests. */
+static void
+sclp_process_queue(void)
+{
+	struct sclp_req *req;
+	int rc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (sclp_running_state != sclp_running_state_idle) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return;
+	}
+	del_timer(&sclp_request_timer);
+	while (!list_empty(&sclp_req_queue)) {
+		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
+		/* A request without an sccb cannot be started; treat it
+		 * like an aborted request and run its callback. */
+		if (!req->sccb)
+			goto do_post;
+		rc = __sclp_start_request(req);
+		if (rc == 0)
+			break;
+		/* Request failed */
+		if (req->start_count > 1) {
+			/* Cannot abort already submitted request - could still
+			 * be active at the SCLP */
+			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+						 sclp_request_timeout, 0);
+			break;
+		}
+do_post:
+		/* Post-processing for aborted request */
+		list_del(&req->list);
+		if (req->callback) {
+			/* Callbacks run without sclp_lock held. */
+			spin_unlock_irqrestore(&sclp_lock, flags);
+			req->callback(req, req->callback_data);
+			spin_lock_irqsave(&sclp_lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
+/* Decide whether a request may be queued right now.  Suspend and
+ * init requests are always allowed; everything else requires the
+ * driver to be initialized, active and not suspended.
+ * Called while sclp_lock is locked. */
+static int __sclp_can_add_request(struct sclp_req *req)
+{
+	if (req == &sclp_suspend_req || req == &sclp_init_req)
+		return 1;
+	if (sclp_suspend_state != sclp_suspend_state_running)
+		return 0;
+	if (sclp_init_state != sclp_init_state_initialized)
+		return 0;
+	if (sclp_activation_state != sclp_activation_state_active)
+		return 0;
+	return 1;
+}
+
+/* Queue a new request. Return zero on success, non-zero otherwise. */
+int
+sclp_add_request(struct sclp_req *req)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (!__sclp_can_add_request(req)) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EIO;
+	}
+	req->status = SCLP_REQ_QUEUED;
+	req->start_count = 0;
+	list_add_tail(&req->list, &sclp_req_queue);
+	rc = 0;
+	/* Arm the per-request queue timeout if the caller set one. */
+	if (req->queue_timeout) {
+		req->queue_expires = jiffies + req->queue_timeout * HZ;
+		if (!timer_pending(&sclp_queue_timer) ||
+		    time_after(sclp_queue_timer.expires, req->queue_expires))
+			mod_timer(&sclp_queue_timer, req->queue_expires);
+	} else
+		req->queue_expires = 0;
+	/* Start if request is first in list */
+	if (sclp_running_state == sclp_running_state_idle &&
+	    req->list.prev == &sclp_req_queue) {
+		if (!req->sccb) {
+			list_del(&req->list);
+			rc = -ENODATA;
+			goto out;
+		}
+		rc = __sclp_start_request(req);
+		if (rc)
+			list_del(&req->list);
+	}
+out:
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+EXPORT_SYMBOL(sclp_add_request);
+
+/* Dispatch events found in request buffer to registered listeners. Return 0
+ * if all events were dispatched, non-zero otherwise. */
+static int
+sclp_dispatch_evbufs(struct sccb_header *sccb)
+{
+	unsigned long flags;
+	struct evbuf_header *evbuf;
+	struct list_head *l;
+	struct sclp_register *reg;
+	int offset;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	rc = 0;
+	/* Walk all event buffers contained in the SCCB. */
+	for (offset = sizeof(struct sccb_header); offset < sccb->length;
+	     offset += evbuf->length) {
+		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
+		/* Check for malformed hardware response */
+		if (evbuf->length == 0)
+			break;
+		/* Search for event handler */
+		reg = NULL;
+		list_for_each(l, &sclp_reg_list) {
+			reg = list_entry(l, struct sclp_register, list);
+			/* Test the listener's receive-mask bit for this
+			 * event type (bit numbering as in SCLP_EVTYP_MASK). */
+			if (reg->receive_mask & (1 << (32 - evbuf->type)))
+				break;
+			else
+				reg = NULL;
+		}
+		if (reg && reg->receiver_fn) {
+			/* Drop the lock while calling the listener. */
+			spin_unlock_irqrestore(&sclp_lock, flags);
+			reg->receiver_fn(evbuf);
+			spin_lock_irqsave(&sclp_lock, flags);
+		} else if (reg == NULL)
+			/* No listener for this event type. */
+			rc = -EOPNOTSUPP;
+	}
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+/* Read event data request callback: hand completed event data to the
+ * listeners and mark the reading state machine idle again. */
+static void
+sclp_read_cb(struct sclp_req *req, void *data)
+{
+	struct sccb_header *sccb = (struct sccb_header *) req->sccb;
+	unsigned long flags;
+	int success;
+
+	success = req->status == SCLP_REQ_DONE &&
+		  (sccb->response_code == 0x20 ||
+		   sccb->response_code == 0x220);
+	if (success)
+		sclp_dispatch_evbufs(sccb);
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_reading_state = sclp_reading_state_idle;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
+/* Prepare read event data request. Called while sclp_lock is locked. */
+static void __sclp_make_read_req(void)
+{
+	struct sccb_header *sccb = (struct sccb_header *) sclp_read_sccb;
+
+	/* Start from a zeroed page and a zeroed request structure. */
+	clear_page(sccb);
+	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
+	/* Fill in the SCCB header for a full-page read. */
+	sccb->length = PAGE_SIZE;
+	sccb->function_code = 0;
+	sccb->control_mask[2] = 0x80;
+	/* Fill in the request itself. */
+	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
+	sclp_read_req.status = SCLP_REQ_QUEUED;
+	sclp_read_req.start_count = 0;
+	sclp_read_req.callback = sclp_read_cb;
+	sclp_read_req.sccb = sccb;
+}
+
+/* Search request list for request with matching sccb. Return request if found,
+ * NULL otherwise. Called while sclp_lock is locked. */
+static inline struct sclp_req *
+__sclp_find_req(u32 sccb)
+{
+	struct sclp_req *req;
+
+	list_for_each_entry(req, &sclp_req_queue, list) {
+		if ((u32) (addr_t) req->sccb == sccb)
+			return req;
+	}
+	return NULL;
+}
+
+/* Handler for external interruption. Perform request post-processing.
+ * Prepare read event data request if necessary. Start processing of next
+ * request on queue. */
+static void sclp_interrupt_handler(struct ext_code ext_code,
+				   unsigned int param32, unsigned long param64)
+{
+	struct sclp_req *req;
+	u32 finished_sccb;
+	u32 evbuf_pending;
+
+	inc_irq_stat(IRQEXT_SCP);
+	spin_lock(&sclp_lock);
+	/* The interrupt parameter carries the finished SCCB address with
+	 * the low three bits masked off, plus pending-event flag bits. */
+	finished_sccb = param32 & 0xfffffff8;
+	evbuf_pending = param32 & 0x3;
+	if (finished_sccb) {
+		del_timer(&sclp_request_timer);
+		sclp_running_state = sclp_running_state_reset_pending;
+		req = __sclp_find_req(finished_sccb);
+		if (req) {
+			/* Request post-processing */
+			list_del(&req->list);
+			req->status = SCLP_REQ_DONE;
+			if (req->callback) {
+				/* Drop the lock around the callback. */
+				spin_unlock(&sclp_lock);
+				req->callback(req, req->callback_data);
+				spin_lock(&sclp_lock);
+			}
+		}
+		sclp_running_state = sclp_running_state_idle;
+	}
+	/* Queue a read request if the SCLP signalled pending event data. */
+	if (evbuf_pending &&
+	    sclp_activation_state == sclp_activation_state_active)
+		__sclp_queue_read_req();
+	spin_unlock(&sclp_lock);
+	/* Kick off the next queued request outside the lock. */
+	sclp_process_queue();
+}
+
+/* Convert interval in jiffies to TOD ticks. One TOD second is 2^32 units
+ * of the value returned here. The parameter is deliberately NOT named
+ * "jiffies" so it does not shadow the global kernel jiffies counter. */
+static inline u64
+sclp_tod_from_jiffies(unsigned long jiffies_interval)
+{
+	return (u64) (jiffies_interval / HZ) << 32;
+}
+
+/* Wait until a currently running request finished. Note: while this function
+ * is running, no timers are served on the calling CPU. */
+void
+sclp_sync_wait(void)
+{
+	unsigned long long old_tick;
+	unsigned long flags;
+	unsigned long cr0, cr0_sync;
+	u64 timeout;
+	int irq_context;
+
+	/* We'll be disabling timer interrupts, so we need a custom timeout
+	 * mechanism */
+	timeout = 0;
+	if (timer_pending(&sclp_request_timer)) {
+		/* Get timeout TOD value */
+		timeout = get_tod_clock_fast() +
+			  sclp_tod_from_jiffies(sclp_request_timer.expires -
+						jiffies);
+	}
+	local_irq_save(flags);
+	/* Prevent bottom half from executing once we force interrupts open */
+	irq_context = in_interrupt();
+	if (!irq_context)
+		local_bh_disable();
+	/* Enable service-signal interruption, disable timer interrupts */
+	old_tick = local_tick_disable();
+	trace_hardirqs_on();
+	__ctl_store(cr0, 0, 0);
+	/* Mask all interrupt subclasses in CR0, then re-enable only the
+	 * service-signal subclass (bit 54). */
+	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
+	cr0_sync |= 1UL << (63 - 54);
+	__ctl_load(cr0_sync, 0, 0);
+	__arch_local_irq_stosm(0x01);
+	/* Loop until driver state indicates finished request */
+	while (sclp_running_state != sclp_running_state_idle) {
+		/* Check for expired request timer */
+		if (timer_pending(&sclp_request_timer) &&
+		    get_tod_clock_fast() > timeout &&
+		    del_timer(&sclp_request_timer))
+			/* Deliver the timer callback by hand since timer
+			 * interrupts are disabled here. */
+			sclp_request_timer.function(sclp_request_timer.data);
+		cpu_relax();
+	}
+	/* Restore original CR0, bottom-half and tick state. */
+	local_irq_disable();
+	__ctl_load(cr0, 0, 0);
+	if (!irq_context)
+		_local_bh_enable();
+	local_tick_enable(old_tick);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(sclp_sync_wait);
+
+/* Dispatch changes in send and receive mask to registered listeners. */
+static void
+sclp_dispatch_state_change(void)
+{
+	struct list_head *l;
+	struct sclp_register *reg;
+	unsigned long flags;
+	sccb_mask_t receive_mask;
+	sccb_mask_t send_mask;
+
+	/* Repeatedly scan for a listener whose effective masks changed,
+	 * call its state_change_fn without holding the lock, and rescan
+	 * until a full pass finds no further changes. */
+	do {
+		spin_lock_irqsave(&sclp_lock, flags);
+		reg = NULL;
+		list_for_each(l, &sclp_reg_list) {
+			reg = list_entry(l, struct sclp_register, list);
+			receive_mask = reg->send_mask & sclp_receive_mask;
+			send_mask = reg->receive_mask & sclp_send_mask;
+			if (reg->sclp_receive_mask != receive_mask ||
+			    reg->sclp_send_mask != send_mask) {
+				reg->sclp_receive_mask = receive_mask;
+				reg->sclp_send_mask = send_mask;
+				break;
+			} else
+				reg = NULL;
+		}
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		if (reg && reg->state_change_fn)
+			reg->state_change_fn(reg);
+	} while (reg);
+}
+
+/* Layout of a state-change event buffer as delivered by the SCLP. */
+struct sclp_statechangebuf {
+	struct evbuf_header	header;
+	/* Validity flags: a field below is meaningful only if its flag
+	 * is set. */
+	u8		validity_sclp_active_facility_mask : 1;
+	u8		validity_sclp_receive_mask : 1;
+	u8		validity_sclp_send_mask : 1;
+	u8		validity_read_data_function_mask : 1;
+	u16		_zeros : 12;
+	u16		mask_length;	/* expected: sizeof(sccb_mask_t) */
+	u64		sclp_active_facility_mask;
+	sccb_mask_t	sclp_receive_mask;
+	sccb_mask_t	sclp_send_mask;
+	u32		read_data_function_mask;
+} __attribute__((packed));
+
+
+/* State change event callback. Inform listeners of changes. */
+static void
+sclp_state_change_cb(struct evbuf_header *evbuf)
+{
+	unsigned long flags;
+	struct sclp_statechangebuf *scbuf;
+
+	scbuf = (struct sclp_statechangebuf *) evbuf;
+	/* Ignore events with an unexpected mask size. */
+	if (scbuf->mask_length != sizeof(sccb_mask_t))
+		return;
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Apply only the masks flagged as valid in the event. */
+	if (scbuf->validity_sclp_receive_mask)
+		sclp_receive_mask = scbuf->sclp_receive_mask;
+	if (scbuf->validity_sclp_send_mask)
+		sclp_send_mask = scbuf->sclp_send_mask;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	if (scbuf->validity_sclp_active_facility_mask)
+		sclp.facilities = scbuf->sclp_active_facility_mask;
+	sclp_dispatch_state_change();
+}
+
+/* Built-in listener for state-change events. */
+static struct sclp_register sclp_state_change_event = {
+	.receive_mask = EVTYP_STATECHANGE_MASK,
+	.receiver_fn = sclp_state_change_cb
+};
+
+/* Calculate receive and send mask of currently registered listeners.
+ * Called while sclp_lock is locked. */
+static inline void
+__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
+{
+	struct sclp_register *reg;
+
+	*receive_mask = 0;
+	*send_mask = 0;
+	/* OR together the masks of every registered listener. */
+	list_for_each_entry(reg, &sclp_reg_list, list) {
+		*receive_mask |= reg->receive_mask;
+		*send_mask |= reg->send_mask;
+	}
+}
+
+/* Register event listener. Return 0 on success, non-zero otherwise.
+ * Fails with -EBUSY if one of the requested event types is already
+ * claimed by another listener. */
+int
+sclp_register(struct sclp_register *reg)
+{
+	unsigned long flags;
+	sccb_mask_t receive_mask;
+	sccb_mask_t send_mask;
+	int rc;
+
+	rc = sclp_init();
+	if (rc)
+		return rc;
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Check event mask for collisions */
+	__sclp_get_mask(&receive_mask, &send_mask);
+	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EBUSY;
+	}
+	/* Trigger initial state change callback */
+	reg->sclp_receive_mask = 0;
+	reg->sclp_send_mask = 0;
+	reg->pm_event_posted = 0;
+	list_add(&reg->list, &sclp_reg_list);
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	/* Propagate the new combined masks to the SCLP. */
+	rc = sclp_init_mask(1);
+	if (rc) {
+		/* Roll back the registration on failure. */
+		spin_lock_irqsave(&sclp_lock, flags);
+		list_del(&reg->list);
+		spin_unlock_irqrestore(&sclp_lock, flags);
+	}
+	return rc;
+}
+
+EXPORT_SYMBOL(sclp_register);
+
+/* Unregister event listener and recalculate the event masks. */
+void
+sclp_unregister(struct sclp_register *reg)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	list_del(&reg->list);
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	/* Inform the SCLP about the reduced masks. */
+	sclp_init_mask(1);
+}
+
+EXPORT_SYMBOL(sclp_unregister);
+
+/* Remove event buffers which are marked processed. Return the number of
+ * remaining event buffers. */
+int
+sclp_remove_processed(struct sccb_header *sccb)
+{
+	struct evbuf_header *evbuf;
+	int unprocessed;
+	u16 remaining;
+
+	evbuf = (struct evbuf_header *) (sccb + 1);
+	unprocessed = 0;
+	remaining = sccb->length - sizeof(struct sccb_header);
+	while (remaining > 0) {
+		/* Check for malformed event buffer - a zero-length entry
+		 * would otherwise make this loop spin forever (same guard
+		 * as in sclp_dispatch_evbufs()). */
+		if (evbuf->length == 0)
+			break;
+		remaining -= evbuf->length;
+		if (evbuf->flags & 0x80) {
+			/* Processed: drop the buffer and close the gap. */
+			sccb->length -= evbuf->length;
+			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
+			       remaining);
+		} else {
+			unprocessed++;
+			evbuf = (struct evbuf_header *)
+					((addr_t) evbuf + evbuf->length);
+		}
+	}
+	return unprocessed;
+}
+
+EXPORT_SYMBOL(sclp_remove_processed);
+
+/* Prepare init mask request. Called while sclp_lock is locked. */
+static inline void
+__sclp_make_init_req(u32 receive_mask, u32 send_mask)
+{
+	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
+
+	/* Start from a zeroed page and a zeroed request structure. */
+	clear_page(sccb);
+	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
+	/* Fill in the SCCB with the requested event masks. */
+	sccb->header.length = sizeof(struct init_sccb);
+	sccb->mask_length = sizeof(sccb_mask_t);
+	sccb->receive_mask = receive_mask;
+	sccb->send_mask = send_mask;
+	sccb->sclp_receive_mask = 0;
+	sccb->sclp_send_mask = 0;
+	/* Fill in the request itself. */
+	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
+	sclp_init_req.status = SCLP_REQ_FILLED;
+	sclp_init_req.start_count = 0;
+	sclp_init_req.callback = NULL;
+	sclp_init_req.callback_data = NULL;
+	sclp_init_req.sccb = sccb;
+}
+
+/* Start init mask request. If calculate is non-zero, calculate the mask as
+ * requested by registered listeners. Use zero mask otherwise. Return 0 on
+ * success, non-zero otherwise. */
+static int
+sclp_init_mask(int calculate)
+{
+	unsigned long flags;
+	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
+	sccb_mask_t receive_mask;
+	sccb_mask_t send_mask;
+	int retry;
+	int rc;
+	unsigned long wait;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Check if interface is in appropriate state */
+	if (sclp_mask_state != sclp_mask_state_idle) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EBUSY;
+	}
+	if (sclp_activation_state == sclp_activation_state_inactive) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EINVAL;
+	}
+	sclp_mask_state = sclp_mask_state_initializing;
+	/* Determine mask */
+	if (calculate)
+		__sclp_get_mask(&receive_mask, &send_mask);
+	else {
+		receive_mask = 0;
+		send_mask = 0;
+	}
+	rc = -EIO;
+	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
+		/* Prepare request */
+		__sclp_make_init_req(receive_mask, send_mask);
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		if (sclp_add_request(&sclp_init_req)) {
+			/* Try again later */
+			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
+			while (time_before(jiffies, wait))
+				sclp_sync_wait();
+			spin_lock_irqsave(&sclp_lock, flags);
+			continue;
+		}
+		/* Busy-wait for the request to reach a final state. */
+		while (sclp_init_req.status != SCLP_REQ_DONE &&
+		       sclp_init_req.status != SCLP_REQ_FAILED)
+			sclp_sync_wait();
+		spin_lock_irqsave(&sclp_lock, flags);
+		if (sclp_init_req.status == SCLP_REQ_DONE &&
+		    sccb->header.response_code == 0x20) {
+			/* Successful request */
+			if (calculate) {
+				sclp_receive_mask = sccb->sclp_receive_mask;
+				sclp_send_mask = sccb->sclp_send_mask;
+			} else {
+				sclp_receive_mask = 0;
+				sclp_send_mask = 0;
+			}
+			/* Notify listeners without holding the lock. */
+			spin_unlock_irqrestore(&sclp_lock, flags);
+			sclp_dispatch_state_change();
+			spin_lock_irqsave(&sclp_lock, flags);
+			rc = 0;
+			break;
+		}
+	}
+	sclp_mask_state = sclp_mask_state_idle;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+/* Deactivate SCLP interface. On success, new requests will be rejected,
+ * events will no longer be dispatched. Return 0 on success, non-zero
+ * otherwise. */
+int
+sclp_deactivate(void)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Deactivate can only be called when active */
+	if (sclp_activation_state != sclp_activation_state_active) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EINVAL;
+	}
+	sclp_activation_state = sclp_activation_state_deactivating;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	/* Clear all event masks; on failure stay active. */
+	rc = sclp_init_mask(0);
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_activation_state = rc ? sclp_activation_state_active :
+		sclp_activation_state_inactive;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+EXPORT_SYMBOL(sclp_deactivate);
+
+/* Reactivate SCLP interface after sclp_deactivate. On success, new
+ * requests will be accepted, events will be dispatched again. Return 0 on
+ * success, non-zero otherwise. */
+int
+sclp_reactivate(void)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Reactivate can only be called when inactive */
+	if (sclp_activation_state != sclp_activation_state_inactive) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EINVAL;
+	}
+	sclp_activation_state = sclp_activation_state_activating;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	/* Re-establish the listener event masks; on failure stay inactive. */
+	rc = sclp_init_mask(1);
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_activation_state = rc ? sclp_activation_state_inactive :
+		sclp_activation_state_active;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+EXPORT_SYMBOL(sclp_reactivate);
+
+/* Handler for external interruption used during initialization. Modify
+ * request state to done. */
+static void sclp_check_handler(struct ext_code ext_code,
+			       unsigned int param32, unsigned long param64)
+{
+	u32 finished_sccb;
+
+	inc_irq_stat(IRQEXT_SCP);
+	/* The low three bits of the parameter are flags, not address bits. */
+	finished_sccb = param32 & 0xfffffff8;
+	/* Is this the interrupt we are waiting for? */
+	if (finished_sccb == 0)
+		return;
+	/* During the interface check only our own init SCCB may complete. */
+	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
+		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
+		      finished_sccb);
+	spin_lock(&sclp_lock);
+	if (sclp_running_state == sclp_running_state_running) {
+		sclp_init_req.status = SCLP_REQ_DONE;
+		sclp_running_state = sclp_running_state_idle;
+	}
+	spin_unlock(&sclp_lock);
+}
+
+/* Initial init mask request timed out. Modify request state to failed. */
+static void
+sclp_check_timeout(unsigned long data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Only fail the request if it is still marked as running. */
+	if (sclp_running_state == sclp_running_state_running) {
+		sclp_init_req.status = SCLP_REQ_FAILED;
+		sclp_running_state = sclp_running_state_idle;
+	}
+	spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
+/* Perform a check of the SCLP interface. Return zero if the interface is
+ * available and there are no pending requests from a previous instance.
+ * Return non-zero otherwise. */
+static int
+sclp_check_interface(void)
+{
+	struct init_sccb *sccb;
+	unsigned long flags;
+	int retry;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Prepare init mask command */
+	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
+	if (rc) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return rc;
+	}
+	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
+		/* Issue an init mask request with all masks zeroed. */
+		__sclp_make_init_req(0, 0);
+		sccb = (struct init_sccb *) sclp_init_req.sccb;
+		rc = sclp_service_call(sclp_init_req.command, sccb);
+		if (rc == -EIO)
+			break;
+		sclp_init_req.status = SCLP_REQ_RUNNING;
+		sclp_running_state = sclp_running_state_running;
+		/* Arm a timer so sclp_check_timeout() fails the request if
+		 * no interrupt arrives in time. */
+		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
+					 sclp_check_timeout, 0);
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		/* Enable service-signal interruption - needs to happen
+		 * with IRQs enabled. */
+		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
+		/* Wait for signal from interrupt or timeout */
+		sclp_sync_wait();
+		/* Disable service-signal interruption - needs to happen
+		 * with IRQs enabled. */
+		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
+		spin_lock_irqsave(&sclp_lock, flags);
+		del_timer(&sclp_request_timer);
+		if (sclp_init_req.status == SCLP_REQ_DONE &&
+		    sccb->header.response_code == 0x20) {
+			rc = 0;
+			break;
+		} else
+			rc = -EBUSY;
+	}
+	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
+ * events from interfering with rebooted system. */
+static int
+sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+	sclp_deactivate();
+	return NOTIFY_DONE;
+}
+
+/* Registered with the reboot notifier chain in sclp_init(). */
+static struct notifier_block sclp_reboot_notifier = {
+	.notifier_call = sclp_reboot_event
+};
+
+/*
+ * Suspend/resume SCLP notifier implementation
+ */
+
+/* Post a power-management event to all registered listeners. Forward pass
+ * (!rollback): call each listener that has not yet seen the event.
+ * Rollback: call again each listener that already saw it. The lock is
+ * dropped around each pm_event_fn callback, so the list is rescanned from
+ * the start after every call. */
+static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
+{
+	struct sclp_register *reg;
+	unsigned long flags;
+
+	if (!rollback) {
+		/* Forward pass: mark all listeners as not yet notified. */
+		spin_lock_irqsave(&sclp_lock, flags);
+		list_for_each_entry(reg, &sclp_reg_list, list)
+			reg->pm_event_posted = 0;
+		spin_unlock_irqrestore(&sclp_lock, flags);
+	}
+	do {
+		spin_lock_irqsave(&sclp_lock, flags);
+		list_for_each_entry(reg, &sclp_reg_list, list) {
+			if (rollback && reg->pm_event_posted)
+				goto found;
+			if (!rollback && !reg->pm_event_posted)
+				goto found;
+		}
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return;
+found:
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		if (reg->pm_event_fn)
+			reg->pm_event_fn(reg, sclp_pm_event);
+		reg->pm_event_posted = rollback ? 0 : 1;
+	} while (1);
+}
+
+/*
+ * Suspend/resume callbacks for the sclp platform device
+ */
+
+/* Freeze callback: notify listeners, flush the request queue and
+ * deactivate the interface before entering a suspended state. */
+static int sclp_freeze(struct device *dev)
+{
+	unsigned long flags;
+	int rc;
+
+	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_suspend_state = sclp_suspend_state_suspended;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+
+	/* Init suspend data */
+	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
+	sclp_suspend_req.callback = sclp_suspend_req_cb;
+	sclp_suspend_req.status = SCLP_REQ_FILLED;
+	init_completion(&sclp_request_queue_flushed);
+
+	rc = sclp_add_request(&sclp_suspend_req);
+	if (rc == 0)
+		wait_for_completion(&sclp_request_queue_flushed);
+	else if (rc != -ENODATA)
+		goto fail_thaw;
+
+	rc = sclp_deactivate();
+	if (rc)
+		goto fail_thaw;
+	return 0;
+
+fail_thaw:
+	/* Roll back: mark the driver running again and post a THAW
+	 * rollback event to the listeners already notified. */
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_suspend_state = sclp_suspend_state_running;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
+	return rc;
+}
+
+/* Leave the suspended state: reactivate the interface, mark the driver
+ * as running again and post @event to all registered listeners. */
+static int sclp_undo_suspend(enum sclp_pm_event event)
+{
+	unsigned long flags;
+	int rc = sclp_reactivate();
+
+	if (rc)
+		return rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_suspend_state = sclp_suspend_state_running;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+
+	sclp_pm_event(event, 0);
+	return 0;
+}
+
+/* Power-management thaw callback. */
+static int sclp_thaw(struct device *dev)
+{
+	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+}
+
+/* Power-management restore callback. */
+static int sclp_restore(struct device *dev)
+{
+	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
+}
+
+static const struct dev_pm_ops sclp_pm_ops = {
+	.freeze		= sclp_freeze,
+	.thaw		= sclp_thaw,
+	.restore	= sclp_restore,
+};
+
+/* Driver attribute: number of pages used for console output buffering. */
+static ssize_t con_pages_show(struct device_driver *dev, char *buf)
+{
+	return sprintf(buf, "%i\n", sclp_console_pages);
+}
+
+static DRIVER_ATTR_RO(con_pages);
+
+/* Driver attribute: console drop-on-full setting. */
+static ssize_t con_drop_show(struct device_driver *dev, char *buf)
+{
+	return sprintf(buf, "%i\n", sclp_console_drop);
+}
+
+static DRIVER_ATTR_RO(con_drop);
+
+/* Driver attribute: number of times the console buffer ran full. */
+static ssize_t con_full_show(struct device_driver *dev, char *buf)
+{
+	return sprintf(buf, "%lu\n", sclp_console_full);
+}
+
+static DRIVER_ATTR_RO(con_full);
+
+/* sysfs attribute group exposed by the sclp platform driver. */
+static struct attribute *sclp_drv_attrs[] = {
+	&driver_attr_con_pages.attr,
+	&driver_attr_con_drop.attr,
+	&driver_attr_con_full.attr,
+	NULL,
+};
+static struct attribute_group sclp_drv_attr_group = {
+	.attrs = sclp_drv_attrs,
+};
+static const struct attribute_group *sclp_drv_attr_groups[] = {
+	&sclp_drv_attr_group,
+	NULL,
+};
+
+static struct platform_driver sclp_pdrv = {
+	.driver = {
+		.name	= "sclp",
+		.pm	= &sclp_pm_ops,
+		.groups = sclp_drv_attr_groups,
+	},
+};
+
+static struct platform_device *sclp_pdev;
+
+/* Initialize SCLP driver. Return zero if driver is operational, non-zero
+ * otherwise. Safe to call more than once: a repeated call finds the state
+ * already initialized and returns 0 via fail_unlock. */
+static int
+sclp_init(void)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Check for previous or running initialization */
+	if (sclp_init_state != sclp_init_state_uninitialized)
+		goto fail_unlock;
+	sclp_init_state = sclp_init_state_initializing;
+	/* Set up variables */
+	INIT_LIST_HEAD(&sclp_req_queue);
+	INIT_LIST_HEAD(&sclp_reg_list);
+	/* The state-change listener is always registered. */
+	list_add(&sclp_state_change_event.list, &sclp_reg_list);
+	init_timer(&sclp_request_timer);
+	init_timer(&sclp_queue_timer);
+	sclp_queue_timer.function = sclp_req_queue_timeout;
+	/* Check interface */
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	rc = sclp_check_interface();
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (rc)
+		goto fail_init_state_uninitialized;
+	/* Register reboot handler */
+	rc = register_reboot_notifier(&sclp_reboot_notifier);
+	if (rc)
+		goto fail_init_state_uninitialized;
+	/* Register interrupt handler */
+	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
+	if (rc)
+		goto fail_unregister_reboot_notifier;
+	sclp_init_state = sclp_init_state_initialized;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	/* Enable service-signal external interruption - needs to happen with
+	 * IRQs enabled. */
+	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
+	sclp_init_mask(1);
+	return 0;
+
+fail_unregister_reboot_notifier:
+	unregister_reboot_notifier(&sclp_reboot_notifier);
+fail_init_state_uninitialized:
+	sclp_init_state = sclp_init_state_uninitialized;
+fail_unlock:
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+/*
+ * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
+ * to print the panic message.
+ */
+static int sclp_panic_notify(struct notifier_block *self,
+			     unsigned long event, void *data)
+{
+	if (sclp_suspend_state == sclp_suspend_state_suspended)
+		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block sclp_on_panic_nb = {
+	.notifier_call = sclp_panic_notify,
+	.priority = SCLP_PANIC_PRIO,
+};
+
+/* Driver entry point: register the platform driver/device, hook the panic
+ * notifier chain and initialize the SCLP core. Cleanup on failure is in
+ * reverse registration order. */
+static __init int sclp_initcall(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&sclp_pdrv);
+	if (rc)
+		return rc;
+
+	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
+	rc = PTR_ERR_OR_ZERO(sclp_pdev);
+	if (rc)
+		goto fail_platform_driver_unregister;
+
+	rc = atomic_notifier_chain_register(&panic_notifier_list,
+					    &sclp_on_panic_nb);
+	if (rc)
+		goto fail_platform_device_unregister;
+
+	return sclp_init();
+
+fail_platform_device_unregister:
+	platform_device_unregister(sclp_pdev);
+fail_platform_driver_unregister:
+	platform_driver_unregister(&sclp_pdrv);
+	return rc;
+}
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp.h b/src/kernel/linux/v4.14/drivers/s390/char/sclp.h
new file mode 100644
index 0000000..f41f6e2
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp.h
@@ -0,0 +1,303 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 1999,2012
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __SCLP_H__
+#define __SCLP_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <asm/sclp.h>
+#include <asm/ebcdic.h>
+
+/* maximum number of pages concerning our own memory management */
+#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
+#define SCLP_CONSOLE_PAGES	6
+
+#define SCLP_EVTYP_MASK(T)	(1U << (32 - (T)))
+
+#define EVTYP_OPCMD		0x01
+#define EVTYP_MSG		0x02
+#define EVTYP_CONFMGMDATA	0x04
+#define EVTYP_DIAG_TEST		0x07
+#define EVTYP_STATECHANGE	0x08
+#define EVTYP_PMSGCMD		0x09
+#define EVTYP_ASYNC		0x0A
+#define EVTYP_CTLPROGIDENT	0x0B
+#define EVTYP_ERRNOTIFY		0x18
+#define EVTYP_VT220MSG		0x1A
+#define EVTYP_SDIAS		0x1C
+#define EVTYP_SIGQUIESCE	0x1D
+#define EVTYP_OCF		0x1E
+
+#define EVTYP_OPCMD_MASK	SCLP_EVTYP_MASK(EVTYP_OPCMD)
+#define EVTYP_MSG_MASK		SCLP_EVTYP_MASK(EVTYP_MSG)
+#define EVTYP_CONFMGMDATA_MASK	SCLP_EVTYP_MASK(EVTYP_CONFMGMDATA)
+#define EVTYP_DIAG_TEST_MASK	SCLP_EVTYP_MASK(EVTYP_DIAG_TEST)
+#define EVTYP_STATECHANGE_MASK	SCLP_EVTYP_MASK(EVTYP_STATECHANGE)
+#define EVTYP_PMSGCMD_MASK	SCLP_EVTYP_MASK(EVTYP_PMSGCMD)
+#define EVTYP_ASYNC_MASK	SCLP_EVTYP_MASK(EVTYP_ASYNC)
+#define EVTYP_CTLPROGIDENT_MASK	SCLP_EVTYP_MASK(EVTYP_CTLPROGIDENT)
+#define EVTYP_ERRNOTIFY_MASK	SCLP_EVTYP_MASK(EVTYP_ERRNOTIFY)
+#define EVTYP_VT220MSG_MASK	SCLP_EVTYP_MASK(EVTYP_VT220MSG)
+#define EVTYP_SDIAS_MASK	SCLP_EVTYP_MASK(EVTYP_SDIAS)
+#define EVTYP_SIGQUIESCE_MASK	SCLP_EVTYP_MASK(EVTYP_SIGQUIESCE)
+#define EVTYP_OCF_MASK		SCLP_EVTYP_MASK(EVTYP_OCF)
+
+#define GNRLMSGFLGS_DOM		0x8000
+#define GNRLMSGFLGS_SNDALRM	0x4000
+#define GNRLMSGFLGS_HOLDMSG	0x2000
+
+#define LNTPFLGS_CNTLTEXT	0x8000
+#define LNTPFLGS_LABELTEXT	0x4000
+#define LNTPFLGS_DATATEXT	0x2000
+#define LNTPFLGS_ENDTEXT	0x1000
+#define LNTPFLGS_PROMPTTEXT	0x0800
+
+typedef unsigned int sclp_cmdw_t;
+
+#define SCLP_CMDW_READ_CPU_INFO		0x00010001
+#define SCLP_CMDW_READ_EVENT_DATA	0x00770005
+#define SCLP_CMDW_WRITE_EVENT_DATA	0x00760005
+#define SCLP_CMDW_WRITE_EVENT_MASK	0x00780005
+
+#define GDS_ID_MDSMU		0x1310
+#define GDS_ID_MDSROUTEINFO	0x1311
+#define GDS_ID_AGUNWRKCORR	0x1549
+#define GDS_ID_SNACONDREPORT	0x1532
+#define GDS_ID_CPMSU		0x1212
+#define GDS_ID_ROUTTARGINSTR	0x154D
+#define GDS_ID_OPREQ		0x8070
+#define GDS_ID_TEXTCMD		0x1320
+
+#define GDS_KEY_SELFDEFTEXTMSG	0x31
+
+enum sclp_pm_event {
+	SCLP_PM_EVENT_FREEZE,
+	SCLP_PM_EVENT_THAW,
+	SCLP_PM_EVENT_RESTORE,
+};
+
+#define SCLP_PANIC_PRIO		1
+#define SCLP_PANIC_PRIO_CLIENT	0
+
+typedef u32 sccb_mask_t;	/* ATTENTION: assumes 32bit mask !!! */
+
+struct sccb_header {
+	u16	length;
+	u8	function_code;
+	u8	control_mask[3];
+	u16	response_code;
+} __attribute__((packed));
+
+struct init_sccb {
+	struct sccb_header header;
+	u16 _reserved;
+	u16 mask_length;
+	sccb_mask_t receive_mask;
+	sccb_mask_t send_mask;
+	sccb_mask_t sclp_receive_mask;
+	sccb_mask_t sclp_send_mask;
+} __attribute__((packed));
+
+struct read_cpu_info_sccb {
+	struct	sccb_header header;
+	u16	nr_configured;
+	u16	offset_configured;
+	u16	nr_standby;
+	u16	offset_standby;
+	u8	reserved[4096 - 16];
+} __attribute__((packed, aligned(PAGE_SIZE)));
+
+/* Convert a READ_CPU_INFO response SCCB into a struct sclp_core_info. */
+static inline void sclp_fill_core_info(struct sclp_core_info *info,
+				       struct read_cpu_info_sccb *sccb)
+{
+	char *page = (char *) sccb;
+
+	memset(info, 0, sizeof(*info));
+	info->configured = sccb->nr_configured;
+	info->standby = sccb->nr_standby;
+	info->combined = sccb->nr_configured + sccb->nr_standby;
+	/* Core entries start at the offset reported in the SCCB. */
+	memcpy(&info->core, page + sccb->offset_configured,
+	       info->combined * sizeof(struct sclp_core_entry));
+}
+
+#define SCLP_HAS_CHP_INFO	(sclp.facilities & 0x8000000000000000ULL)
+#define SCLP_HAS_CHP_RECONFIG	(sclp.facilities & 0x2000000000000000ULL)
+#define SCLP_HAS_CPU_INFO	(sclp.facilities & 0x0800000000000000ULL)
+#define SCLP_HAS_CPU_RECONFIG	(sclp.facilities & 0x0400000000000000ULL)
+#define SCLP_HAS_PCI_RECONFIG	(sclp.facilities & 0x0000000040000000ULL)
+
+
+struct gds_subvector {
+	u8	length;
+	u8	key;
+} __attribute__((packed));
+
+struct gds_vector {
+	u16	length;
+	u16	gds_id;
+} __attribute__((packed));
+
+struct evbuf_header {
+	u16	length;
+	u8	type;
+	u8	flags;
+	u16	_reserved;
+} __attribute__((packed));
+
+struct sclp_req {
+	struct list_head list;		/* list_head for request queueing. */
+	sclp_cmdw_t command;		/* sclp command to execute */
+	void	*sccb;			/* pointer to the sccb to execute */
+	char	status;			/* status of this request */
+	int     start_count;		/* number of SVCs done for this req */
+	/* Callback that is called after reaching final status. */
+	void (*callback)(struct sclp_req *, void *data);
+	void *callback_data;
+	int queue_timeout;		/* request queue timeout (sec), set by
+					   caller of sclp_add_request(), if
+					   needed */
+	/* Internal fields */
+	unsigned long queue_expires;	/* request queue timeout (jiffies) */
+};
+
+#define SCLP_REQ_FILLED	  0x00	/* request is ready to be processed */
+#define SCLP_REQ_QUEUED	  0x01	/* request is queued to be processed */
+#define SCLP_REQ_RUNNING  0x02	/* request is currently running */
+#define SCLP_REQ_DONE	  0x03	/* request is completed successfully */
+#define SCLP_REQ_FAILED	  0x05	/* request is finally failed */
+#define SCLP_REQ_QUEUED_TIMEOUT 0x06	/* request on queue timed out */
+
+#define SCLP_QUEUE_INTERVAL 5	/* timeout interval for request queue */
+
+/* function pointers that a high level driver has to use for registration */
+/* of some routines it wants to be called from the low level driver */
+struct sclp_register {
+	struct list_head list;
+	/* User wants to receive: */
+	sccb_mask_t receive_mask;
+	/* User wants to send: */
+	sccb_mask_t send_mask;
+	/* H/W can receive: */
+	sccb_mask_t sclp_receive_mask;
+	/* H/W can send: */
+	sccb_mask_t sclp_send_mask;
+	/* called if event type availability changes */
+	void (*state_change_fn)(struct sclp_register *);
+	/* called for events in cp_receive_mask/sclp_receive_mask */
+	void (*receiver_fn)(struct evbuf_header *);
+	/* called for power management events */
+	void (*pm_event_fn)(struct sclp_register *, enum sclp_pm_event);
+	/* pm event posted flag */
+	int pm_event_posted;
+};
+
+/* externals from sclp.c */
+int sclp_add_request(struct sclp_req *req);
+void sclp_sync_wait(void);
+int sclp_register(struct sclp_register *reg);
+void sclp_unregister(struct sclp_register *reg);
+int sclp_remove_processed(struct sccb_header *sccb);
+int sclp_deactivate(void);
+int sclp_reactivate(void);
+int sclp_sync_request(sclp_cmdw_t command, void *sccb);
+int sclp_sync_request_timeout(sclp_cmdw_t command, void *sccb, int timeout);
+
+int sclp_sdias_init(void);
+void sclp_sdias_exit(void);
+
+enum {
+	sclp_init_state_uninitialized,
+	sclp_init_state_initializing,
+	sclp_init_state_initialized
+};
+
+extern int sclp_init_state;
+extern int sclp_console_pages;
+extern int sclp_console_drop;
+extern unsigned long sclp_console_full;
+
+extern char sclp_early_sccb[PAGE_SIZE];
+
+void sclp_early_wait_irq(void);
+int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb);
+unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb);
+int sclp_early_set_event_mask(struct init_sccb *sccb,
+			      unsigned long receive_mask,
+			      unsigned long send_mask);
+
+/* useful inlines */
+
+/* Perform service call. Return 0 on success, non-zero otherwise. */
+static inline int sclp_service_call(sclp_cmdw_t command, void *sccb)
+{
+	int cc = 4; /* Initialize for program check handling */
+
+	asm volatile(
+		"0:	.insn	rre,0xb2200000,%1,%2\n"	 /* servc %1,%2 */
+		"1:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"2:\n"
+		EX_TABLE(0b, 2b)
+		EX_TABLE(1b, 2b)
+		: "+&d" (cc) : "d" (command), "a" ((unsigned long)sccb)
+		: "cc", "memory");
+	/* Map the resulting condition code to an errno value; cc stays 4
+	 * only if a program check skipped past the instruction via the
+	 * exception table. */
+	if (cc == 4)
+		return -EINVAL;
+	if (cc == 3)
+		return -EIO;
+	if (cc == 2)
+		return -EBUSY;
+	return 0;
+}
+
+/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */
+/* translate single character from ASCII to EBCDIC, selecting the
+ * translation table by machine type */
+static inline unsigned char
+sclp_ascebc(unsigned char ch)
+{
+	return (MACHINE_IS_VM) ? _ascebc[ch] : _ascebc_500[ch];
+}
+
+/* translate string from EBCDIC to ASCII (in place, nr bytes) */
+static inline void
+sclp_ebcasc_str(unsigned char *str, int nr)
+{
+	if (MACHINE_IS_VM)
+		EBCASC(str, nr);
+	else
+		EBCASC_500(str, nr);
+}
+
+/* translate string from ASCII to EBCDIC (in place, nr bytes) */
+static inline void
+sclp_ascebc_str(unsigned char *str, int nr)
+{
+	if (MACHINE_IS_VM)
+		ASCEBC(str, nr);
+	else
+		ASCEBC_500(str, nr);
+}
+
+/* Find the first GDS vector with the given id in [start, end).
+ * NOTE(review): a vector with length 0 would make this loop spin
+ * forever - callers appear to rely on well-formed data; confirm. */
+static inline struct gds_vector *
+sclp_find_gds_vector(void *start, void *end, u16 id)
+{
+	struct gds_vector *v;
+
+	for (v = start; (void *) v < end; v = (void *) v + v->length)
+		if (v->gds_id == id)
+			return v;
+	return NULL;
+}
+
+/* Find the first GDS subvector with the given key in [start, end).
+ * NOTE(review): same zero-length caveat as sclp_find_gds_vector(). */
+static inline struct gds_subvector *
+sclp_find_gds_subvector(void *start, void *end, u8 key)
+{
+	struct gds_subvector *sv;
+
+	for (sv = start; (void *) sv < end; sv = (void *) sv + sv->length)
+		if (sv->key == key)
+			return sv;
+	return NULL;
+}
+
+#endif	 /* __SCLP_H__ */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_async.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_async.c
new file mode 100644
index 0000000..19c2542
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_async.c
@@ -0,0 +1,212 @@
+/*
+ * Enable Asynchronous Notification via SCLP.
+ *
+ * Copyright IBM Corp. 2009
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/kmod.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/sysctl.h>
+#include <linux/utsname.h>
+#include "sclp.h"
+
+/* 0/1 flag toggled through the "kernel.callhome" sysctl below. */
+static int callhome_enabled;
+/* Single preallocated request and SCCB, reused for every notification. */
+static struct sclp_req *request;
+static struct sclp_async_sccb *sccb;
+static int sclp_async_send_wait(char *message);
+static struct ctl_table_header *callhome_sysctl_header;
+static DEFINE_SPINLOCK(sclp_async_lock);
+#define SCLP_NORMAL_WRITE	0x00
+
+/* Event buffer layout for an asynchronous notification record. */
+struct async_evbuf {
+	struct evbuf_header header;
+	u64 reserved;
+	u8 rflags;
+	u8 empty;
+	u8 rtype;
+	u8 otype;
+	char comp_id[12];
+	char data[3000]; /* there is still some space left */
+} __attribute__((packed));
+
+struct sclp_async_sccb {
+	struct sccb_header header;
+	struct async_evbuf evbuf;
+} __attribute__((packed));
+
+static struct sclp_register sclp_async_register = {
+	.send_mask = EVTYP_ASYNC_MASK,
+};
+
+/*
+ * Panic notifier: append the node name to the panic message and send
+ * it out as an asynchronous SCLP notification.
+ *
+ * NOTE(review): the strncat() bound is sizeof() the *source* field
+ * (nodename), not the space remaining in the destination buffer
+ * pointed to by @data — whether this can overflow depends on the
+ * caller-provided buffer size, which is not visible here.  Confirm
+ * against the panic notifier call site.
+ */
+static int call_home_on_panic(struct notifier_block *self,
+			      unsigned long event, void *data)
+{
+	strncat(data, init_utsname()->nodename,
+		sizeof(init_utsname()->nodename));
+	sclp_async_send_wait(data);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block call_home_panic_nb = {
+	.notifier_call = call_home_on_panic,
+	.priority = INT_MAX,
+};
+
+/*
+ * sysctl handler for "kernel.callhome": reads report the current
+ * callhome_enabled value as text, writes accept only "0" or "1".
+ *
+ * NOTE(review): the read path copies sizeof(buf) (3) bytes to user
+ * space regardless of *count — verify callers always supply at least
+ * that much buffer space.
+ */
+static int proc_handler_callhome(struct ctl_table *ctl, int write,
+				 void __user *buffer, size_t *count,
+				 loff_t *ppos)
+{
+	unsigned long val;
+	int len, rc;
+	char buf[3];
+
+	/* Nothing to do for a zero-length request or a repeated read. */
+	if (!*count || (*ppos && !write)) {
+		*count = 0;
+		return 0;
+	}
+	if (!write) {
+		len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled);
+		rc = copy_to_user(buffer, buf, sizeof(buf));
+		if (rc != 0)
+			return -EFAULT;
+	} else {
+		len = *count;
+		rc = kstrtoul_from_user(buffer, len, 0, &val);
+		if (rc)
+			return rc;
+		/* Only the literal values 0 and 1 are accepted. */
+		if (val != 0 && val != 1)
+			return -EINVAL;
+		callhome_enabled = val;
+	}
+	*count = len;
+	*ppos += len;
+	return 0;
+}
+
+/* sysctl registration: exposes /proc/sys/kernel/callhome. */
+static struct ctl_table callhome_table[] = {
+	{
+		.procname	= "callhome",
+		.mode		= 0644,
+		.proc_handler	= proc_handler_callhome,
+	},
+	{}
+};
+
+static struct ctl_table kern_dir_table[] = {
+	{
+		.procname	= "kernel",
+		.maxlen		= 0,
+		.mode		= 0555,
+		.child		= callhome_table,
+	},
+	{}
+};
+
+/*
+ * Function used to transfer asynchronous notification
+ * records which waits for send completion
+ */
+static int sclp_async_send_wait(char *message)
+{
+	struct async_evbuf *evb;
+	int rc;
+	unsigned long flags;
+
+	/* Silently succeed when call-home is disabled via sysctl. */
+	if (!callhome_enabled)
+		return 0;
+	/* Fill the preallocated SCCB/request pair for a write-event-data
+	 * command carrying the message as an asynchronous record. */
+	sccb->evbuf.header.type = EVTYP_ASYNC;
+	sccb->evbuf.rtype = 0xA5;
+	sccb->evbuf.otype = 0x00;
+	evb = &sccb->evbuf;
+	request->command = SCLP_CMDW_WRITE_EVENT_DATA;
+	request->sccb = sccb;
+	request->status = SCLP_REQ_FILLED;
+	strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
+	/*
+	 * Retain Queue
+	 * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
+	 */
+	strncpy(sccb->evbuf.comp_id, CONFIG_SCLP_ASYNC_ID,
+		sizeof(sccb->evbuf.comp_id));
+	sccb->evbuf.header.length = sizeof(sccb->evbuf);
+	sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
+	sccb->header.function_code = SCLP_NORMAL_WRITE;
+	rc = sclp_add_request(request);
+	if (rc)
+		return rc;
+	/* Busy-wait (panic context safe) until the request completes. */
+	spin_lock_irqsave(&sclp_async_lock, flags);
+	while (request->status != SCLP_REQ_DONE &&
+		request->status != SCLP_REQ_FAILED) {
+		 sclp_sync_wait();
+	}
+	spin_unlock_irqrestore(&sclp_async_lock, flags);
+	if (request->status != SCLP_REQ_DONE)
+		return -EIO;
+	/* 0x0020 is the "command complete" response code. */
+	rc = ((struct sclp_async_sccb *)
+	       request->sccb)->header.response_code;
+	if (rc != 0x0020)
+		return -EIO;
+	if (evb->header.flags != 0x80)
+		return -EIO;
+	return rc;
+}
+
+/*
+ * Module init: register the async event type with the SCLP core,
+ * create the sysctl switch, preallocate the request + SCCB pair and
+ * hook into the panic notifier chain.  On any failure everything
+ * acquired so far is torn down again (goto cleanup ladder).
+ */
+static int __init sclp_async_init(void)
+{
+	int rc;
+
+	rc = sclp_register(&sclp_async_register);
+	if (rc)
+		return rc;
+	rc = -EOPNOTSUPP;
+	/* Bail out if the service element does not offer async events. */
+	if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
+		goto out_sclp;
+	rc = -ENOMEM;
+	callhome_sysctl_header = register_sysctl_table(kern_dir_table);
+	if (!callhome_sysctl_header)
+		goto out_sclp;
+	/* SCCB must be below 2G and page-aligned, hence GFP_DMA page. */
+	request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
+	sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!request || !sccb)
+		goto out_mem;
+	rc = atomic_notifier_chain_register(&panic_notifier_list,
+					    &call_home_panic_nb);
+	if (!rc)
+		goto out;
+out_mem:
+	kfree(request);
+	free_page((unsigned long) sccb);
+	unregister_sysctl_table(callhome_sysctl_header);
+out_sclp:
+	sclp_unregister(&sclp_async_register);
+out:
+	return rc;
+}
+module_init(sclp_async_init);
+
+/* Module exit: undo sclp_async_init() in reverse acquisition order. */
+static void __exit sclp_async_exit(void)
+{
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &call_home_panic_nb);
+	unregister_sysctl_table(callhome_sysctl_header);
+	sclp_unregister(&sclp_async_register);
+	free_page((unsigned long) sccb);
+	kfree(request);
+}
+module_exit(sclp_async_exit);
+
+MODULE_AUTHOR("Copyright IBM Corp. 2009");
+MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_cmd.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_cmd.c
new file mode 100644
index 0000000..d7686a6
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_cmd.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2007,2012
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "sclp_cmd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/memory.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <asm/ctl_reg.h>
+#include <asm/chpid.h>
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/sclp.h>
+#include <asm/numa.h>
+
+#include "sclp.h"
+
+/*
+ * Request completion callback: wake the waiter blocked in
+ * sclp_sync_request_timeout().  @data is the completion it waits on;
+ * the request itself is not inspected.
+ */
+static void sclp_sync_callback(struct sclp_req *req, void *data)
+{
+	complete((struct completion *) data);
+}
+
+/* Convenience wrapper: synchronous request with no queue timeout. */
+int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
+{
+	return sclp_sync_request_timeout(cmd, sccb, 0);
+}
+
+/*
+ * Issue an SCLP request and sleep until it completes.  @timeout (in
+ * whatever unit sclp_req.queue_timeout uses; 0 = none) bounds the
+ * time the request may sit on the queue.  Returns 0 on success,
+ * -ENOMEM on allocation failure, or -EIO if the request did not end
+ * in state SCLP_REQ_DONE.  The caller owns @sccb and must check its
+ * response code separately.
+ */
+int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
+{
+	struct completion completion;
+	struct sclp_req *request;
+	int rc;
+
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (!request)
+		return -ENOMEM;
+	if (timeout)
+		request->queue_timeout = timeout;
+	request->command = cmd;
+	request->sccb = sccb;
+	request->status = SCLP_REQ_FILLED;
+	request->callback = sclp_sync_callback;
+	request->callback_data = &completion;
+	init_completion(&completion);
+
+	/* Perform sclp request. */
+	rc = sclp_add_request(request);
+	if (rc)
+		goto out;
+	wait_for_completion(&completion);
+
+	/* Check response. */
+	if (request->status != SCLP_REQ_DONE) {
+		pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
+			cmd, request->status);
+		rc = -EIO;
+	}
+out:
+	kfree(request);
+	return rc;
+}
+
+/*
+ * CPU configuration related functions.
+ */
+
+#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
+#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001
+
+/*
+ * Read CPU information from the service element into @info.
+ * Returns 0 on success, -EOPNOTSUPP if the facility is absent,
+ * -ENOMEM on allocation failure, -EIO on a bad response code.
+ */
+int _sclp_get_core_info(struct sclp_core_info *info)
+{
+	int rc;
+	struct read_cpu_info_sccb *sccb;
+
+	if (!SCLP_HAS_CPU_INFO)
+		return -EOPNOTSUPP;
+	/* SCCB must live in a zeroed DMA-capable page. */
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = sizeof(*sccb);
+	rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
+				       SCLP_QUEUE_INTERVAL);
+	if (rc)
+		goto out;
+	/* 0x0010 = "information returned" for read commands. */
+	if (sccb->header.response_code != 0x0010) {
+		pr_warn("readcpuinfo failed (response=0x%04x)\n",
+			sccb->header.response_code);
+		rc = -EIO;
+		goto out;
+	}
+	sclp_fill_core_info(info, sccb);
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+/* Minimal SCCB for the (de)configure CPU commands: header only. */
+struct cpu_configure_sccb {
+	struct sccb_header header;
+} __attribute__((packed, aligned(8)));
+
+/*
+ * Issue a configure/deconfigure CPU command.  Response codes 0x0020
+ * and 0x0120 both count as success; anything else is -EIO.
+ */
+static int do_core_configure(sclp_cmdw_t cmd)
+{
+	struct cpu_configure_sccb *sccb;
+	int rc;
+
+	if (!SCLP_HAS_CPU_RECONFIG)
+		return -EOPNOTSUPP;
+	/*
+	 * This is not going to cross a page boundary since we force
+	 * kmalloc to have a minimum alignment of 8 bytes on s390.
+	 */
+	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = sizeof(*sccb);
+	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+	case 0x0120:
+		break;
+	default:
+		pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
+			cmd, sccb->header.response_code);
+		rc = -EIO;
+		break;
+	}
+out:
+	kfree(sccb);
+	return rc;
+}
+
+/* Configure/deconfigure a core; the core id is encoded into bits of
+ * the command word via the << 8 shift. */
+int sclp_core_configure(u8 core)
+{
+	return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
+}
+
+int sclp_core_deconfigure(u8 core)
+{
+	return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+
+/* Serializes all memory hotplug state below. */
+static DEFINE_MUTEX(sclp_mem_mutex);
+/* Sorted list of struct memory_increment, one per storage increment. */
+static LIST_HEAD(sclp_mem_list);
+static u8 sclp_max_storage_id;
+static DECLARE_BITMAP(sclp_storage_ids, 256);
+/* Set once hotplug changed state; checked by the freeze handler. */
+static int sclp_mem_state_changed;
+
+/* One storage increment (identified by its rn) and whether it is
+ * currently standby (offline) memory. */
+struct memory_increment {
+	struct list_head list;
+	u16 rn;
+	int standby;
+};
+
+struct assign_storage_sccb {
+	struct sccb_header header;
+	u16 rn;
+} __packed;
+
+/* Map a pfn to its storage increment number (0 if rzm is unknown). */
+int arch_get_memory_phys_device(unsigned long start_pfn)
+{
+	if (!sclp.rzm)
+		return 0;
+	return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
+}
+
+/* Convert a 1-based storage increment number to its start address. */
+static unsigned long long rn2addr(u16 rn)
+{
+	return (unsigned long long) (rn - 1) * sclp.rzm;
+}
+
+/*
+ * Issue an assign/unassign storage command for increment @rn.
+ * Response codes 0x0020/0x0120 are success; others yield -EIO.
+ */
+static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
+{
+	struct assign_storage_sccb *sccb;
+	int rc;
+
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = PAGE_SIZE;
+	sccb->rn = rn;
+	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+	case 0x0120:
+		break;
+	default:
+		pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
+			cmd, sccb->header.response_code, rn);
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+/* Assign increment @rn and initialize its storage keys. */
+static int sclp_assign_storage(u16 rn)
+{
+	unsigned long long start;
+	int rc;
+
+	rc = do_assign_storage(0x000d0001, rn);
+	if (rc)
+		return rc;
+	/* Newly assigned storage has no valid keys yet. */
+	start = rn2addr(rn);
+	storage_key_init_range(start, start + sclp.rzm);
+	return 0;
+}
+
+/* Return increment @rn to the standby pool. */
+static int sclp_unassign_storage(u16 rn)
+{
+	return do_assign_storage(0x000c0001, rn);
+}
+
+/* SCCB for attach-storage-element; entries[] is the trailing
+ * variable-length list of assigned increments. */
+struct attach_storage_sccb {
+	struct sccb_header header;
+	u16 :16;
+	u16 assigned;
+	u32 :32;
+	u32 entries[0];
+} __packed;
+
+/*
+ * Attach storage element @id.  On success, mark the id as known and
+ * unassign every increment the attach reported as assigned (the rn
+ * lives in the upper 16 bits of each entry).
+ */
+static int sclp_attach_storage(u8 id)
+{
+	struct attach_storage_sccb *sccb;
+	int rc;
+	int i;
+
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = PAGE_SIZE;
+	sccb->header.function_code = 0x40;
+	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
+				       SCLP_QUEUE_INTERVAL);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+		set_bit(id, sclp_storage_ids);
+		for (i = 0; i < sccb->assigned; i++) {
+			if (sccb->entries[i])
+				sclp_unassign_storage(sccb->entries[i] >> 16);
+		}
+		break;
+	default:
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+/*
+ * Assign (online) or unassign (offline) every increment overlapping
+ * [start, start+size).  The list is sorted, so the walk stops at the
+ * first increment beyond the range.  Assign failures accumulate into
+ * rc; unassign return values are deliberately ignored (best effort on
+ * the offline path).  Returns 0 or -EIO.
+ */
+static int sclp_mem_change_state(unsigned long start, unsigned long size,
+				 int online)
+{
+	struct memory_increment *incr;
+	unsigned long long istart;
+	int rc = 0;
+
+	list_for_each_entry(incr, &sclp_mem_list, list) {
+		istart = rn2addr(incr->rn);
+		if (start + size - 1 < istart)
+			break;
+		if (start > istart + sclp.rzm - 1)
+			continue;
+		if (online)
+			rc |= sclp_assign_storage(incr->rn);
+		else
+			sclp_unassign_storage(incr->rn);
+		if (rc == 0)
+			incr->standby = online ? 0 : 1;
+	}
+	return rc ? -EIO : 0;
+}
+
+/* True if any increment overlapping [start, end) is standby memory. */
+static bool contains_standby_increment(unsigned long start, unsigned long end)
+{
+	struct memory_increment *incr;
+	unsigned long istart;
+
+	list_for_each_entry(incr, &sclp_mem_list, list) {
+		istart = rn2addr(incr->rn);
+		if (end - 1 < istart)
+			continue;
+		if (start > istart + sclp.rzm - 1)
+			continue;
+		if (incr->standby)
+			return true;
+	}
+	return false;
+}
+
+/*
+ * Memory hotplug notifier: translate block online/offline transitions
+ * into SCLP assign/unassign operations.  Runs under sclp_mem_mutex;
+ * first re-attaches any storage element not yet seen.
+ */
+static int sclp_mem_notifier(struct notifier_block *nb,
+			     unsigned long action, void *data)
+{
+	unsigned long start, size;
+	struct memory_notify *arg;
+	unsigned char id;
+	int rc = 0;
+
+	arg = data;
+	start = arg->start_pfn << PAGE_SHIFT;
+	size = arg->nr_pages << PAGE_SHIFT;
+	mutex_lock(&sclp_mem_mutex);
+	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
+		sclp_attach_storage(id);
+	switch (action) {
+	case MEM_GOING_OFFLINE:
+		/*
+		 * We do not allow to set memory blocks offline that contain
+		 * standby memory. This is done to simplify the "memory online"
+		 * case.
+		 */
+		if (contains_standby_increment(start, start + size))
+			rc = -EPERM;
+		break;
+	case MEM_ONLINE:
+	case MEM_CANCEL_OFFLINE:
+		break;
+	case MEM_GOING_ONLINE:
+		rc = sclp_mem_change_state(start, size, 1);
+		break;
+	case MEM_CANCEL_ONLINE:
+		sclp_mem_change_state(start, size, 0);
+		break;
+	case MEM_OFFLINE:
+		sclp_mem_change_state(start, size, 0);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	/* Remember any successful transition: blocks suspend later. */
+	if (!rc)
+		sclp_mem_state_changed = 1;
+	mutex_unlock(&sclp_mem_mutex);
+	return rc ? NOTIFY_BAD : NOTIFY_OK;
+}
+
+static struct notifier_block sclp_mem_nb = {
+	.notifier_call = sclp_mem_notifier,
+};
+
+/*
+ * Shrink [*start, *start + *size) in place to the largest sub-range
+ * aligned to @alignment (start rounded up, end rounded down), and log
+ * how much of the standby range remains usable.
+ */
+static void __init align_to_block_size(unsigned long long *start,
+				       unsigned long long *size,
+				       unsigned long long alignment)
+{
+	unsigned long long start_align, size_align;
+
+	start_align = roundup(*start, alignment);
+	size_align = rounddown(*start + *size, alignment) - start_align;
+
+	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
+		*start, size_align >> 20, *size >> 20);
+	*start = start_align;
+	*size = size_align;
+}
+
+/*
+ * Coalesce consecutive standby increments into one region before
+ * registering it with the memory hotplug core.  Uses static state
+ * (first_rn, num) to accumulate a run across calls; rn == 0 flushes
+ * the pending run.  The region is clipped to VMEM_MAX_PHYS and to a
+ * mem= limit, then aligned to the memory block size.
+ */
+static void __init add_memory_merged(u16 rn)
+{
+	unsigned long long start, size, addr, block_size;
+	static u16 first_rn, num;
+
+	/* Still extending the current consecutive run? */
+	if (rn && first_rn && (first_rn + num == rn)) {
+		num++;
+		return;
+	}
+	if (!first_rn)
+		goto skip_add;
+	start = rn2addr(first_rn);
+	size = (unsigned long long) num * sclp.rzm;
+	if (start >= VMEM_MAX_PHYS)
+		goto skip_add;
+	if (start + size > VMEM_MAX_PHYS)
+		size = VMEM_MAX_PHYS - start;
+	if (memory_end_set && (start >= memory_end))
+		goto skip_add;
+	if (memory_end_set && (start + size > memory_end))
+		size = memory_end - start;
+	block_size = memory_block_size_bytes();
+	align_to_block_size(&start, &size, block_size);
+	if (!size)
+		goto skip_add;
+	for (addr = start; addr < start + size; addr += block_size)
+		add_memory(numa_pfn_to_nid(PFN_DOWN(addr)), addr, block_size);
+skip_add:
+	/* Start a new run with the current rn. */
+	first_rn = rn;
+	num = 1;
+}
+
+/* Register all standby increments; the final call with rn == 0
+ * flushes the last pending run inside add_memory_merged(). */
+static void __init sclp_add_standby_memory(void)
+{
+	struct memory_increment *incr;
+
+	list_for_each_entry(incr, &sclp_mem_list, list)
+		if (incr->standby)
+			add_memory_merged(incr->rn);
+	add_memory_merged(0);
+}
+
+/*
+ * Insert an increment into the sorted sclp_mem_list.  For assigned
+ * increments the given @rn is used and inserted in rn order; for
+ * unassigned (standby) ones the rn is synthesized as the first gap in
+ * the existing numbering.  Entries beyond sclp.rnmax are dropped.
+ * Allocation failure is silently ignored (__init best effort).
+ */
+static void __init insert_increment(u16 rn, int standby, int assigned)
+{
+	struct memory_increment *incr, *new_incr;
+	struct list_head *prev;
+	u16 last_rn;
+
+	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
+	if (!new_incr)
+		return;
+	new_incr->rn = rn;
+	new_incr->standby = standby;
+	last_rn = 0;
+	prev = &sclp_mem_list;
+	list_for_each_entry(incr, &sclp_mem_list, list) {
+		if (assigned && incr->rn > rn)
+			break;
+		/* First hole in the rn sequence for unassigned entries. */
+		if (!assigned && incr->rn - last_rn > 1)
+			break;
+		last_rn = incr->rn;
+		prev = &incr->list;
+	}
+	if (!assigned)
+		new_incr->rn = last_rn + 1;
+	if (new_incr->rn > sclp.rnmax) {
+		kfree(new_incr);
+		return;
+	}
+	list_add(&new_incr->list, prev);
+}
+
+/* PM freeze hook: refuse suspend once hotplug changed memory state. */
+static int sclp_mem_freeze(struct device *dev)
+{
+	if (!sclp_mem_state_changed)
+		return 0;
+	pr_err("Memory hotplug state changed, suspend refused.\n");
+	return -EPERM;
+}
+
+/* SCCB for read-storage-element-information; entries[] is the
+ * trailing variable-length increment list. */
+struct read_storage_sccb {
+	struct sccb_header header;
+	u16 max_id;
+	u16 assigned;
+	u16 standby;
+	u16 :16;
+	u32 entries[0];
+} __packed;
+
+/* Pseudo platform device, present only to veto suspend via .freeze. */
+static const struct dev_pm_ops sclp_mem_pm_ops = {
+	.freeze		= sclp_mem_freeze,
+};
+
+static struct platform_driver sclp_mem_pdrv = {
+	.driver = {
+		.name	= "sclp_mem",
+		.pm	= &sclp_mem_pm_ops,
+	},
+};
+
+/*
+ * Boot-time detection of standby memory: query every storage element,
+ * build the sorted increment list, then register the hotplug notifier,
+ * the suspend-veto platform driver and finally the standby regions.
+ * Skipped entirely in kdump mode or when the required facilities
+ * (three bits in sclp.facilities) are absent.
+ */
+static int __init sclp_detect_standby_memory(void)
+{
+	struct platform_device *sclp_pdev;
+	struct read_storage_sccb *sccb;
+	int i, id, assigned, rc;
+
+	if (OLDMEM_BASE) /* No standby memory in kdump mode */
+		return 0;
+	if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
+		return 0;
+	rc = -ENOMEM;
+	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		goto out;
+	assigned = 0;
+	/* sclp_max_storage_id grows as responses report higher ids. */
+	for (id = 0; id <= sclp_max_storage_id; id++) {
+		memset(sccb, 0, PAGE_SIZE);
+		sccb->header.length = PAGE_SIZE;
+		rc = sclp_sync_request(0x00040001 | id << 8, sccb);
+		if (rc)
+			goto out;
+		switch (sccb->header.response_code) {
+		case 0x0010:
+			/* Element online: entries are assigned increments. */
+			set_bit(id, sclp_storage_ids);
+			for (i = 0; i < sccb->assigned; i++) {
+				if (!sccb->entries[i])
+					continue;
+				assigned++;
+				insert_increment(sccb->entries[i] >> 16, 0, 1);
+			}
+			break;
+		case 0x0310:
+			break;
+		case 0x0410:
+			/* Element standby: entries are standby increments. */
+			for (i = 0; i < sccb->assigned; i++) {
+				if (!sccb->entries[i])
+					continue;
+				assigned++;
+				insert_increment(sccb->entries[i] >> 16, 1, 1);
+			}
+			break;
+		default:
+			rc = -EIO;
+			break;
+		}
+		if (!rc)
+			sclp_max_storage_id = sccb->max_id;
+	}
+	if (rc || list_empty(&sclp_mem_list))
+		goto out;
+	/* Synthesize entries for increments not reported as assigned. */
+	for (i = 1; i <= sclp.rnmax - assigned; i++)
+		insert_increment(0, 1, 0);
+	rc = register_memory_notifier(&sclp_mem_nb);
+	if (rc)
+		goto out;
+	rc = platform_driver_register(&sclp_mem_pdrv);
+	if (rc)
+		goto out;
+	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
+	rc = PTR_ERR_OR_ZERO(sclp_pdev);
+	if (rc)
+		goto out_driver;
+	sclp_add_standby_memory();
+	goto out;
+out_driver:
+	platform_driver_unregister(&sclp_mem_pdrv);
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+__initcall(sclp_detect_standby_memory);
+
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+/*
+ * Channel path configuration related functions.
+ */
+
+#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
+#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
+#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001
+
+/* SCCB for channel-path (de)configure commands. */
+struct chp_cfg_sccb {
+	struct sccb_header header;
+	u8 ccm;
+	u8 reserved[6];
+	u8 cssid;
+} __attribute__((packed));
+
+/*
+ * Issue a channel-path configure/deconfigure command.  Response codes
+ * 0x0020, 0x0120, 0x0440 and 0x0450 all count as success; anything
+ * else logs a warning and returns -EIO.
+ */
+static int do_chp_configure(sclp_cmdw_t cmd)
+{
+	struct chp_cfg_sccb *sccb;
+	int rc;
+
+	if (!SCLP_HAS_CHP_RECONFIG)
+		return -EOPNOTSUPP;
+	/* Prepare sccb. */
+	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = sizeof(*sccb);
+	rc = sclp_sync_request(cmd, sccb);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+	case 0x0120:
+	case 0x0440:
+	case 0x0450:
+		break;
+	default:
+		pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
+			cmd, sccb->header.response_code);
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+/**
+ * sclp_chp_configure - perform configure channel-path sclp command
+ * @chpid: channel-path ID
+ *
+ * Perform configure channel-path command sclp command for specified chpid.
+ * Return 0 after command successfully finished, non-zero otherwise.
+ */
+int sclp_chp_configure(struct chp_id chpid)
+{
+	/* The channel-path id is encoded into the command word. */
+	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
+}
+
+/**
+ * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
+ * @chpid: channel-path ID
+ *
+ * Perform deconfigure channel-path command sclp command for specified chpid
+ * and wait for completion. On success return 0. Return non-zero otherwise.
+ */
+int sclp_chp_deconfigure(struct chp_id chpid)
+{
+	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
+}
+
+/* SCCB returned by read-channel-path-information: three bitmasks of
+ * recognized / standby / configured channel paths. */
+struct chp_info_sccb {
+	struct sccb_header header;
+	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
+	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
+	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
+	u8 ccm;
+	u8 reserved[6];
+	u8 cssid;
+} __attribute__((packed));
+
+/**
+ * sclp_chp_read_info - perform read channel-path information sclp command
+ * @info: resulting channel-path information data
+ *
+ * Perform read channel-path information sclp command and wait for completion.
+ * On success, store channel-path information in @info and return 0. Return
+ * non-zero otherwise.
+ */
+int sclp_chp_read_info(struct sclp_chp_info *info)
+{
+	struct chp_info_sccb *sccb;
+	int rc;
+
+	if (!SCLP_HAS_CHP_INFO)
+		return -EOPNOTSUPP;
+	/* Prepare sccb. */
+	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = sizeof(*sccb);
+	rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
+	if (rc)
+		goto out;
+	/* 0x0010 = "information returned". */
+	if (sccb->header.response_code != 0x0010) {
+		pr_warn("read channel-path info failed (response=0x%04x)\n",
+			sccb->header.response_code);
+		rc = -EIO;
+		goto out;
+	}
+	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
+	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
+	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_con.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_con.c
new file mode 100644
index 0000000..d247f23
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_con.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCLP line mode console driver
+ *
+ * Copyright IBM Corp. 1999, 2009
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kmod.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/termios.h>
+#include <linux/err.h>
+#include <linux/reboot.h>
+#include <linux/gfp.h>
+
+#include "sclp.h"
+#include "sclp_rw.h"
+#include "sclp_tty.h"
+
+#define sclp_console_major 4		/* TTYAUX_MAJOR */
+#define sclp_console_minor 64
+#define sclp_console_name  "ttyS"
+
+/* Lock to guard over changes to global variables */
+static spinlock_t sclp_con_lock;
+/* List of free pages that can be used for console output buffering */
+static struct list_head sclp_con_pages;
+/* List of full struct sclp_buffer structures ready for output */
+static struct list_head sclp_con_outqueue;
+/* Pointer to current console buffer */
+static struct sclp_buffer *sclp_conbuf;
+/* Timer for delayed output of console messages */
+static struct timer_list sclp_con_timer;
+/* Suspend mode flag */
+static int sclp_con_suspended;
+/* Flag that output queue is currently running */
+static int sclp_con_queue_running;
+
+/* Output format for console messages */
+static unsigned short sclp_con_columns;
+static unsigned short sclp_con_width_htab;
+
+/*
+ * Completion callback for an emitted console buffer: recycle the
+ * buffer's page onto the free list and keep draining the out queue
+ * until it is empty, output is suspended, or sclp_emit_buffer()
+ * reports an immediate failure (loop condition).
+ */
+static void
+sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
+{
+	unsigned long flags;
+	void *page;
+
+	do {
+		page = sclp_unmake_buffer(buffer);
+		spin_lock_irqsave(&sclp_con_lock, flags);
+
+		/* Remove buffer from outqueue */
+		list_del(&buffer->list);
+		list_add_tail((struct list_head *) page, &sclp_con_pages);
+
+		/* Check if there is a pending buffer on the out queue. */
+		buffer = NULL;
+		if (!list_empty(&sclp_con_outqueue))
+			buffer = list_first_entry(&sclp_con_outqueue,
+						  struct sclp_buffer, list);
+		if (!buffer || sclp_con_suspended) {
+			sclp_con_queue_running = 0;
+			spin_unlock_irqrestore(&sclp_con_lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&sclp_con_lock, flags);
+	} while (sclp_emit_buffer(buffer, sclp_conbuf_callback));
+}
+
+/*
+ * Finalize and emit first pending buffer.
+ */
+/*
+ * Finalize and emit first pending buffer.
+ * Moves the current write buffer (if any) onto the out queue, then
+ * starts emission unless output is already running or suspended.
+ */
+static void sclp_conbuf_emit(void)
+{
+	struct sclp_buffer* buffer;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	if (sclp_conbuf)
+		list_add_tail(&sclp_conbuf->list, &sclp_con_outqueue);
+	sclp_conbuf = NULL;
+	if (sclp_con_queue_running || sclp_con_suspended)
+		goto out_unlock;
+	if (list_empty(&sclp_con_outqueue))
+		goto out_unlock;
+	buffer = list_first_entry(&sclp_con_outqueue, struct sclp_buffer,
+				  list);
+	sclp_con_queue_running = 1;
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+
+	/* On immediate failure the callback recycles the buffer. */
+	rc = sclp_emit_buffer(buffer, sclp_conbuf_callback);
+	if (rc)
+		sclp_conbuf_callback(buffer, rc);
+	return;
+out_unlock:
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+}
+
+/*
+ * Wait until out queue is empty
+ */
+/*
+ * Wait until out queue is empty: cancel the flush timer, then busy
+ * wait (dropping the lock around each sclp_sync_wait) until the
+ * emission state machine stops.
+ */
+static void sclp_console_sync_queue(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	if (timer_pending(&sclp_con_timer))
+		del_timer(&sclp_con_timer);
+	while (sclp_con_queue_running) {
+		spin_unlock_irqrestore(&sclp_con_lock, flags);
+		sclp_sync_wait();
+		spin_lock_irqsave(&sclp_con_lock, flags);
+	}
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+}
+
+/*
+ * When this routine is called from the timer then we flush the
+ * temporary write buffer without further waiting on a final new line.
+ */
+/* Timer handler: flush the partially filled write buffer. */
+static void
+sclp_console_timeout(unsigned long data)
+{
+	sclp_conbuf_emit();
+}
+
+/*
+ * Drop oldest console buffer if sclp_con_drop is set
+ */
+/*
+ * Drop oldest console buffer if sclp_con_drop is set.  Skips the
+ * first queue entry while it is in flight.  Returns 1 if a buffer's
+ * page was reclaimed, 0 otherwise.  Caller holds sclp_con_lock.
+ */
+static int
+sclp_console_drop_buffer(void)
+{
+	struct list_head *list;
+	struct sclp_buffer *buffer;
+	void *page;
+
+	if (!sclp_console_drop)
+		return 0;
+	list = sclp_con_outqueue.next;
+	if (sclp_con_queue_running)
+		/* The first element is in I/O */
+		list = list->next;
+	if (list == &sclp_con_outqueue)
+		return 0;
+	list_del(list);
+	buffer = list_entry(list, struct sclp_buffer, list);
+	page = sclp_unmake_buffer(buffer);
+	list_add_tail((struct list_head *) page, &sclp_con_pages);
+	return 1;
+}
+
+/*
+ * Writes the given message to S390 system console
+ */
+/*
+ * Writes the given message to S390 system console.  When no free
+ * output page is available the call either drops the oldest queued
+ * buffer (if enabled) or spins in sclp_sync_wait() until one is
+ * recycled; output is abandoned while suspended.  A 1/10s timer
+ * flushes any partially filled buffer.
+ */
+static void
+sclp_console_write(struct console *console, const char *message,
+		   unsigned int count)
+{
+	unsigned long flags;
+	void *page;
+	int written;
+
+	if (count == 0)
+		return;
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	/*
+	 * process escape characters, write message into buffer,
+	 * send buffer to SCLP
+	 */
+	do {
+		/* make sure we have a console output buffer */
+		if (sclp_conbuf == NULL) {
+			if (list_empty(&sclp_con_pages))
+				sclp_console_full++;
+			while (list_empty(&sclp_con_pages)) {
+				if (sclp_con_suspended)
+					goto out;
+				if (sclp_console_drop_buffer())
+					break;
+				spin_unlock_irqrestore(&sclp_con_lock, flags);
+				sclp_sync_wait();
+				spin_lock_irqsave(&sclp_con_lock, flags);
+			}
+			page = sclp_con_pages.next;
+			list_del((struct list_head *) page);
+			sclp_conbuf = sclp_make_buffer(page, sclp_con_columns,
+						       sclp_con_width_htab);
+		}
+		/* try to write the string to the current output buffer */
+		written = sclp_write(sclp_conbuf, (const unsigned char *)
+				     message, count);
+		if (written == count)
+			break;
+		/*
+		 * Not all characters could be written to the current
+		 * output buffer. Emit the buffer, create a new buffer
+		 * and then output the rest of the string.
+		 */
+		spin_unlock_irqrestore(&sclp_con_lock, flags);
+		sclp_conbuf_emit();
+		spin_lock_irqsave(&sclp_con_lock, flags);
+		message += written;
+		count -= written;
+	} while (count > 0);
+	/* Setup timer to output current console buffer after 1/10 second */
+	if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
+	    !timer_pending(&sclp_con_timer)) {
+		init_timer(&sclp_con_timer);
+		sclp_con_timer.function = sclp_console_timeout;
+		sclp_con_timer.data = 0UL;
+		sclp_con_timer.expires = jiffies + HZ/10;
+		add_timer(&sclp_con_timer);
+	}
+out:
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+}
+
+/* console->device hook: hand back the matching SCLP tty driver. */
+static struct tty_driver *
+sclp_console_device(struct console *c, int *index)
+{
+	*index = c->index;
+	return sclp_tty_driver;
+}
+
+/*
+ * Make sure that all buffers will be flushed to the SCLP.
+ */
+/*
+ * Make sure that all buffers will be flushed to the SCLP.
+ * Emits the current buffer and then waits for the queue to drain.
+ */
+static void
+sclp_console_flush(void)
+{
+	sclp_conbuf_emit();
+	sclp_console_sync_queue();
+}
+
+/*
+ * Resume console: If there are cached messages, emit them.
+ */
+/*
+ * Resume console: If there are cached messages, emit them.
+ * Clears the suspend flag under the lock, then restarts emission.
+ */
+static void sclp_console_resume(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	sclp_con_suspended = 0;
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+	sclp_conbuf_emit();
+}
+
+/*
+ * Suspend console: Set suspend flag and flush console
+ */
+/*
+ * Suspend console: Set suspend flag and flush console.
+ * Pending buffers are written out before output stops.
+ */
+static void sclp_console_suspend(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	sclp_con_suspended = 1;
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+	sclp_console_flush();
+}
+
+/* Panic/reboot notifier: make sure queued console output gets out
+ * before the system goes down. */
+static int sclp_console_notify(struct notifier_block *self,
+			       unsigned long event, void *data)
+{
+	sclp_console_flush();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+	.notifier_call = sclp_console_notify,
+	.priority = SCLP_PANIC_PRIO_CLIENT,
+};
+
+static struct notifier_block on_reboot_nb = {
+	.notifier_call = sclp_console_notify,
+	.priority = 1,
+};
+
+/*
+ * used to register the SCLP console to the kernel and to
+ * give printk necessary information
+ */
+static struct console sclp_console =
+{
+	.name = sclp_console_name,
+	.write = sclp_console_write,
+	.device = sclp_console_device,
+	.flags = CON_PRINTBUFFER,
+	.index = 0 /* ttyS0 */
+};
+
+/*
+ * This function is called for SCLP suspend and resume events.
+ */
+/*
+ * This function is called for SCLP suspend and resume events.
+ * FREEZE suspends console output; THAW/RESTORE resume it.
+ */
+void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event)
+{
+	switch (sclp_pm_event) {
+	case SCLP_PM_EVENT_FREEZE:
+		sclp_console_suspend();
+		break;
+	case SCLP_PM_EVENT_RESTORE:
+	case SCLP_PM_EVENT_THAW:
+		sclp_console_resume();
+		break;
+	}
+}
+
+/*
+ * called by console_init() in drivers/char/tty_io.c at boot-time.
+ */
+/*
+ * called by console_init() in drivers/char/tty_io.c at boot-time.
+ * Sets up the write-buffer page pool, queue state, output format and
+ * registers the console plus panic/reboot flush notifiers.
+ */
+static int __init
+sclp_console_init(void)
+{
+	void *page;
+	int i;
+	int rc;
+
+	/* SCLP consoles are handled together */
+	if (!(CONSOLE_IS_SCLP || CONSOLE_IS_VT220))
+		return 0;
+	rc = sclp_rw_init();
+	if (rc)
+		return rc;
+	/* Allocate pages for output buffering */
+	/* NOTE(review): get_zeroed_page() result is not checked before
+	 * list_add_tail() — an early-boot OOM here would oops; confirm
+	 * this is acceptable for console init. */
+	INIT_LIST_HEAD(&sclp_con_pages);
+	for (i = 0; i < sclp_console_pages; i++) {
+		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		list_add_tail(page, &sclp_con_pages);
+	}
+	INIT_LIST_HEAD(&sclp_con_outqueue);
+	spin_lock_init(&sclp_con_lock);
+	sclp_conbuf = NULL;
+	init_timer(&sclp_con_timer);
+
+	/* Set output format */
+	if (MACHINE_IS_VM)
+		/*
+		 * save 4 characters for the CPU number
+		 * written at start of each line by VM/CP
+		 */
+		sclp_con_columns = 76;
+	else
+		sclp_con_columns = 80;
+	sclp_con_width_htab = 8;
+
+	/* enable printk-access to this driver */
+	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+	register_reboot_notifier(&on_reboot_nb);
+	register_console(&sclp_console);
+	return 0;
+}
+
+console_initcall(sclp_console_init);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_config.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_config.c
new file mode 100644
index 0000000..039b207
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_config.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "sclp_config"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <asm/smp.h>
+
+#include "sclp.h"
+
+/* Configuration-management event data; ev_qualifier selects the action. */
+struct conf_mgm_data {
+	u8 reserved;
+	u8 ev_qualifier;
+} __attribute__((packed));
+
+/* Maximum payload of an Open-for-Business event buffer. */
+#define OFB_DATA_MAX 64
+
+/* Event buffer sent/received for configuration-management events. */
+struct sclp_ofb_evbuf {
+	struct evbuf_header header;
+	struct conf_mgm_data cm_data;
+	char ev_data[OFB_DATA_MAX];
+} __packed;
+
+struct sclp_ofb_sccb {
+	struct sccb_header header;
+	struct sclp_ofb_evbuf ofb_evbuf;
+} __packed;
+
+/* Event qualifiers understood by sclp_conf_receiver_fn(). */
+#define EV_QUAL_CPU_CHANGE	1
+#define EV_QUAL_CAP_CHANGE	3
+#define EV_QUAL_OPEN4BUSINESS	5
+
+/* Deferred handlers - SCLP callbacks run in a context where the
+ * heavyweight work below must not be done directly. */
+static struct work_struct sclp_cpu_capability_work;
+static struct work_struct sclp_cpu_change_work;
+
+/*
+ * CPU capability changed: refresh the cached CPU MHz value and emit a
+ * KOBJ_CHANGE uevent for every online CPU so userspace can re-read
+ * the sysfs attributes.
+ */
+static void sclp_cpu_capability_notify(struct work_struct *work)
+{
+	int cpu;
+	struct device *dev;
+
+	s390_update_cpu_mhz();
+	pr_info("CPU capability may have changed\n");
+	get_online_cpus();	/* keep the online mask stable while iterating */
+	for_each_online_cpu(cpu) {
+		dev = get_cpu_device(cpu);
+		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+	}
+	put_online_cpus();
+}
+
+/* CPU configuration changed: rescan for newly (de)configured CPUs. */
+static void __ref sclp_cpu_change_notify(struct work_struct *work)
+{
+	lock_device_hotplug();
+	smp_rescan_cpus();
+	unlock_device_hotplug();
+}
+
+/*
+ * Receiver callback for configuration-management events: dispatch to
+ * the matching workqueue item based on the event qualifier.  The
+ * qualifier data immediately follows the event buffer header.
+ */
+static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
+{
+	struct conf_mgm_data *cdata;
+
+	cdata = (struct conf_mgm_data *)(evbuf + 1);
+	switch (cdata->ev_qualifier) {
+	case EV_QUAL_CPU_CHANGE:
+		schedule_work(&sclp_cpu_change_work);
+		break;
+	case EV_QUAL_CAP_CHANGE:
+		schedule_work(&sclp_cpu_capability_work);
+		break;
+	}
+}
+
+/* Registration with the SCLP core; sending is only needed for OFB. */
+static struct sclp_register sclp_conf_register =
+{
+#ifdef CONFIG_SCLP_OFB
+	.send_mask    = EVTYP_CONFMGMDATA_MASK,
+#endif
+	.receive_mask = EVTYP_CONFMGMDATA_MASK,
+	.receiver_fn  = sclp_conf_receiver_fn,
+};
+
+#ifdef CONFIG_SCLP_OFB
+/*
+ * Send an Open-for-Business event with up to OFB_DATA_MAX bytes of
+ * payload.  Returns 0 on success, -EINVAL for oversized payloads,
+ * -ENOMEM on allocation failure, -EIO on an SCLP error response.
+ */
+static int sclp_ofb_send_req(char *ev_data, size_t len)
+{
+	static DEFINE_MUTEX(send_mutex);
+	struct sclp_ofb_sccb *sccb;
+	int rc, response;
+
+	if (len > OFB_DATA_MAX)
+		return -EINVAL;
+	/* SCCB must reside below 2G, hence GFP_DMA. */
+	sccb = (struct sclp_ofb_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	/* Setup SCCB for the Open-for-Business event */
+	sccb->header.length = sizeof(struct sclp_ofb_sccb);
+	sccb->ofb_evbuf.header.length = sizeof(struct sclp_ofb_evbuf);
+	sccb->ofb_evbuf.header.type = EVTYP_CONFMGMDATA;
+	sccb->ofb_evbuf.cm_data.ev_qualifier = EV_QUAL_OPEN4BUSINESS;
+	memcpy(sccb->ofb_evbuf.ev_data, ev_data, len);
+
+	/* Warn (but still try) if no receiver registered for this type. */
+	if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK))
+		pr_warn("SCLP receiver did not register to receive "
+			"Configuration Management Data Events.\n");
+
+	/* Serialize concurrent senders; the request itself is synchronous. */
+	mutex_lock(&send_mutex);
+	rc = sclp_sync_request(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
+	mutex_unlock(&send_mutex);
+	if (rc)
+		goto out;
+	response = sccb->header.response_code;
+	if (response != 0x0020) {	/* 0x0020 == normal completion */
+		pr_err("Open for Business request failed with response code "
+		       "0x%04x\n", response);
+		rc = -EIO;
+	}
+out:
+	free_page((unsigned long)sccb);
+	return rc;
+}
+
+/*
+ * sysfs binary attribute write handler: forwards the written bytes as
+ * an Open-for-Business event.  Returns the byte count on success.
+ */
+static ssize_t sysfs_ofb_data_write(struct file *filp, struct kobject *kobj,
+				    struct bin_attribute *bin_attr,
+				    char *buf, loff_t off, size_t count)
+{
+	int rc;
+
+	rc = sclp_ofb_send_req(buf, count);
+	return rc ?: count;
+}
+
+/* /sys/firmware/ofb/event_data - write-only, root only. */
+static const struct bin_attribute ofb_bin_attr = {
+	.attr = {
+		.name = "event_data",
+		.mode = S_IWUSR,
+	},
+	.write = sysfs_ofb_data_write,
+};
+#endif
+
+/*
+ * Create /sys/firmware/ofb and its event_data attribute.
+ * A no-op returning 0 when CONFIG_SCLP_OFB is disabled.
+ */
+static int __init sclp_ofb_setup(void)
+{
+#ifdef CONFIG_SCLP_OFB
+	struct kset *ofb_kset;
+	int rc;
+
+	ofb_kset = kset_create_and_add("ofb", NULL, firmware_kobj);
+	if (!ofb_kset)
+		return -ENOMEM;
+	rc = sysfs_create_bin_file(&ofb_kset->kobj, &ofb_bin_attr);
+	if (rc) {
+		kset_unregister(ofb_kset);
+		return rc;
+	}
+#endif
+	return 0;
+}
+
+/*
+ * Module init: prepare the deferred-work items, register with the
+ * SCLP core and create the OFB sysfs interface.
+ */
+static int __init sclp_conf_init(void)
+{
+	int rc;
+
+	INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
+	INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
+	rc = sclp_register(&sclp_conf_register);
+	if (rc)
+		return rc;
+	return sclp_ofb_setup();
+}
+
+__initcall(sclp_conf_init);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_cpi_sys.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_cpi_sys.c
new file mode 100644
index 0000000..f60d7ea
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_cpi_sys.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    SCLP control program identification sysfs interface
+ *
+ *    Copyright IBM Corp. 2001, 2007
+ *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *		 Michael Ernst <mernst@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "sclp_cpi"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/kmod.h>
+#include <linux/timer.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/export.h>
+#include <asm/ebcdic.h>
+#include <asm/sclp.h>
+
+#include "sclp.h"
+#include "sclp_rw.h"
+#include "sclp_cpi_sys.h"
+
+#define CPI_LENGTH_NAME 8
+#define CPI_LENGTH_LEVEL 16
+
+/* Protects the cached CPI values below and serializes cpi_req(). */
+static DEFINE_MUTEX(sclp_cpi_mutex);
+
+/* Control-Program Identification event buffer (on-wire layout). */
+struct cpi_evbuf {
+	struct evbuf_header header;
+	u8	id_format;
+	u8	reserved0;
+	u8	system_type[CPI_LENGTH_NAME];
+	u64	reserved1;
+	u8	system_name[CPI_LENGTH_NAME];
+	u64	reserved2;
+	u64	system_level;
+	u64	reserved3;
+	u8	sysplex_name[CPI_LENGTH_NAME];
+	u8	reserved4[16];
+} __attribute__((packed));
+
+struct cpi_sccb {
+	struct sccb_header header;
+	struct cpi_evbuf cpi_evbuf;
+} __attribute__((packed));
+
+/* Registered on demand in cpi_req(); CPI is send-only. */
+static struct sclp_register sclp_cpi_event = {
+	.send_mask = EVTYP_CTLPROGIDENT_MASK,
+};
+
+/* Cached identification values, exposed via sysfs (NUL-terminated). */
+static char system_name[CPI_LENGTH_NAME + 1];
+static char sysplex_name[CPI_LENGTH_NAME + 1];
+static char system_type[CPI_LENGTH_NAME + 1];
+static u64 system_level;
+
+/*
+ * Copy a string into a blank-padded, EBCDIC-converted CPI field.
+ * Callers must have validated that strlen(data) <= CPI_LENGTH_NAME.
+ */
+static void set_data(char *field, char *data)
+{
+	memset(field, ' ', CPI_LENGTH_NAME);
+	memcpy(field, data, strlen(data));
+	sclp_ascebc_str(field, CPI_LENGTH_NAME);
+}
+
+/* Request completion callback: wake the waiter in cpi_req(). */
+static void cpi_callback(struct sclp_req *req, void *data)
+{
+	struct completion *completion = data;
+
+	complete(completion);
+}
+
+/*
+ * Allocate and fill an SCLP request carrying a CPI event buffer built
+ * from the cached identification values.  Returns an ERR_PTR on
+ * allocation failure; the caller frees via cpi_free_req().
+ */
+static struct sclp_req *cpi_prepare_req(void)
+{
+	struct sclp_req *req;
+	struct cpi_sccb *sccb;
+	struct cpi_evbuf *evb;
+
+	req = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+	/* SCCB must be addressable by the service element: GFP_DMA. */
+	sccb = (struct cpi_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb) {
+		kfree(req);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* setup SCCB for Control-Program Identification */
+	sccb->header.length = sizeof(struct cpi_sccb);
+	sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
+	sccb->cpi_evbuf.header.type = EVTYP_CTLPROGIDENT;
+	evb = &sccb->cpi_evbuf;
+
+	/* set system type */
+	set_data(evb->system_type, system_type);
+
+	/* set system name */
+	set_data(evb->system_name, system_name);
+
+	/* set system level */
+	evb->system_level = system_level;
+
+	/* set sysplex name */
+	set_data(evb->sysplex_name, sysplex_name);
+
+	/* prepare request data structure presented to SCLP driver */
+	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
+	req->sccb = sccb;
+	req->status = SCLP_REQ_FILLED;
+	req->callback = cpi_callback;
+	return req;
+}
+
+/* Release a request created by cpi_prepare_req(). */
+static void cpi_free_req(struct sclp_req *req)
+{
+	free_page((unsigned long) req->sccb);
+	kfree(req);
+}
+
+/*
+ * Send the current identification data to the service element.
+ * Temporarily registers the CPI event type, queues the request,
+ * waits for completion and checks the response code.  Returns 0 on
+ * success, -EOPNOTSUPP if the SCLP does not accept CPI events, or a
+ * negative error code.  Caller holds sclp_cpi_mutex.
+ */
+static int cpi_req(void)
+{
+	struct completion completion;
+	struct sclp_req *req;
+	int rc;
+	int response;
+
+	rc = sclp_register(&sclp_cpi_event);
+	if (rc)
+		goto out;
+	/* Receive mask tells us which event types the SCLP will accept. */
+	if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) {
+		rc = -EOPNOTSUPP;
+		goto out_unregister;
+	}
+
+	req = cpi_prepare_req();
+	if (IS_ERR(req)) {
+		rc = PTR_ERR(req);
+		goto out_unregister;
+	}
+
+	init_completion(&completion);
+	req->callback_data = &completion;
+
+	/* Add request to sclp queue */
+	rc = sclp_add_request(req);
+	if (rc)
+		goto out_free_req;
+
+	wait_for_completion(&completion);
+
+	if (req->status != SCLP_REQ_DONE) {
+		pr_warn("request failed (status=0x%02x)\n", req->status);
+		rc = -EIO;
+		goto out_free_req;
+	}
+
+	response = ((struct cpi_sccb *) req->sccb)->header.response_code;
+	if (response != 0x0020) {	/* 0x0020 == normal completion */
+		pr_warn("request failed with response code 0x%x\n", response);
+		rc = -EIO;
+	}
+
+out_free_req:
+	cpi_free_req(req);
+
+out_unregister:
+	sclp_unregister(&sclp_cpi_event);
+
+out:
+	return rc;
+}
+
+/*
+ * Validate a CPI value: at most CPI_LENGTH_NAME characters (ignoring a
+ * single trailing newline), each alphanumeric or one of "$@# ".
+ * Returns 0 if valid, -EINVAL otherwise.
+ * NOTE(review): the 'attr' (attribute name) parameter is currently
+ * unused - presumably intended for error messages.
+ */
+static int check_string(const char *attr, const char *str)
+{
+	size_t len;
+	size_t i;
+
+	len = strlen(str);
+
+	if ((len > 0) && (str[len - 1] == '\n'))
+		len--;
+
+	if (len > CPI_LENGTH_NAME)
+		return -EINVAL;
+
+	for (i = 0; i < len ; i++) {
+		if (isalpha(str[i]) || isdigit(str[i]) ||
+		    strchr("$@# ", str[i]))
+			continue;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Store a validated value: upper-cased, blank-padded to
+ * CPI_LENGTH_NAME, trailing newline stripped.
+ */
+static void set_string(char *attr, const char *value)
+{
+	size_t len;
+	size_t i;
+
+	len = strlen(value);
+
+	if ((len > 0) && (value[len - 1] == '\n'))
+		len--;
+
+	for (i = 0; i < CPI_LENGTH_NAME; i++) {
+		if (i < len)
+			attr[i] = toupper(value[i]);
+		else
+			attr[i] = ' ';
+	}
+}
+
+/* sysfs: read the cached system name. */
+static ssize_t system_name_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *page)
+{
+	int rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", system_name);
+	mutex_unlock(&sclp_cpi_mutex);
+	return rc;
+}
+
+/* sysfs: validate and cache a new system name (not sent until "set"). */
+static ssize_t system_name_store(struct kobject *kobj,
+				 struct kobj_attribute *attr,
+				 const char *buf,
+	size_t len)
+{
+	int rc;
+
+	rc = check_string("system_name", buf);
+	if (rc)
+		return rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	set_string(system_name, buf);
+	mutex_unlock(&sclp_cpi_mutex);
+
+	return len;
+}
+
+static struct kobj_attribute system_name_attr =
+	__ATTR(system_name, 0644, system_name_show, system_name_store);
+
+/* sysfs: read the cached sysplex name. */
+static ssize_t sysplex_name_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *page)
+{
+	int rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
+	mutex_unlock(&sclp_cpi_mutex);
+	return rc;
+}
+
+/* sysfs: validate and cache a new sysplex name (not sent until "set"). */
+static ssize_t sysplex_name_store(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  const char *buf,
+	size_t len)
+{
+	int rc;
+
+	rc = check_string("sysplex_name", buf);
+	if (rc)
+		return rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	set_string(sysplex_name, buf);
+	mutex_unlock(&sclp_cpi_mutex);
+
+	return len;
+}
+
+static struct kobj_attribute sysplex_name_attr =
+	__ATTR(sysplex_name, 0644, sysplex_name_show, sysplex_name_store);
+
+/* sysfs: read the cached system type. */
+static ssize_t system_type_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *page)
+{
+	int rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", system_type);
+	mutex_unlock(&sclp_cpi_mutex);
+	return rc;
+}
+
+/* sysfs: validate and cache a new system type (not sent until "set"). */
+static ssize_t system_type_store(struct kobject *kobj,
+				 struct kobj_attribute *attr,
+				 const char *buf,
+	size_t len)
+{
+	int rc;
+
+	rc = check_string("system_type", buf);
+	if (rc)
+		return rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	set_string(system_type, buf);
+	mutex_unlock(&sclp_cpi_mutex);
+
+	return len;
+}
+
+static struct kobj_attribute system_type_attr =
+	__ATTR(system_type, 0644, system_type_show, system_type_store);
+
+/* sysfs: read the cached system level as a 64-bit hex value. */
+static ssize_t system_level_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *page)
+{
+	unsigned long long level;
+
+	mutex_lock(&sclp_cpi_mutex);
+	level = system_level;
+	mutex_unlock(&sclp_cpi_mutex);
+	return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
+}
+
+/*
+ * sysfs: parse and cache a new system level.  Input must be a hex
+ * number optionally followed by a newline; anything else is -EINVAL.
+ * NOTE(review): simple_strtoull is the legacy parser; kstrtoull would
+ * be preferred in new code.
+ */
+static ssize_t system_level_store(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  const char *buf,
+	size_t len)
+{
+	unsigned long long level;
+	char *endp;
+
+	level = simple_strtoull(buf, &endp, 16);
+
+	if (endp == buf)
+		return -EINVAL;
+	if (*endp == '\n')
+		endp++;
+	if (*endp)
+		return -EINVAL;
+
+	mutex_lock(&sclp_cpi_mutex);
+	system_level = level;
+	mutex_unlock(&sclp_cpi_mutex);
+	return len;
+}
+
+static struct kobj_attribute system_level_attr =
+	__ATTR(system_level, 0644, system_level_show, system_level_store);
+
+/*
+ * sysfs: writing anything to "set" transmits the cached values to the
+ * service element via cpi_req().
+ */
+static ssize_t set_store(struct kobject *kobj,
+			 struct kobj_attribute *attr,
+			 const char *buf, size_t len)
+{
+	int rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	rc = cpi_req();
+	mutex_unlock(&sclp_cpi_mutex);
+	if (rc)
+		return rc;
+
+	return len;
+}
+
+/* Write-only trigger attribute. */
+static struct kobj_attribute set_attr = __ATTR(set, 0200, NULL, set_store);
+
+/* All attributes exposed under /sys/firmware/cpi/. */
+static struct attribute *cpi_attrs[] = {
+	&system_name_attr.attr,
+	&sysplex_name_attr.attr,
+	&system_type_attr.attr,
+	&system_level_attr.attr,
+	&set_attr.attr,
+	NULL,
+};
+
+static struct attribute_group cpi_attr_group = {
+	.attrs = cpi_attrs,
+};
+
+static struct kset *cpi_kset;
+
+/*
+ * In-kernel API: validate, cache and immediately transmit a complete
+ * set of identification data.  Returns 0 on success or a negative
+ * error code (validation or transmission failure).
+ */
+int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type,
+		      const u64 level)
+{
+	int rc;
+
+	/* Validate everything before mutating the cached state. */
+	rc = check_string("system_name", system);
+	if (rc)
+		return rc;
+	rc = check_string("sysplex_name", sysplex);
+	if (rc)
+		return rc;
+	rc = check_string("system_type", type);
+	if (rc)
+		return rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	set_string(system_name, system);
+	set_string(sysplex_name, sysplex);
+	set_string(system_type, type);
+	system_level = level;
+
+	rc = cpi_req();
+	mutex_unlock(&sclp_cpi_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL(sclp_cpi_set_data);
+
+/* Create /sys/firmware/cpi and attach the attribute group. */
+static int __init cpi_init(void)
+{
+	int rc;
+
+	cpi_kset = kset_create_and_add("cpi", NULL, firmware_kobj);
+	if (!cpi_kset)
+		return -ENOMEM;
+
+	rc = sysfs_create_group(&cpi_kset->kobj, &cpi_attr_group);
+	if (rc)
+		kset_unregister(cpi_kset);
+
+	return rc;
+}
+
+__initcall(cpi_init);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_cpi_sys.h b/src/kernel/linux/v4.14/drivers/s390/char/sclp_cpi_sys.h
new file mode 100644
index 0000000..edf60d1
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_cpi_sys.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    SCLP control program identification sysfs interface
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Michael Ernst <mernst@de.ibm.com>
+ */
+
+#ifndef __SCLP_CPI_SYS_H__
+#define __SCLP_CPI_SYS_H__
+
+/* Validate, cache and transmit CPI data; 0 on success, negative errno. */
+int sclp_cpi_set_data(const char *system, const char *sysplex,
+		      const char *type, u64 level);
+
+#endif	 /* __SCLP_CPI_SYS_H__ */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_ctl.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_ctl.c
new file mode 100644
index 0000000..a78cea0
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_ctl.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IOCTL interface for SCLP
+ *
+ * Copyright IBM Corp. 2012
+ *
+ * Author: Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#include <linux/compat.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <asm/compat.h>
+#include <asm/sclp_ctl.h>
+#include <asm/sclp.h>
+
+#include "sclp.h"
+
+/*
+ * Supported command words (whitelist of SCLP commands userspace may
+ * issue through the ioctl interface)
+ */
+static unsigned int sclp_ctl_sccb_wlist[] = {
+	0x00400002,
+	0x00410002,
+};
+
+/*
+ * Check if command word is supported
+ */
+static int sclp_ctl_cmdw_supported(unsigned int cmdw)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sclp_ctl_sccb_wlist); i++) {
+		if (cmdw == sclp_ctl_sccb_wlist[i])
+			return 1;
+	}
+	return 0;
+}
+
+/* Convert a user-supplied u64 to a user pointer, honoring compat tasks. */
+static void __user *u64_to_uptr(u64 value)
+{
+	if (is_compat_task())
+		return compat_ptr(value);
+	else
+		return (void __user *)(unsigned long)value;
+}
+
+/*
+ * Start SCLP request
+ */
+/*
+ * Execute a whitelisted SCLP command on a user-supplied SCCB and copy
+ * the result back.  The SCCB is bounced through a DMA-capable kernel
+ * page; the user buffer may be shorter than a page, so the amount
+ * actually copied is validated against the embedded SCCB length.
+ */
+static int sclp_ctl_ioctl_sccb(void __user *user_area)
+{
+	struct sclp_ctl_sccb ctl_sccb;
+	struct sccb_header *sccb;
+	unsigned long copied;
+	int rc;
+
+	if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
+		return -EFAULT;
+	if (!sclp_ctl_cmdw_supported(ctl_sccb.cmdw))
+		return -EOPNOTSUPP;
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	/* copy_from_user returns the number of bytes NOT copied. */
+	copied = PAGE_SIZE -
+		copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
+	/* The header length field and the full SCCB must fit in 'copied'. */
+	if (offsetof(struct sccb_header, length) +
+	    sizeof(sccb->length) > copied || sccb->length > copied) {
+		rc = -EFAULT;
+		goto out_free;
+	}
+	/* Minimum valid SCCB is the 8-byte header. */
+	if (sccb->length < 8) {
+		rc = -EINVAL;
+		goto out_free;
+	}
+	rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
+	if (rc)
+		goto out_free;
+	if (copy_to_user(u64_to_uptr(ctl_sccb.sccb), sccb, sccb->length))
+		rc = -EFAULT;
+out_free:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+/*
+ * SCLP SCCB ioctl function - single entry point for both native and
+ * compat (31-bit) callers.
+ */
+static long sclp_ctl_ioctl(struct file *filp, unsigned int cmd,
+			   unsigned long arg)
+{
+	void __user *argp;
+
+	if (is_compat_task())
+		argp = compat_ptr(arg);
+	else
+		argp = (void __user *) arg;
+	switch (cmd) {
+	case SCLP_CTL_SCCB:
+		return sclp_ctl_ioctl_sccb(argp);
+	default: /* unknown ioctl number */
+		return -ENOTTY;
+	}
+}
+
+/*
+ * File operations
+ */
+static const struct file_operations sclp_ctl_fops = {
+	.owner = THIS_MODULE,
+	.open = nonseekable_open,
+	.unlocked_ioctl = sclp_ctl_ioctl,
+	.compat_ioctl = sclp_ctl_ioctl,	/* u64_to_uptr handles 31-bit ptrs */
+	.llseek = no_llseek,
+};
+
+/*
+ * Misc device definition - creates /dev/sclp
+ */
+static struct miscdevice sclp_ctl_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "sclp",
+	.fops = &sclp_ctl_fops,
+};
+builtin_misc_device(sclp_ctl_device);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_diag.h b/src/kernel/linux/v4.14/drivers/s390/char/sclp_diag.h
new file mode 100644
index 0000000..796c531
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_diag.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef _SCLP_DIAG_H
+#define _SCLP_DIAG_H
+
+#include <linux/types.h>
+
+/* return codes for Diagnostic Test FTP Service, as indicated in member
+ * sclp_diag_ftp::ldflg
+ */
+#define SCLP_DIAG_FTP_OK	0x80U /* success */
+#define SCLP_DIAG_FTP_LDFAIL	0x01U /* load failed */
+#define SCLP_DIAG_FTP_LDNPERM	0x02U /* not allowed */
+#define SCLP_DIAG_FTP_LDRUNS	0x03U /* LD runs */
+#define SCLP_DIAG_FTP_LDNRUNS	0x04U /* LD does not run */
+
+#define SCLP_DIAG_FTP_XPCX	0x80 /* PCX communication code */
+#define SCLP_DIAG_FTP_ROUTE	4 /* routing code for new FTP service */
+
+/*
+ * length of Diagnostic Test FTP Service event buffer
+ */
+#define SCLP_DIAG_FTP_EVBUF_LEN				\
+	(offsetof(struct sclp_diag_evbuf, mdd) +	\
+	 sizeof(struct sclp_diag_ftp))
+
+/**
+ * struct sclp_diag_ftp - Diagnostic Test FTP Service model-dependent data
+ * @pcx: code for PCX communication (should be 0x80)
+ * @ldflg: load flag (see defines above)
+ * @cmd: FTP command
+ * @pgsize: page size (0 = 4kB, 1 = large page size)
+ * @srcflg: source flag
+ * @spare: reserved (zeroes)
+ * @offset: file offset
+ * @fsize: file size
+ * @length: buffer size resp. bytes transferred
+ * @failaddr: failing address
+ * @bufaddr: buffer address, virtual
+ * @asce: region or segment table designation
+ * @fident: file name (ASCII, zero-terminated)
+ */
+/* All three structures below are on-wire layouts exchanged with the
+ * service element, hence __packed. */
+struct sclp_diag_ftp {
+	u8 pcx;
+	u8 ldflg;
+	u8 cmd;
+	u8 pgsize;
+	u8 srcflg;
+	u8 spare;
+	u64 offset;
+	u64 fsize;
+	u64 length;
+	u64 failaddr;
+	u64 bufaddr;
+	u64 asce;
+
+	u8 fident[256];
+} __packed;
+
+/**
+ * struct sclp_diag_evbuf - Diagnostic Test (ET7) Event Buffer
+ * @hdr: event buffer header
+ * @route: diagnostic route
+ * @mdd: model-dependent data (@route dependent)
+ */
+struct sclp_diag_evbuf {
+	struct evbuf_header hdr;
+	u16 route;
+
+	union {
+		struct sclp_diag_ftp ftp;
+	} mdd;
+} __packed;
+
+/**
+ * struct sclp_diag_sccb - Diagnostic Test (ET7) SCCB
+ * @hdr: SCCB header
+ * @evbuf: event buffer
+ */
+struct sclp_diag_sccb {
+
+	struct sccb_header hdr;
+	struct sclp_diag_evbuf evbuf;
+} __packed;
+
+#endif /* _SCLP_DIAG_H */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_early.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_early.c
new file mode 100644
index 0000000..d06bc56
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_early.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCLP early driver
+ *
+ * Copyright IBM Corp. 2013
+ */
+
+#define KMSG_COMPONENT "sclp_early"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/errno.h>
+#include <asm/ctl_reg.h>
+#include <asm/sclp.h>
+#include <asm/ipl.h>
+#include "sclp_sdias.h"
+#include "sclp.h"
+
+#define SCLP_CMDW_READ_SCP_INFO		0x00020001
+#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001
+
+/* Response layout of READ SCP INFO; byte offsets in the comments. */
+struct read_info_sccb {
+	struct	sccb_header header;	/* 0-7 */
+	u16	rnmax;			/* 8-9 */
+	u8	rnsize;			/* 10 */
+	u8	_pad_11[16 - 11];	/* 11-15 */
+	u16	ncpurl;			/* 16-17 */
+	u16	cpuoff;			/* 18-19 */
+	u8	_pad_20[24 - 20];	/* 20-23 */
+	u8	loadparm[8];		/* 24-31 */
+	u8	_pad_32[42 - 32];	/* 32-41 */
+	u8	fac42;			/* 42 */
+	u8	fac43;			/* 43 */
+	u8	_pad_44[48 - 44];	/* 44-47 */
+	u64	facilities;		/* 48-55 */
+	u8	_pad_56[66 - 56];	/* 56-65 */
+	u8	fac66;			/* 66 */
+	u8	_pad_67[76 - 67];	/* 67-75 */
+	u32	ibc;			/* 76-79 */
+	u8	_pad80[84 - 80];	/* 80-83 */
+	u8	fac84;			/* 84 */
+	u8	fac85;			/* 85 */
+	u8	_pad_86[91 - 86];	/* 86-90 */
+	u8	fac91;			/* 91 */
+	u8	_pad_92[98 - 92];	/* 92-97 */
+	u8	fac98;			/* 98 */
+	u8	hamaxpow;		/* 99 */
+	u32	rnsize2;		/* 100-103 */
+	u64	rnmax2;			/* 104-111 */
+	u8	_pad_112[116 - 112];	/* 112-115 */
+	u8	fac116;			/* 116 */
+	u8	fac117;			/* 117 */
+	u8	_pad_118;		/* 118 */
+	u8	fac119;			/* 119 */
+	u16	hcpua;			/* 120-121 */
+	u8	_pad_122[124 - 122];	/* 122-123 */
+	u32	hmfai;			/* 124-127 */
+	u8	_pad_128[4096 - 128];	/* 128-4095 */
+} __packed __aligned(PAGE_SIZE);
+
+static struct sclp_ipl_info sclp_ipl_info;
+
+/* Global facility/capability information, filled in during early boot. */
+struct sclp_info sclp;
+EXPORT_SYMBOL(sclp);
+
+/*
+ * Issue READ SCP INFO, trying the "forced" variant first and falling
+ * back to the plain command on response 0x1f0 (invalid command).
+ * Returns 0 on a 0x10 (info provided) response, -EIO otherwise.
+ */
+static int __init sclp_early_read_info(struct read_info_sccb *sccb)
+{
+	int i;
+	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
+				  SCLP_CMDW_READ_SCP_INFO};
+
+	for (i = 0; i < ARRAY_SIZE(commands); i++) {
+		memset(sccb, 0, sizeof(*sccb));
+		sccb->header.length = sizeof(*sccb);
+		sccb->header.function_code = 0x80;
+		sccb->header.control_mask[2] = 0x80;
+		if (sclp_early_cmd(commands[i], sccb))
+			break;
+		if (sccb->header.response_code == 0x10)
+			return 0;
+		if (sccb->header.response_code != 0x1f0)
+			break;
+	}
+	return -EIO;
+}
+
+/*
+ * Decode the READ SCP INFO response into the global 'sclp' facility
+ * structure and save IPL information.  Silently leaves everything at
+ * its zero-initialized defaults if the read fails.
+ */
+static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
+{
+	struct sclp_core_entry *cpue;
+	u16 boot_cpu_address, cpu;
+
+	if (sclp_early_read_info(sccb))
+		return;
+
+	/* Decode facility bytes into individual feature flags. */
+	sclp.facilities = sccb->facilities;
+	sclp.has_sprp = !!(sccb->fac84 & 0x02);
+	sclp.has_core_type = !!(sccb->fac84 & 0x01);
+	sclp.has_gsls = !!(sccb->fac85 & 0x80);
+	sclp.has_64bscao = !!(sccb->fac116 & 0x80);
+	sclp.has_cmma = !!(sccb->fac116 & 0x40);
+	sclp.has_esca = !!(sccb->fac116 & 0x08);
+	sclp.has_pfmfi = !!(sccb->fac117 & 0x40);
+	sclp.has_ibs = !!(sccb->fac117 & 0x20);
+	sclp.has_hvs = !!(sccb->fac119 & 0x80);
+	sclp.has_kss = !!(sccb->fac98 & 0x01);
+	if (sccb->fac85 & 0x02)
+		S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
+	if (sccb->fac91 & 0x40)
+		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
+	/* Memory layout: prefer the legacy fields, fall back to the
+	 * extended (..2) fields when the legacy ones are zero. */
+	sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
+	sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
+	sclp.rzm <<= 20;	/* increment size is reported in MB */
+	sclp.ibc = sccb->ibc;
+
+	if (sccb->hamaxpow && sccb->hamaxpow < 64)
+		sclp.hamax = (1UL << sccb->hamaxpow) - 1;
+	else
+		sclp.hamax = U64_MAX;
+
+	if (!sccb->hcpua) {
+		if (MACHINE_IS_VM)
+			sclp.max_cores = 64;
+		else
+			sclp.max_cores = sccb->ncpurl;
+	} else {
+		sclp.max_cores = sccb->hcpua + 1;
+	}
+
+	/* Pick up per-CPU features from the boot CPU's entry. */
+	boot_cpu_address = stap();
+	cpue = (void *)sccb + sccb->cpuoff;
+	for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) {
+		if (boot_cpu_address != cpue->core_id)
+			continue;
+		sclp.has_siif = cpue->siif;
+		sclp.has_sigpif = cpue->sigpif;
+		sclp.has_sief2 = cpue->sief2;
+		sclp.has_gpere = cpue->gpere;
+		sclp.has_ib = cpue->ib;
+		sclp.has_cei = cpue->cei;
+		sclp.has_skey = cpue->skey;
+		break;
+	}
+
+	/* Save IPL information */
+	sclp_ipl_info.is_valid = 1;
+	if (sccb->fac91 & 0x2)
+		sclp_ipl_info.has_dump = 1;
+	memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);
+
+	/* Multi-threading IDs, only valid when fac42 bit 0x80 is set. */
+	sclp.mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0;
+	sclp.mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0;
+	sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
+
+	sclp.hmfai = sccb->hmfai;
+}
+
+/*
+ * This function will be called after sclp_early_facilities_detect(), which gets
+ * called from early.c code. The sclp_early_facilities_detect() function retrieves
+ * and saves the IPL information.
+ */
+void __init sclp_early_get_ipl_info(struct sclp_ipl_info *info)
+{
+	*info = sclp_ipl_info;
+}
+
+/* Core (CPU) info captured once at early boot; valid flag guards reads. */
+static struct sclp_core_info sclp_early_core_info __initdata;
+static int sclp_early_core_info_valid __initdata;
+
+/* Issue READ CPU INFO and cache the result for later retrieval. */
+static void __init sclp_early_init_core_info(struct read_cpu_info_sccb *sccb)
+{
+	if (!SCLP_HAS_CPU_INFO)
+		return;
+	memset(sccb, 0, sizeof(*sccb));
+	sccb->header.length = sizeof(*sccb);
+	if (sclp_early_cmd(SCLP_CMDW_READ_CPU_INFO, sccb))
+		return;
+	if (sccb->header.response_code != 0x0010)
+		return;
+	sclp_fill_core_info(&sclp_early_core_info, sccb);
+	sclp_early_core_info_valid = 1;
+}
+
+/* Copy the cached core info; -EIO if early detection never succeeded. */
+int __init sclp_early_get_core_info(struct sclp_core_info *info)
+{
+	if (!sclp_early_core_info_valid)
+		return -EIO;
+	*info = sclp_early_core_info;
+	return 0;
+}
+
+/*
+ * Send an SDIAS "query HSA size" event.  Returns the usable HSA size
+ * in bytes (blk_cnt - 1 pages), 0 if no synchronous answer yet, or
+ * -EIO on failure.
+ */
+static long __init sclp_early_hsa_size_init(struct sdias_sccb *sccb)
+{
+	memset(sccb, 0, sizeof(*sccb));
+	sccb->hdr.length = sizeof(*sccb);
+	sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
+	sccb->evbuf.hdr.type = EVTYP_SDIAS;
+	sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
+	sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
+	sccb->evbuf.event_id = 4712;
+	sccb->evbuf.dbs = 1;
+	if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
+		return -EIO;
+	if (sccb->hdr.response_code != 0x20)
+		return -EIO;
+	if (sccb->evbuf.blk_cnt == 0)
+		return 0;
+	return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
+}
+
+/* Read the asynchronous SDIAS answer carrying the HSA size. */
+static long __init sclp_early_hsa_copy_wait(struct sdias_sccb *sccb)
+{
+	memset(sccb, 0, PAGE_SIZE);
+	sccb->hdr.length = PAGE_SIZE;
+	if (sclp_early_cmd(SCLP_CMDW_READ_EVENT_DATA, sccb))
+		return -EIO;
+	if ((sccb->hdr.response_code != 0x20) && (sccb->hdr.response_code != 0x220))
+		return -EIO;
+	if (sccb->evbuf.blk_cnt == 0)
+		return 0;
+	return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
+}
+
+/*
+ * Determine the HSA (hardware save area) size with interrupts masked:
+ * enable SDIAS events, query the size, and - if no synchronous answer
+ * arrived - wait for the asynchronous one.  Event masks are restored
+ * to zero afterwards.
+ */
+static void __init sclp_early_hsa_size_detect(void *sccb)
+{
+	unsigned long flags;
+	long size = -EIO;
+
+	raw_local_irq_save(flags);
+	if (sclp_early_set_event_mask(sccb, EVTYP_SDIAS_MASK, EVTYP_SDIAS_MASK))
+		goto out;
+	size = sclp_early_hsa_size_init(sccb);
+	/* First check for synchronous response (LPAR) */
+	if (size)
+		goto out_mask;
+	/* NOTE(review): ext_params bit 0 appears to flag an already-pending
+	 * service-signal interrupt - confirm against upstream sclp_early.c */
+	if (!(S390_lowcore.ext_params & 1))
+		sclp_early_wait_irq();
+	size = sclp_early_hsa_copy_wait(sccb);
+out_mask:
+	sclp_early_set_event_mask(sccb, 0, 0);
+out:
+	raw_local_irq_restore(flags);
+	if (size > 0)
+		sclp.hsa_size = size;
+}
+
+/* Derive console capabilities from the saved event masks in the sccb. */
+static void __init sclp_early_console_detect(struct init_sccb *sccb)
+{
+	if (sccb->header.response_code != 0x20)
+		return;
+
+	if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
+		sclp.has_vt220 = 1;
+
+	if (sclp_early_con_check_linemode(sccb))
+		sclp.has_linemode = 1;
+}
+
+/*
+ * Early-boot entry point: run all SCLP detection steps using the
+ * single statically allocated sclp_early_sccb page.
+ */
+void __init sclp_early_detect(void)
+{
+	void *sccb = &sclp_early_sccb;
+
+	sclp_early_facilities_detect(sccb);
+	sclp_early_init_core_info(sccb);
+	sclp_early_hsa_size_detect(sccb);
+
+	/*
+	 * Turn off SCLP event notifications.  Also save remote masks in the
+	 * sccb.  These are sufficient to detect sclp console capabilities.
+	 */
+	sclp_early_set_event_mask(sccb, 0, 0);
+	sclp_early_console_detect(sccb);
+}
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_early_core.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_early_core.c
new file mode 100644
index 0000000..edeb259
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_early_core.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Copyright IBM Corp. 2015
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <asm/processor.h>
+#include <asm/lowcore.h>
+#include <asm/ebcdic.h>
+#include <asm/irq.h>
+#include "sclp.h"
+#include "sclp_rw.h"
+
+/* Static SCCB page and init state, placed in .data (not .bss) -
+ * presumably so they are usable before the BSS is cleared; confirm
+ * against upstream sclp_early_core.c. */
+char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(data);
+int sclp_init_state __section(data) = sclp_init_state_uninitialized;
+
+/*
+ * Busy-wait (via enabled wait PSW) for a service-signal external
+ * interrupt.  CR0 is temporarily set so that only the service-signal
+ * subclass is enabled; the external-new PSW is redirected to the
+ * local label and restored before returning.
+ */
+void sclp_early_wait_irq(void)
+{
+	unsigned long psw_mask, addr;
+	psw_t psw_ext_save, psw_wait;
+	union ctlreg0 cr0, cr0_new;
+
+	/* Enable only the service-signal external interrupt subclass. */
+	__ctl_store(cr0.val, 0, 0);
+	cr0_new.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
+	cr0_new.lap = 0;
+	cr0_new.sssm = 1;
+	__ctl_load(cr0_new.val, 0, 0);
+
+	psw_ext_save = S390_lowcore.external_new_psw;
+	psw_mask = __extract_psw();
+	S390_lowcore.external_new_psw.mask = psw_mask;
+	psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT;
+	S390_lowcore.ext_int_code = 0;
+
+	/* Load the wait PSW; any external interrupt resumes at label 0. */
+	do {
+		asm volatile(
+			"	larl	%[addr],0f\n"
+			"	stg	%[addr],%[psw_wait_addr]\n"
+			"	stg	%[addr],%[psw_ext_addr]\n"
+			"	lpswe	%[psw_wait]\n"
+			"0:\n"
+			: [addr] "=&d" (addr),
+			  [psw_wait_addr] "=Q" (psw_wait.addr),
+			  [psw_ext_addr] "=Q" (S390_lowcore.external_new_psw.addr)
+			: [psw_wait] "Q" (psw_wait)
+			: "cc", "memory");
+	} while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG);
+
+	/* Restore the original external-new PSW and CR0. */
+	S390_lowcore.external_new_psw = psw_ext_save;
+	__ctl_load(cr0.val, 0, 0);
+}
+
+/*
+ * Issue an SCLP command synchronously during early boot: start the
+ * service call with interrupts masked and spin-wait for the
+ * service-signal interrupt that signals completion.
+ */
+int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb)
+{
+	unsigned long flags;
+	int rc;
+
+	raw_local_irq_save(flags);
+	rc = sclp_service_call(cmd, sccb);
+	if (rc)
+		goto out;
+	sclp_early_wait_irq();
+out:
+	raw_local_irq_restore(flags);
+	return rc;
+}
+
+/* SCCB layout for a line-mode (message) write. */
+struct write_sccb {
+	struct sccb_header header;
+	struct msg_buf msg;
+} __packed;
+
+/* Output multi-line text using SCLP Message interface. */
+static void sclp_early_print_lm(const char *str, unsigned int len)
+{
+	unsigned char *ptr, *end, ch;
+	unsigned int count, offset;
+	struct write_sccb *sccb;
+	struct msg_buf *msg;
+	struct mdb *mdb;
+	struct mto *mto;
+	struct go *go;
+
+	sccb = (struct write_sccb *) &sclp_early_sccb;
+	end = (unsigned char *) sccb + sizeof(sclp_early_sccb) - 1;
+	memset(sccb, 0, sizeof(*sccb));
+	ptr = (unsigned char *) &sccb->msg.mdb.mto;
+	offset = 0;
+	do {
+		for (count = sizeof(*mto); offset < len; count++) {
+			ch = str[offset++];
+			if ((ch == 0x0a) || (ptr + count > end))
+				break;
+			ptr[count] = _ascebc[ch];
+		}
+		mto = (struct mto *) ptr;
+		memset(mto, 0, sizeof(*mto));
+		mto->length = count;
+		mto->type = 4;
+		mto->line_type_flags = LNTPFLGS_ENDTEXT;
+		ptr += count;
+	} while ((offset < len) && (ptr + sizeof(*mto) <= end));
+	len = ptr - (unsigned char *) sccb;
+	sccb->header.length = len - offsetof(struct write_sccb, header);
+	msg = &sccb->msg;
+	msg->header.type = EVTYP_MSG;
+	msg->header.length = len - offsetof(struct write_sccb, msg.header);
+	mdb = &msg->mdb;
+	mdb->header.type = 1;
+	mdb->header.tag = 0xD4C4C240;
+	mdb->header.revision_code = 1;
+	mdb->header.length = len - offsetof(struct write_sccb, msg.mdb.header);
+	go = &mdb->go;
+	go->length = sizeof(*go);
+	go->type = 1;
+	sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
+}
+
+/* SCCB layout for a VT220 write; data[] holds the raw text. */
+struct vt220_sccb {
+	struct sccb_header header;
+	struct {
+		struct evbuf_header header;
+		char data[];
+	} msg;
+} __packed;
+
+/* Output multi-line text using SCLP VT220 interface. */
+static void sclp_early_print_vt220(const char *str, unsigned int len)
+{
+	struct vt220_sccb *sccb;
+
+	sccb = (struct vt220_sccb *) &sclp_early_sccb;
+	/* Truncate so headers plus text fit in the single SCCB page. */
+	if (sizeof(*sccb) + len >= sizeof(sclp_early_sccb))
+		len = sizeof(sclp_early_sccb) - sizeof(*sccb);
+	memset(sccb, 0, sizeof(*sccb));
+	memcpy(&sccb->msg.data, str, len);
+	sccb->header.length = sizeof(*sccb) + len;
+	sccb->msg.header.length = sizeof(sccb->msg) + len;
+	sccb->msg.header.type = EVTYP_VT220MSG;
+	sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
+}
+
+/*
+ * Issue WRITE EVENT MASK with the given receive/send masks.  On
+ * success the sccb also carries back the SCLP's own masks, which the
+ * console-detection helpers below inspect.  Returns 0 or -EIO.
+ */
+int sclp_early_set_event_mask(struct init_sccb *sccb,
+			      unsigned long receive_mask,
+			      unsigned long send_mask)
+{
+	memset(sccb, 0, sizeof(*sccb));
+	sccb->header.length = sizeof(*sccb);
+	sccb->mask_length = sizeof(sccb_mask_t);
+	sccb->receive_mask = receive_mask;
+	sccb->send_mask = send_mask;
+	if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_MASK, sccb))
+		return -EIO;
+	if (sccb->header.response_code != 0x20)
+		return -EIO;
+	return 0;
+}
+
+/*
+ * A line-mode console needs the SCLP to send operator commands to us
+ * and to receive message (or priority-message) events from us.
+ */
+unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb)
+{
+	if (!(sccb->sclp_send_mask & EVTYP_OPCMD_MASK))
+		return 0;
+	if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
+		return 0;
+	return 1;
+}
+
+/*
+ * Enable (disable == 0) or disable (disable == 1) the console event
+ * masks and report which console types are available.
+ */
+static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
+{
+	unsigned long receive_mask, send_mask;
+	struct init_sccb *sccb;
+	int rc;
+
+	*have_linemode = *have_vt220 = 0;
+	sccb = (struct init_sccb *) &sclp_early_sccb;
+	receive_mask = disable ? 0 : EVTYP_OPCMD_MASK;
+	send_mask = disable ? 0 : EVTYP_VT220MSG_MASK | EVTYP_MSG_MASK;
+	rc = sclp_early_set_event_mask(sccb, receive_mask, send_mask);
+	if (rc)
+		return rc;
+	*have_linemode = sclp_early_con_check_linemode(sccb);
+	*have_vt220 = sccb->send_mask & EVTYP_VT220MSG_MASK;
+	return rc;
+}
+
+/*
+ * Output one or more lines of text on the SCLP console (VT220 and /
+ * or line-mode).
+ */
+void __sclp_early_printk(const char *str, unsigned int len)
+{
+	int have_linemode, have_vt220;
+
+	/* Only usable before the regular SCLP driver has taken over. */
+	if (sclp_init_state != sclp_init_state_uninitialized)
+		return;
+	if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0)
+		return;
+	if (have_linemode)
+		sclp_early_print_lm(str, len);
+	if (have_vt220)
+		sclp_early_print_vt220(str, len);
+	/* Disable the event masks again. */
+	sclp_early_setup(1, &have_linemode, &have_vt220);
+}
+
+/* Convenience wrapper for NUL-terminated strings. */
+void sclp_early_printk(const char *str)
+{
+	__sclp_early_printk(str, strlen(str));
+}
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_ftp.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_ftp.c
new file mode 100644
index 0000000..dfdd6c8
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_ftp.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    SCLP Event Type (ET) 7 - Diagnostic Test FTP Services, usable on LPAR
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ *
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <asm/sysinfo.h>
+#include <asm/ebcdic.h>
+
+#include "sclp.h"
+#include "sclp_diag.h"
+#include "sclp_ftp.h"
+
+/* Completion and result data shared with the receive callback. */
+static DECLARE_COMPLETION(sclp_ftp_rx_complete);
+static u8 sclp_ftp_ldflg;	/* load flag / error indication of last transfer */
+static u64 sclp_ftp_fsize;	/* file size reported by last transfer */
+static u64 sclp_ftp_length;	/* number of bytes transferred */
+
+/**
+ * sclp_ftp_txcb() - Diagnostic Test FTP services SCLP command callback
+ * @req:  the SCLP write-event request that completed
+ * @data: completion object the request issuer is waiting on
+ *
+ * Wakes up the issuer of the write-event-data request.
+ */
+static void sclp_ftp_txcb(struct sclp_req *req, void *data)
+{
+	struct completion *completion = data;
+
+#ifdef DEBUG
+	pr_debug("SCLP (ET7) TX-IRQ, SCCB @ 0x%p: %*phN\n",
+		 req->sccb, 24, req->sccb);
+#endif
+	complete(completion);
+}
+
+/**
+ * sclp_ftp_rxcb() - Diagnostic Test FTP services receiver event callback
+ * @evbuf: event buffer delivered by the SCLP core
+ *
+ * Ignores anything that is not an ET7 FTP completion event; otherwise
+ * copies the result data and wakes up the waiter in sclp_ftp_cmd().
+ */
+static void sclp_ftp_rxcb(struct evbuf_header *evbuf)
+{
+	struct sclp_diag_evbuf *diag = (struct sclp_diag_evbuf *) evbuf;
+
+	/*
+	 * Check for Diagnostic Test FTP Service
+	 */
+	if (evbuf->type != EVTYP_DIAG_TEST ||
+	    diag->route != SCLP_DIAG_FTP_ROUTE ||
+	    diag->mdd.ftp.pcx != SCLP_DIAG_FTP_XPCX ||
+	    evbuf->length < SCLP_DIAG_FTP_EVBUF_LEN)
+		return;
+
+#ifdef DEBUG
+	pr_debug("SCLP (ET7) RX-IRQ, Event @ 0x%p: %*phN\n",
+		 evbuf, 24, evbuf);
+#endif
+
+	/*
+	 * Because the event buffer is located in a page which is owned
+	 * by the SCLP core, all data of interest must be copied. The
+	 * error indication is in 'sclp_ftp_ldflg'
+	 */
+	sclp_ftp_ldflg = diag->mdd.ftp.ldflg;
+	sclp_ftp_fsize = diag->mdd.ftp.fsize;
+	sclp_ftp_length = diag->mdd.ftp.length;
+
+	complete(&sclp_ftp_rx_complete);
+}
+
+/**
+ * sclp_ftp_et7() - start a Diagnostic Test FTP Service SCLP request
+ * @ftp: pointer to FTP descriptor
+ *
+ * Builds and issues the write-event-data request and waits until the
+ * SCLP has accepted it; the data transfer itself completes later and
+ * is signalled through sclp_ftp_rxcb().
+ *
+ * Return: 0 on success, else a (negative) error code
+ */
+static int sclp_ftp_et7(const struct hmcdrv_ftp_cmdspec *ftp)
+{
+	struct completion completion;
+	struct sclp_diag_sccb *sccb;
+	struct sclp_req *req;
+	size_t len;
+	int rc;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	/* SCCB must be allocated from DMA-capable memory for the SCLP */
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!req || !sccb) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
+
+	sccb->hdr.length = SCLP_DIAG_FTP_EVBUF_LEN +
+		sizeof(struct sccb_header);
+	sccb->evbuf.hdr.type = EVTYP_DIAG_TEST;
+	sccb->evbuf.hdr.length = SCLP_DIAG_FTP_EVBUF_LEN;
+	sccb->evbuf.hdr.flags = 0; /* clear processed-buffer */
+	sccb->evbuf.route = SCLP_DIAG_FTP_ROUTE;
+	sccb->evbuf.mdd.ftp.pcx = SCLP_DIAG_FTP_XPCX;
+	sccb->evbuf.mdd.ftp.srcflg = 0;
+	sccb->evbuf.mdd.ftp.pgsize = 0;
+	sccb->evbuf.mdd.ftp.asce = _ASCE_REAL_SPACE;
+	sccb->evbuf.mdd.ftp.ldflg = SCLP_DIAG_FTP_LDFAIL;
+	sccb->evbuf.mdd.ftp.fsize = 0;
+	sccb->evbuf.mdd.ftp.cmd = ftp->id;
+	sccb->evbuf.mdd.ftp.offset = ftp->ofs;
+	sccb->evbuf.mdd.ftp.length = ftp->len;
+	sccb->evbuf.mdd.ftp.bufaddr = virt_to_phys(ftp->buf);
+
+	/* the file name must fit including its terminating NUL */
+	len = strlcpy(sccb->evbuf.mdd.ftp.fident, ftp->fname,
+		      HMCDRV_FTP_FIDENT_MAX);
+	if (len >= HMCDRV_FTP_FIDENT_MAX) {
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
+	req->sccb = sccb;
+	req->status = SCLP_REQ_FILLED;
+	req->callback = sclp_ftp_txcb;
+	req->callback_data = &completion;
+
+	init_completion(&completion);
+
+	rc = sclp_add_request(req);
+	if (rc)
+		goto out_free;
+
+	/* Wait for end of ftp sclp command. */
+	wait_for_completion(&completion);
+
+#ifdef DEBUG
+	pr_debug("status of SCLP (ET7) request is 0x%04x (0x%02x)\n",
+		 sccb->hdr.response_code, sccb->evbuf.hdr.flags);
+#endif
+
+	/*
+	 * Check if sclp accepted the request. The data transfer runs
+	 * asynchronously and the completion is indicated with an
+	 * sclp ET7 event.
+	 */
+	if (req->status != SCLP_REQ_DONE ||
+	    (sccb->evbuf.hdr.flags & 0x80) == 0 || /* processed-buffer */
+	    (sccb->hdr.response_code & 0xffU) != 0x20U) {
+		rc = -EIO;
+	}
+
+out_free:
+	free_page((unsigned long) sccb);
+	kfree(req);
+	return rc;
+}
+
+/**
+ * sclp_ftp_cmd() - executes a HMC related SCLP Diagnose (ET7) FTP command
+ * @ftp: pointer to FTP command specification
+ * @fsize: return of file size (or NULL if undesirable)
+ *
+ * Attention: Notice that this function is not reentrant - so the caller
+ * must ensure locking.
+ *
+ * Return: number of bytes read/written or a (negative) error code
+ */
+ssize_t sclp_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
+{
+	ssize_t len;
+#ifdef DEBUG
+	unsigned long start_jiffies;
+
+	pr_debug("starting SCLP (ET7), cmd %d for '%s' at %lld with %zd bytes\n",
+		 ftp->id, ftp->fname, (long long) ftp->ofs, ftp->len);
+	start_jiffies = jiffies;
+#endif
+
+	/* re-arm the completion that sclp_ftp_rxcb() signals */
+	init_completion(&sclp_ftp_rx_complete);
+
+	/* Start ftp sclp command. */
+	len = sclp_ftp_et7(ftp);
+	if (len)
+		goto out_unlock;
+
+	/*
+	 * There is no way to cancel the sclp ET7 request, the code
+	 * needs to wait unconditionally until the transfer is complete.
+	 */
+	wait_for_completion(&sclp_ftp_rx_complete);
+
+#ifdef DEBUG
+	pr_debug("completed SCLP (ET7) request after %lu ms (all)\n",
+		 (jiffies - start_jiffies) * 1000 / HZ);
+	pr_debug("return code of SCLP (ET7) FTP Service is 0x%02x, with %lld/%lld bytes\n",
+		 sclp_ftp_ldflg, sclp_ftp_length, sclp_ftp_fsize);
+#endif
+
+	/* map the load flag reported by the SCLP to a return value */
+	switch (sclp_ftp_ldflg) {
+	case SCLP_DIAG_FTP_OK:
+		len = sclp_ftp_length;
+		if (fsize)
+			*fsize = sclp_ftp_fsize;
+		break;
+	case SCLP_DIAG_FTP_LDNPERM:
+		len = -EPERM;
+		break;
+	case SCLP_DIAG_FTP_LDRUNS:
+		len = -EBUSY;
+		break;
+	case SCLP_DIAG_FTP_LDFAIL:
+		len = -ENOENT;
+		break;
+	default:
+		len = -EIO;
+		break;
+	}
+
+	/* NOTE(review): no lock is taken here; serialization is the caller's job */
+out_unlock:
+	return len;
+}
+
+/*
+ * ET7 event listener
+ */
+static struct sclp_register sclp_ftp_event = {
+	.send_mask = EVTYP_DIAG_TEST_MASK,    /* want tx events */
+	.receive_mask = EVTYP_DIAG_TEST_MASK, /* want rx events */
+	.receiver_fn = sclp_ftp_rxcb,	      /* async callback (rx) */
+	.state_change_fn = NULL,
+	.pm_event_fn = NULL,
+};
+
+/**
+ * sclp_ftp_startup() - startup of FTP services, when running on LPAR
+ *
+ * Registers the ET7 event listener.  Return: 0 on success, else a
+ * (negative) error code.
+ */
+int sclp_ftp_startup(void)
+{
+#ifdef DEBUG
+	unsigned long info;
+#endif
+	int rc;
+
+	rc = sclp_register(&sclp_ftp_event);
+	if (rc)
+		return rc;
+
+	/* The SYSIB 2.2.2 query below is informational only (DEBUG builds). */
+#ifdef DEBUG
+	info = get_zeroed_page(GFP_KERNEL);
+
+	if (info != 0) {
+		struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
+
+		if (!stsi(info222, 2, 2, 2)) { /* get SYSIB 2.2.2 */
+			info222->name[sizeof(info222->name) - 1] = '\0';
+			EBCASC_500(info222->name, sizeof(info222->name) - 1);
+			pr_debug("SCLP (ET7) FTP Service working on LPAR %u (%s)\n",
+				 info222->lpar_number, info222->name);
+		}
+
+		free_page(info);
+	}
+#endif	/* DEBUG */
+	return 0;
+}
+
+/**
+ * sclp_ftp_shutdown() - shutdown of FTP services, when running on LPAR
+ *
+ * Unregisters the ET7 event listener registered by sclp_ftp_startup().
+ */
+void sclp_ftp_shutdown(void)
+{
+	sclp_unregister(&sclp_ftp_event);
+}
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_ftp.h b/src/kernel/linux/v4.14/drivers/s390/char/sclp_ftp.h
new file mode 100644
index 0000000..d64da18
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_ftp.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    SCLP Event Type (ET) 7 - Diagnostic Test FTP Services, usable on LPAR
+ *
+ *    Notice that all functions exported here are not reentrant.
+ *    So usage should be exclusive, ensured by the caller (e.g. using a
+ *    mutex).
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef __SCLP_FTP_H__
+#define __SCLP_FTP_H__
+
+#include "hmcdrv_ftp.h"
+
+int sclp_ftp_startup(void);
+void sclp_ftp_shutdown(void);
+ssize_t sclp_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize);
+
+#endif	 /* __SCLP_FTP_H__ */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_ocf.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_ocf.c
new file mode 100644
index 0000000..d35f10e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_ocf.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    SCLP OCF communication parameters sysfs interface
+ *
+ *    Copyright IBM Corp. 2011
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "sclp_ocf"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/kmod.h>
+#include <linux/timer.h>
+#include <linux/err.h>
+#include <asm/ebcdic.h>
+#include <asm/sclp.h>
+
+#include "sclp.h"
+
+#define OCF_LENGTH_HMC_NETWORK 8UL
+#define OCF_LENGTH_CPC_NAME 8UL
+
+static char hmc_network[OCF_LENGTH_HMC_NETWORK + 1];
+static char cpc_name[OCF_LENGTH_CPC_NAME]; /* in EBCDIC */
+
+static DEFINE_SPINLOCK(sclp_ocf_lock);
+static struct work_struct sclp_ocf_change_work;
+
+static struct kset *ocf_kset;
+
+/* Deferred work: emit a KOBJ_CHANGE uevent for the /sys/firmware/ocf kset. */
+static void sclp_ocf_change_notify(struct work_struct *work)
+{
+	kobject_uevent(&ocf_kset->kobj, KOBJ_CHANGE);
+}
+
+/*
+ * Handler for OCF event. Look for the CPC image name.
+ * The names are stored under sclp_ocf_lock: hmc_network is converted
+ * to ASCII and NUL-terminated, cpc_name is kept in EBCDIC.
+ */
+static void sclp_ocf_handler(struct evbuf_header *evbuf)
+{
+	struct gds_vector *v;
+	struct gds_subvector *sv, *netid, *cpc;
+	size_t size;
+
+	/* Find the 0x9f00 block. */
+	v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
+				 0x9f00);
+	if (!v)
+		return;
+	/* Find the 0x9f22 block inside the 0x9f00 block. */
+	v = sclp_find_gds_vector(v + 1, (void *) v + v->length, 0x9f22);
+	if (!v)
+		return;
+	/* Find the 0x81 block inside the 0x9f22 block. */
+	sv = sclp_find_gds_subvector(v + 1, (void *) v + v->length, 0x81);
+	if (!sv)
+		return;
+	/* Find the 0x01 block inside the 0x81 block. */
+	netid = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 1);
+	/* Find the 0x02 block inside the 0x81 block. */
+	cpc = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 2);
+	/* Copy network name and cpc name. */
+	spin_lock(&sclp_ocf_lock);
+	if (netid) {
+		size = min(OCF_LENGTH_HMC_NETWORK, (size_t) netid->length);
+		memcpy(hmc_network, netid + 1, size);
+		EBCASC(hmc_network, size);
+		hmc_network[size] = 0;
+	}
+	if (cpc) {
+		size = min(OCF_LENGTH_CPC_NAME, (size_t) cpc->length);
+		memset(cpc_name, 0, OCF_LENGTH_CPC_NAME);
+		memcpy(cpc_name, cpc + 1, size);
+	}
+	spin_unlock(&sclp_ocf_lock);
+	/* notify user space asynchronously */
+	schedule_work(&sclp_ocf_change_work);
+}
+
+/* Listen for OCF events carrying the network and CPC name information. */
+static struct sclp_register sclp_ocf_event = {
+	.receive_mask = EVTYP_OCF_MASK,
+	.receiver_fn = sclp_ocf_handler,
+};
+
+/*
+ * Copy the CPC name (in EBCDIC, not NUL-terminated) into @dst.
+ * @dst must provide at least OCF_LENGTH_CPC_NAME bytes.
+ */
+void sclp_ocf_cpc_name_copy(char *dst)
+{
+	spin_lock_irq(&sclp_ocf_lock);
+	memcpy(dst, cpc_name, OCF_LENGTH_CPC_NAME);
+	spin_unlock_irq(&sclp_ocf_lock);
+}
+EXPORT_SYMBOL(sclp_ocf_cpc_name_copy);
+
+/* sysfs: show the CPC name, converted from EBCDIC to ASCII. */
+static ssize_t cpc_name_show(struct kobject *kobj,
+			     struct kobj_attribute *attr, char *page)
+{
+	char name[OCF_LENGTH_CPC_NAME + 1];
+
+	sclp_ocf_cpc_name_copy(name);
+	name[OCF_LENGTH_CPC_NAME] = 0;
+	EBCASC(name, OCF_LENGTH_CPC_NAME);
+	return snprintf(page, PAGE_SIZE, "%s\n", name);
+}
+
+static struct kobj_attribute cpc_name_attr =
+	__ATTR(cpc_name, 0444, cpc_name_show, NULL);
+
+/* sysfs: show the HMC network name (already converted to ASCII). */
+static ssize_t hmc_network_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *page)
+{
+	int rc;
+
+	spin_lock_irq(&sclp_ocf_lock);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", hmc_network);
+	spin_unlock_irq(&sclp_ocf_lock);
+	return rc;
+}
+
+static struct kobj_attribute hmc_network_attr =
+	__ATTR(hmc_network, 0444, hmc_network_show, NULL);
+
+static struct attribute *ocf_attrs[] = {
+	&cpc_name_attr.attr,
+	&hmc_network_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ocf_attr_group = {
+	.attrs = ocf_attrs,
+};
+
+/*
+ * Create the /sys/firmware/ocf kset with its attribute group and
+ * register the OCF event listener.
+ */
+static int __init ocf_init(void)
+{
+	int rc;
+
+	INIT_WORK(&sclp_ocf_change_work, sclp_ocf_change_notify);
+	ocf_kset = kset_create_and_add("ocf", NULL, firmware_kobj);
+	if (!ocf_kset)
+		return -ENOMEM;
+
+	rc = sysfs_create_group(&ocf_kset->kobj, &ocf_attr_group);
+	if (rc) {
+		kset_unregister(ocf_kset);
+		return rc;
+	}
+
+	/* NOTE(review): the kset is not unregistered if sclp_register()
+	 * fails -- matches upstream behavior. */
+	return sclp_register(&sclp_ocf_event);
+}
+
+device_initcall(ocf_init);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_pci.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_pci.c
new file mode 100644
index 0000000..e7c84a4
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_pci.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI I/O adapter configuration related functions.
+ *
+ * Copyright IBM Corp. 2016
+ */
+#define KMSG_COMPONENT "sclp_cmd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/completion.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+
+#include <asm/sclp.h>
+
+#include "sclp.h"
+
+#define SCLP_CMDW_CONFIGURE_PCI			0x001a0001
+#define SCLP_CMDW_DECONFIGURE_PCI		0x001b0001
+
+#define SCLP_ATYPE_PCI				2
+
+#define SCLP_ERRNOTIFY_AQ_REPAIR		1
+#define SCLP_ERRNOTIFY_AQ_INFO_LOG		2
+
+/* Serializes users of the temporarily registered ERRNOTIFY listener. */
+static DEFINE_MUTEX(sclp_pci_mutex);
+/* Registered only for the duration of an error report (see sclp_pci_report). */
+static struct sclp_register sclp_pci_event = {
+	.send_mask = EVTYP_ERRNOTIFY_MASK,
+};
+
+/* Event buffer layout for an adapter error notification (ERRNOTIFY) event. */
+struct err_notify_evbuf {
+	struct evbuf_header header;
+	u8 action;	/* SCLP_ERRNOTIFY_AQ_* */
+	u8 atype;	/* adapter type, SCLP_ATYPE_PCI here */
+	u32 fh;		/* function handle */
+	u32 fid;	/* function id */
+	u8 data[0];	/* variable-size report payload */
+} __packed;
+
+/* SCCB wrapping a single error notification event buffer. */
+struct err_notify_sccb {
+	struct sccb_header header;
+	struct err_notify_evbuf evbuf;
+} __packed;
+
+struct pci_cfg_sccb {
+	struct sccb_header header;
+	u8 atype;		/* adapter type */
+	u8 reserved1;
+	u16 reserved2;
+	u32 aid;		/* adapter identifier */
+} __packed;
+
+/*
+ * Issue a PCI (de)configure SCLP command for adapter @fid.
+ * Returns 0 on success, -EOPNOTSUPP if the facility is unavailable,
+ * -ENOMEM on allocation failure, or -EIO on an SCLP error response.
+ */
+static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
+{
+	struct pci_cfg_sccb *sccb;
+	int rc;
+
+	if (!SCLP_HAS_PCI_RECONFIG)
+		return -EOPNOTSUPP;
+
+	sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+
+	sccb->header.length = PAGE_SIZE;
+	sccb->atype = SCLP_ATYPE_PCI;
+	sccb->aid = fid;
+	rc = sclp_sync_request(cmd, sccb);
+	if (rc)
+		goto out;
+	/* 0x0020 and 0x0120 are treated as success */
+	switch (sccb->header.response_code) {
+	case 0x0020:
+	case 0x0120:
+		break;
+	default:
+		pr_warn("configure PCI I/O adapter failed: cmd=0x%08x  response=0x%04x\n",
+			cmd, sccb->header.response_code);
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+/* Configure the PCI I/O adapter with function id @fid. */
+int sclp_pci_configure(u32 fid)
+{
+	return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
+}
+EXPORT_SYMBOL(sclp_pci_configure);
+
+/* Deconfigure the PCI I/O adapter with function id @fid. */
+int sclp_pci_deconfigure(u32 fid)
+{
+	return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
+}
+EXPORT_SYMBOL(sclp_pci_deconfigure);
+
+/* Request callback: wake up the waiter in sclp_pci_report(). */
+static void sclp_pci_callback(struct sclp_req *req, void *data)
+{
+	struct completion *completion = data;
+
+	complete(completion);
+}
+
+/*
+ * Validate an error report before forwarding it to the SCLP:
+ * only version 1 reports with a known action code and a payload that
+ * fits into the SCCB page are accepted.
+ */
+static int sclp_pci_check_report(struct zpci_report_error_header *report)
+{
+	if (report->version != 1)
+		return -EINVAL;
+
+	if (report->action != SCLP_ERRNOTIFY_AQ_REPAIR &&
+	    report->action != SCLP_ERRNOTIFY_AQ_INFO_LOG)
+		return -EINVAL;
+
+	if (report->length > (PAGE_SIZE - sizeof(struct err_notify_sccb)))
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Send a PCI error notification (event type ERRNOTIFY) for function
+ * handle @fh / function id @fid to the SCLP.
+ * Returns 0 on success or a negative error code.
+ */
+int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid)
+{
+	DECLARE_COMPLETION_ONSTACK(completion);
+	struct err_notify_sccb *sccb;
+	struct sclp_req req;
+	int ret;
+
+	ret = sclp_pci_check_report(report);
+	if (ret)
+		return ret;
+
+	mutex_lock(&sclp_pci_mutex);
+	/* register temporarily to learn whether ERRNOTIFY is supported */
+	ret = sclp_register(&sclp_pci_event);
+	if (ret)
+		goto out_unlock;
+
+	if (!(sclp_pci_event.sclp_receive_mask & EVTYP_ERRNOTIFY_MASK)) {
+		ret = -EOPNOTSUPP;
+		goto out_unregister;
+	}
+
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb) {
+		ret = -ENOMEM;
+		goto out_unregister;
+	}
+
+	memset(&req, 0, sizeof(req));
+	req.callback_data = &completion;
+	req.callback = sclp_pci_callback;
+	req.command = SCLP_CMDW_WRITE_EVENT_DATA;
+	req.status = SCLP_REQ_FILLED;
+	req.sccb = sccb;
+
+	sccb->evbuf.header.length = sizeof(sccb->evbuf) + report->length;
+	sccb->evbuf.header.type = EVTYP_ERRNOTIFY;
+	sccb->header.length = sizeof(sccb->header) + sccb->evbuf.header.length;
+
+	sccb->evbuf.action = report->action;
+	sccb->evbuf.atype = SCLP_ATYPE_PCI;
+	sccb->evbuf.fh = fh;
+	sccb->evbuf.fid = fid;
+
+	memcpy(sccb->evbuf.data, report->data, report->length);
+
+	ret = sclp_add_request(&req);
+	if (ret)
+		goto out_free_req;
+
+	wait_for_completion(&completion);
+	if (req.status != SCLP_REQ_DONE) {
+		pr_warn("request failed (status=0x%02x)\n",
+			req.status);
+		ret = -EIO;
+		goto out_free_req;
+	}
+
+	if (sccb->header.response_code != 0x0020) {
+		pr_warn("request failed with response code 0x%x\n",
+			sccb->header.response_code);
+		ret = -EIO;
+	}
+
+out_free_req:
+	free_page((unsigned long) sccb);
+out_unregister:
+	sclp_unregister(&sclp_pci_event);
+out_unlock:
+	mutex_unlock(&sclp_pci_mutex);
+	return ret;
+}
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_quiesce.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_quiesce.c
new file mode 100644
index 0000000..76956c2
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_quiesce.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *     signal quiesce handler
+ *
+ *  Copyright IBM Corp. 1999, 2004
+ *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *             Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/atomic.h>
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+#include "sclp.h"
+
+static void (*old_machine_restart)(char *);
+static void (*old_machine_halt)(void);
+static void (*old_machine_power_off)(void);
+
+/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
+static void do_machine_quiesce(void)
+{
+	psw_t quiesce_psw;
+
+	smp_send_stop();
+	/* wait PSW with address 0xfff signals that quiesce is complete */
+	quiesce_psw.mask =
+		PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_WAIT;
+	quiesce_psw.addr = 0xfff;
+	__load_psw(quiesce_psw);
+}
+
+/* Handler for quiesce event. Start shutdown procedure. */
+static void sclp_quiesce_handler(struct evbuf_header *evbuf)
+{
+	/* redirect all shutdown paths once, saving the originals for resume */
+	if (_machine_restart != (void *) do_machine_quiesce) {
+		old_machine_restart = _machine_restart;
+		old_machine_halt = _machine_halt;
+		old_machine_power_off = _machine_power_off;
+		_machine_restart = (void *) do_machine_quiesce;
+		_machine_halt = do_machine_quiesce;
+		_machine_power_off = do_machine_quiesce;
+	}
+	ctrl_alt_del();
+}
+
+/* Undo machine restart/halt/power_off modification on resume */
+static void sclp_quiesce_pm_event(struct sclp_register *reg,
+				  enum sclp_pm_event sclp_pm_event)
+{
+	switch (sclp_pm_event) {
+	case SCLP_PM_EVENT_RESTORE:
+		if (old_machine_restart) {
+			_machine_restart = old_machine_restart;
+			_machine_halt = old_machine_halt;
+			_machine_power_off = old_machine_power_off;
+			old_machine_restart = NULL;
+			old_machine_halt = NULL;
+			old_machine_power_off = NULL;
+		}
+		break;
+	case SCLP_PM_EVENT_FREEZE:
+	case SCLP_PM_EVENT_THAW:
+		/* nothing to do */
+		break;
+	}
+}
+
+/* Listen for signal-quiesce events from the service element. */
+static struct sclp_register sclp_quiesce_event = {
+	.receive_mask = EVTYP_SIGQUIESCE_MASK,
+	.receiver_fn = sclp_quiesce_handler,
+	.pm_event_fn = sclp_quiesce_pm_event
+};
+
+/* Initialize quiesce driver: register the event listener. */
+static int __init sclp_quiesce_init(void)
+{
+	return sclp_register(&sclp_quiesce_event);
+}
+device_initcall(sclp_quiesce_init);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_rw.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_rw.c
new file mode 100644
index 0000000..44594a4
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_rw.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * driver: reading from and writing to system console on S/390 via SCLP
+ *
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kmod.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/ctype.h>
+#include <linux/uaccess.h>
+
+#include "sclp.h"
+#include "sclp_rw.h"
+
+/*
+ * The room for the SCCB (only for writing) is not equal to a page's size
+ * (as it is specified as the maximum size in the SCLP documentation)
+ * because of the additional data structure described above.
+ */
+#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
+
+/* Forward power management events to the console driver. */
+static void sclp_rw_pm_event(struct sclp_register *reg,
+			     enum sclp_pm_event sclp_pm_event)
+{
+	sclp_console_pm_event(sclp_pm_event);
+}
+
+/* Event type structure for write message and write priority message */
+static struct sclp_register sclp_rw_event = {
+	.send_mask = EVTYP_MSG_MASK,
+	.pm_event_fn = sclp_rw_pm_event,
+};
+
+/*
+ * Setup a sclp write buffer. Gets a page as input (4K) and returns
+ * a pointer to a struct sclp_buffer structure that is located at the
+ * end of the input page. This reduces the buffer space by a few
+ * bytes but simplifies things.
+ * @columns and @htab set the line width and the horizontal tab stop
+ * distance used when formatting output.
+ */
+struct sclp_buffer *
+sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
+{
+	struct sclp_buffer *buffer;
+	struct sccb_header *sccb;
+
+	sccb = (struct sccb_header *) page;
+	/*
+	 * We keep the struct sclp_buffer structure at the end
+	 * of the sccb page.
+	 */
+	buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1;
+	buffer->sccb = sccb;
+	buffer->retry_count = 0;
+	buffer->messages = 0;
+	buffer->char_sum = 0;
+	buffer->current_line = NULL;
+	buffer->current_length = 0;
+	buffer->columns = columns;
+	buffer->htab = htab;
+
+	/* initialize sccb */
+	memset(sccb, 0, sizeof(struct sccb_header));
+	sccb->length = sizeof(struct sccb_header);
+
+	return buffer;
+}
+
+/*
+ * Return a pointer to the original page that has been used to create
+ * the buffer.
+ */
+void *
+sclp_unmake_buffer(struct sclp_buffer *buffer)
+{
+	/* the sccb pointer is the start of the page (see sclp_make_buffer) */
+	return buffer->sccb;
+}
+
+/*
+ * Initialize a new message at the end of the provided buffer with
+ * enough room for max_len characters. Return 0 on success, or
+ * -ENOMEM if the message does not fit into the SCCB anymore.
+ */
+static int
+sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
+{
+	struct sccb_header *sccb;
+	struct msg_buf *msg;
+	struct mdb *mdb;
+	struct go *go;
+	struct mto *mto;
+	int msg_size;
+
+	/* max size of new message including message text  */
+	msg_size = sizeof(struct msg_buf) + max_len;
+
+	/* check if current buffer sccb can contain the mto */
+	sccb = buffer->sccb;
+	if ((MAX_SCCB_ROOM - sccb->length) < msg_size)
+		return -ENOMEM;
+
+	msg = (struct msg_buf *)((addr_t) sccb + sccb->length);
+	memset(msg, 0, sizeof(struct msg_buf));
+	msg->header.length = sizeof(struct msg_buf);
+	msg->header.type = EVTYP_MSG;
+
+	mdb = &msg->mdb;
+	mdb->header.length = sizeof(struct mdb);
+	mdb->header.type = 1;
+	mdb->header.tag = 0xD4C4C240;	/* ebcdic "MDB " */
+	mdb->header.revision_code = 1;
+
+	go = &mdb->go;
+	go->length = sizeof(struct go);
+	go->type = 1;
+
+	mto = &mdb->mto;
+	mto->length = sizeof(struct mto);
+	mto->type = 4;	/* message text object */
+	mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */
+
+	/* set pointer to first byte after struct mto. */
+	buffer->current_msg = msg;
+	buffer->current_line = (char *) (mto + 1);
+	buffer->current_length = 0;
+
+	return 0;
+}
+
+/*
+ * Finalize message initialized by sclp_initialize_mto(),
+ * updating the sizes of MTO, enclosing MDB, event buffer and SCCB.
+ * Afterwards the buffer has no message in progress.
+ */
+static void
+sclp_finalize_mto(struct sclp_buffer *buffer)
+{
+	struct sccb_header *sccb;
+	struct msg_buf *msg;
+
+	/*
+	 * update values of sizes
+	 * (SCCB, Event(Message) Buffer, Message Data Block)
+	 */
+	sccb = buffer->sccb;
+	msg = buffer->current_msg;
+	msg->header.length += buffer->current_length;
+	msg->mdb.header.length += buffer->current_length;
+	msg->mdb.mto.length += buffer->current_length;
+	sccb->length += msg->header.length;
+
+	/*
+	 * count number of buffered messages (= number of Message Text
+	 * Objects) and number of buffered characters
+	 * for the SCCB currently used for buffering and at all
+	 */
+	buffer->messages++;
+	buffer->char_sum += buffer->current_length;
+
+	buffer->current_line = NULL;
+	buffer->current_length = 0;
+	buffer->current_msg = NULL;
+}
+
+/*
+ * processing of a message including escape characters,
+ * returns number of characters written to the output sccb
+ * ("processed" means that it is not guaranteed that the characters have
+ *  already been sent to the SCLP but that it will be done at least next
+ *  time the SCLP is not busy)
+ */
+int
+sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
+{
+	int spaces, i_msg;
+	int rc;
+
+	/*
+	 * parse msg for escape sequences (\t,\v ...) and put formatted
+	 * msg into an mto (created by sclp_initialize_mto).
+	 *
+	 * We have to do this work ourselves because there is no support for
+	 * these characters on the native machine and only partial support
+	 * under VM (Why does VM interpret \n but the native machine doesn't ?)
+	 *
+	 * Depending on i/o-control setting the message is always written
+	 * immediately or we wait for a final new line maybe coming with the
+	 * next message. Besides we avoid a buffer overrun by writing its
+	 * content.
+	 *
+	 * RESTRICTIONS:
+	 *
+	 * \r and \b work within one line because we are not able to modify
+	 * previous output that have already been accepted by the SCLP.
+	 *
+	 * \t combined with following \r is not correctly represented because
+	 * \t is expanded to some spaces but \r does not know about a
+	 * previous \t and decreases the current position by one column.
+	 * This is in order to a slim and quick implementation.
+	 */
+	for (i_msg = 0; i_msg < count; i_msg++) {
+		switch (msg[i_msg]) {
+		case '\n':	/* new line, line feed (ASCII)	*/
+			/* check if new mto needs to be created */
+			if (buffer->current_line == NULL) {
+				rc = sclp_initialize_mto(buffer, 0);
+				if (rc)
+					return i_msg;
+			}
+			sclp_finalize_mto(buffer);
+			break;
+		case '\a':	/* bell, one for several times	*/
+			/* set SCLP sound alarm bit in General Object */
+			if (buffer->current_line == NULL) {
+				rc = sclp_initialize_mto(buffer,
+							 buffer->columns);
+				if (rc)
+					return i_msg;
+			}
+			buffer->current_msg->mdb.go.general_msg_flags |=
+				GNRLMSGFLGS_SNDALRM;
+			break;
+		case '\t':	/* horizontal tabulator	 */
+			/* check if new mto needs to be created */
+			if (buffer->current_line == NULL) {
+				rc = sclp_initialize_mto(buffer,
+							 buffer->columns);
+				if (rc)
+					return i_msg;
+			}
+			/* "go to (next htab-boundary + 1, same line)" */
+			do {
+				if (buffer->current_length >= buffer->columns)
+					break;
+				/* ok, add a blank (0x40 == EBCDIC space) */
+				*buffer->current_line++ = 0x40;
+				buffer->current_length++;
+			} while (buffer->current_length % buffer->htab);
+			break;
+		case '\f':	/* form feed  */
+		case '\v':	/* vertical tabulator  */
+			/* "go to (actual column, actual line + 1)" */
+			/* = new line, leading spaces */
+			if (buffer->current_line != NULL) {
+				spaces = buffer->current_length;
+				sclp_finalize_mto(buffer);
+				rc = sclp_initialize_mto(buffer,
+							 buffer->columns);
+				if (rc)
+					return i_msg;
+				memset(buffer->current_line, 0x40, spaces);
+				buffer->current_line += spaces;
+				buffer->current_length = spaces;
+			} else {
+				/* on an empty line this is the same as \n */
+				rc = sclp_initialize_mto(buffer,
+							 buffer->columns);
+				if (rc)
+					return i_msg;
+				sclp_finalize_mto(buffer);
+			}
+			break;
+		case '\b':	/* backspace  */
+			/* "go to (actual column - 1, actual line)" */
+			/* decrement counter indicating position, */
+			/* do not remove last character */
+			if (buffer->current_line != NULL &&
+			    buffer->current_length > 0) {
+				buffer->current_length--;
+				buffer->current_line--;
+			}
+			break;
+		case 0x00:	/* end of string  */
+			/* transfer current line to SCCB */
+			if (buffer->current_line != NULL)
+				sclp_finalize_mto(buffer);
+			/* skip the rest of the message including the 0 byte */
+			i_msg = count - 1;
+			break;
+		default:	/* no escape character	*/
+			/* do not output unprintable characters */
+			if (!isprint(msg[i_msg]))
+				break;
+			/* check if new mto needs to be created */
+			if (buffer->current_line == NULL) {
+				rc = sclp_initialize_mto(buffer,
+							 buffer->columns);
+				if (rc)
+					return i_msg;
+			}
+			*buffer->current_line++ = sclp_ascebc(msg[i_msg]);
+			buffer->current_length++;
+			break;
+		}
+		/* check if current mto is full */
+		if (buffer->current_line != NULL &&
+		    buffer->current_length >= buffer->columns)
+			sclp_finalize_mto(buffer);
+	}
+
+	/* return number of processed characters */
+	return i_msg;
+}
+
+/*
+ * Return the number of free bytes in the sccb
+ * (accounting for the message currently in progress, if any).
+ */
+int
+sclp_buffer_space(struct sclp_buffer *buffer)
+{
+	struct sccb_header *sccb;
+	int count;
+
+	sccb = buffer->sccb;
+	count = MAX_SCCB_ROOM - sccb->length;
+	if (buffer->current_line != NULL)
+		count -= sizeof(struct msg_buf) + buffer->current_length;
+	return count;
+}
+
+/*
+ * Return number of characters in buffer,
+ * including the not-yet-finalized current line.
+ */
+int
+sclp_chars_in_buffer(struct sclp_buffer *buffer)
+{
+	int count;
+
+	count = buffer->char_sum;
+	if (buffer->current_line != NULL)
+		count += buffer->current_length;
+	return count;
+}
+
+/*
+ * sets or provides some values that influence the drivers behaviour
+ * Set the line width; finalize the current line if it no longer fits.
+ */
+void
+sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns)
+{
+	buffer->columns = columns;
+	if (buffer->current_line != NULL &&
+	    buffer->current_length > buffer->columns)
+		sclp_finalize_mto(buffer);
+}
+
+/* Set the horizontal-tab stop distance used by sclp_write(). */
+void
+sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab)
+{
+	buffer->htab = htab;
+}
+
+/*
+ * called by sclp_console_init and/or sclp_tty_init
+ * Registers the write-message event type; idempotent, so repeated
+ * calls after a successful registration return 0 immediately.
+ */
+int
+sclp_rw_init(void)
+{
+	static int init_done = 0;
+	int rc;
+
+	if (init_done)
+		return 0;
+
+	rc = sclp_register(&sclp_rw_event);
+	if (rc == 0)
+		init_done = 1;
+	return rc;
+}
+
+#define SCLP_BUFFER_MAX_RETRY		1
+
+/*
+ * second half of Write Event Data-function that has to be done after
+ * interruption indicating completion of Service Call.
+ * @request: the completed write request
+ * @data:    the struct sclp_buffer the request belongs to
+ * Retries recoverable failures up to SCLP_BUFFER_MAX_RETRY times,
+ * then reports the final result through buffer->callback.
+ */
+static void
+sclp_writedata_callback(struct sclp_req *request, void *data)
+{
+	int rc;
+	struct sclp_buffer *buffer;
+	struct sccb_header *sccb;
+
+	buffer = (struct sclp_buffer *) data;
+	sccb = buffer->sccb;
+
+	if (request->status == SCLP_REQ_FAILED) {
+		if (buffer->callback != NULL)
+			buffer->callback(buffer, -EIO);
+		return;
+	}
+	/* check SCLP response code and choose suitable action	*/
+	switch (sccb->response_code) {
+	case 0x0020 :
+		/* Normal completion, buffer processed, message(s) sent */
+		rc = 0;
+		break;
+
+	case 0x0340: /* Contained SCLP equipment check */
+		if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
+			rc = -EIO;
+			break;
+		}
+		/* remove processed buffers and requeue rest */
+		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
+			/* not all buffers were processed */
+			sccb->response_code = 0x0000;
+			buffer->request.status = SCLP_REQ_FILLED;
+			rc = sclp_add_request(request);
+			if (rc == 0)
+				return;
+		} else
+			rc = 0;
+		break;
+
+	case 0x0040: /* SCLP equipment check */
+	case 0x05f0: /* Target resource in improper state */
+		if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
+			rc = -EIO;
+			break;
+		}
+		/* retry request */
+		sccb->response_code = 0x0000;
+		buffer->request.status = SCLP_REQ_FILLED;
+		rc = sclp_add_request(request);
+		if (rc == 0)
+			return;
+		break;
+	default:
+		if (sccb->response_code == 0x71f0)
+			rc = -ENOMEM;
+		else
+			rc = -EINVAL;
+		break;
+	}
+	if (buffer->callback != NULL)
+		buffer->callback(buffer, rc);
+}
+
+/*
+ * Setup the request structure in the struct sclp_buffer to do SCLP Write
+ * Event Data and pass the request to the core SCLP loop. Return zero on
+ * success, non-zero otherwise. @callback is invoked with the final
+ * status once the request completes (see sclp_writedata_callback).
+ */
+int
+sclp_emit_buffer(struct sclp_buffer *buffer,
+		 void (*callback)(struct sclp_buffer *, int))
+{
+	/* add current line if there is one */
+	if (buffer->current_line != NULL)
+		sclp_finalize_mto(buffer);
+
+	/* Are there messages in the output buffer ? */
+	if (buffer->messages == 0)
+		return -EIO;
+
+	/* the buffer page itself doubles as the SCCB handed to the SCLP */
+	buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
+	buffer->request.status = SCLP_REQ_FILLED;
+	buffer->request.callback = sclp_writedata_callback;
+	buffer->request.callback_data = buffer;
+	buffer->request.sccb = buffer->sccb;
+	buffer->callback = callback;
+	return sclp_add_request(&buffer->request);
+}
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_rw.h b/src/kernel/linux/v4.14/drivers/s390/char/sclp_rw.h
new file mode 100644
index 0000000..a2eb22f
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_rw.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * interface to the SCLP-read/write driver
+ *
+ * Copyright IBM Corporation 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __SCLP_RW_H__
+#define __SCLP_RW_H__
+
+#include <linux/list.h>
+
+/*
+ * Wire-format structures for SCLP Write Event Data messages; all are
+ * __packed because their layout is dictated by the SCLP interface.
+ */
+
+/* Per-line message text object header ("mto"); text follows the header. */
+struct mto {
+	u16 length;
+	u16 type;
+	u16 line_type_flags;
+	u8  alarm_control;
+	u8  _reserved[3];
+} __attribute__((packed));
+
+/* General object ("go"): per-message time stamp, date and origin fields. */
+struct go {
+	u16 length;
+	u16 type;
+	u32 domid;
+	u8  hhmmss_time[8];
+	u8  th_time[3];
+	u8  reserved_0;
+	u8  dddyyyy_date[7];
+	u8  _reserved_1;
+	u16 general_msg_flags;
+	u8  _reserved_2[10];
+	u8  originating_system_name[8];
+	u8  job_guest_name[8];
+} __attribute__((packed));
+
+/* Header of a message data block (MDB). */
+struct mdb_header {
+	u16 length;
+	u16 type;
+	u32 tag;
+	u32 revision_code;
+} __attribute__((packed));
+
+/* One complete message data block: header, general object, text object. */
+struct mdb {
+	struct mdb_header header;
+	struct go go;
+	struct mto mto;
+} __attribute__((packed));
+
+/* One event buffer carrying a single MDB. */
+struct msg_buf {
+	struct evbuf_header header;
+	struct mdb mdb;
+} __attribute__((packed));
+
+/* The number of empty mto buffers that can be contained in a single sccb. */
+#define NR_EMPTY_MSG_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \
+			sizeof(struct sccb_header)) / sizeof(struct msg_buf))
+
+/*
+ * data structure for information about list of SCCBs (only for writing),
+ * will be located at the end of a SCCBs page
+ */
+struct sclp_buffer {
+	struct list_head list;		/* list_head for sccb_info chain */
+	struct sclp_req request;	/* embedded SCLP core request */
+	void *sccb;			/* page holding the SCCB being built */
+	struct msg_buf *current_msg;	/* message currently being filled */
+	char *current_line;		/* start of the line being assembled */
+	int current_length;		/* # chars already on current_line */
+	int retry_count;		/* write retries, see sclp_writedata_callback */
+	/* output format settings */
+	unsigned short columns;		/* line width for wrapping */
+	unsigned short htab;		/* tab stop distance */
+	/* statistics about this buffer */
+	unsigned int char_sum;		/* # chars in sccb */
+	unsigned int messages;		/* # messages in sccb */
+	/* Callback that is called after reaching final status. */
+	void (*callback)(struct sclp_buffer *, int);
+};
+
+/* Public interface of the SCLP read/write driver (see sclp_rw.c). */
+int sclp_rw_init(void);
+/* Turn a free page into / back from a struct sclp_buffer. */
+struct sclp_buffer *sclp_make_buffer(void *, unsigned short, unsigned short);
+void *sclp_unmake_buffer(struct sclp_buffer *);
+int sclp_buffer_space(struct sclp_buffer *);
+int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int);
+int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int));
+void sclp_set_columns(struct sclp_buffer *, unsigned short);
+void sclp_set_htab(struct sclp_buffer *, unsigned short);
+int sclp_chars_in_buffer(struct sclp_buffer *);
+
+/* PM event hook; stubbed out when the SCLP console is not configured. */
+#ifdef CONFIG_SCLP_CONSOLE
+void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event);
+#else
+static inline void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) { }
+#endif
+
+#endif	/* __SCLP_RW_H__ */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_sdias.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_sdias.c
new file mode 100644
index 0000000..8e0b69a
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_sdias.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCLP "store data in absolute storage"
+ *
+ * Copyright IBM Corp. 2003, 2013
+ * Author(s): Michael Holzheu
+ */
+
+#define KMSG_COMPONENT "sclp_sdias"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/completion.h>
+#include <linux/sched.h>
+#include <asm/sclp.h>
+#include <asm/debug.h>
+#include <asm/ipl.h>
+
+#include "sclp_sdias.h"
+#include "sclp.h"
+#include "sclp_rw.h"
+
+#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x)
+
+#define SDIAS_RETRIES 300
+
+static struct debug_info *sdias_dbf;
+
+static struct sclp_register sclp_sdias_register = {
+	.send_mask = EVTYP_SDIAS_MASK,
+};
+
+static struct sdias_sccb sccb __attribute__((aligned(4096)));
+static struct sdias_evbuf sdias_evbuf;
+
+static DECLARE_COMPLETION(evbuf_accepted);
+static DECLARE_COMPLETION(evbuf_done);
+static DEFINE_MUTEX(sdias_mutex);
+
+/*
+ * Called by SCLP base when read event data has been completed (async mode only)
+ *
+ * Copies the event buffer into the static sdias_evbuf (bounded by min_t so
+ * an oversized event cannot overflow it) and wakes the waiter in
+ * sdias_sclp_send().
+ */
+static void sclp_sdias_receiver_fn(struct evbuf_header *evbuf)
+{
+	memcpy(&sdias_evbuf, evbuf,
+	       min_t(unsigned long, sizeof(sdias_evbuf), evbuf->length));
+	complete(&evbuf_done);
+	TRACE("sclp_sdias_receiver_fn done\n");
+}
+
+/*
+ * Called by SCLP base when sdias event has been accepted
+ * (request reached final status; wakes sdias_sclp_send()).
+ */
+static void sdias_callback(struct sclp_req *request, void *data)
+{
+	complete(&evbuf_accepted);
+	TRACE("callback done\n");
+}
+
+/*
+ * Submit @req to the SCLP core and wait for its completion.
+ * Retries up to SDIAS_RETRIES times, sleeping 500ms between attempts, when
+ * the request cannot be queued, fails, or is not accepted by the SCLP.
+ * Returns 0 on success, -EIO when all retries are exhausted.
+ */
+static int sdias_sclp_send(struct sclp_req *req)
+{
+	int retries;
+	int rc;
+
+	for (retries = SDIAS_RETRIES; retries; retries--) {
+		TRACE("add request\n");
+		rc = sclp_add_request(req);
+		if (rc) {
+			/* not initiated, wait some time and retry */
+			set_current_state(TASK_INTERRUPTIBLE);
+			TRACE("add request failed: rc = %i\n",rc);
+			schedule_timeout(msecs_to_jiffies(500));
+			continue;
+		}
+		/* initiated, wait for completion of service call */
+		wait_for_completion(&evbuf_accepted);
+		if (req->status == SCLP_REQ_FAILED) {
+			TRACE("sclp request failed\n");
+			continue;
+		}
+		/* if not accepted, retry (0x80 = buffer-processed flag) */
+		if (!(sccb.evbuf.hdr.flags & 0x80)) {
+			TRACE("sclp request failed: flags=%x\n",
+			      sccb.evbuf.hdr.flags);
+			continue;
+		}
+		/*
+		 * for the sync interface the response is in the initial sccb
+		 */
+		if (!sclp_sdias_register.receiver_fn) {
+			memcpy(&sdias_evbuf, &sccb.evbuf, sizeof(sdias_evbuf));
+			TRACE("sync request done\n");
+			return 0;
+		}
+		/* otherwise we wait for completion (sclp_sdias_receiver_fn) */
+		wait_for_completion(&evbuf_done);
+		TRACE("request done\n");
+		return 0;
+	}
+	return -EIO;
+}
+
+/*
+ * Get number of blocks (4K) available in the HSA
+ *
+ * Sends a WRITE EVENT DATA request with event qualifier SDIAS_EQ_SIZE and
+ * reads the block count from sdias_evbuf (filled synchronously from the
+ * sccb or asynchronously by sclp_sdias_receiver_fn()).
+ * Returns the number of 4K blocks, or a negative errno on failure.
+ */
+int sclp_sdias_blk_count(void)
+{
+	struct sclp_req request;
+	int rc;
+
+	mutex_lock(&sdias_mutex);
+
+	memset(&sccb, 0, sizeof(sccb));
+	memset(&request, 0, sizeof(request));
+
+	sccb.hdr.length = sizeof(sccb);
+	sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
+	sccb.evbuf.hdr.type = EVTYP_SDIAS;
+	sccb.evbuf.event_qual = SDIAS_EQ_SIZE;
+	sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
+	sccb.evbuf.event_id = 4712;	/* same request tag as sclp_sdias_copy() */
+	sccb.evbuf.dbs = 1;
+
+	request.sccb = &sccb;
+	request.command = SCLP_CMDW_WRITE_EVENT_DATA;
+	request.status = SCLP_REQ_FILLED;
+	request.callback = sdias_callback;
+
+	rc = sdias_sclp_send(&request);
+	if (rc) {
+		pr_err("sclp_send failed for get_nr_blocks\n");
+		goto out;
+	}
+	if (sccb.hdr.response_code != 0x0020) {
+		TRACE("send failed: %x\n", sccb.hdr.response_code);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* Case labels reindented to kernel style, matching the switch in
+	 * sclp_sdias_copy(); behavior unchanged. */
+	switch (sdias_evbuf.event_status) {
+	case 0:
+		rc = sdias_evbuf.blk_cnt;
+		break;
+	default:
+		pr_err("SCLP error: %x\n", sdias_evbuf.event_status);
+		rc = -EIO;
+		goto out;
+	}
+	TRACE("%i blocks\n", rc);
+out:
+	mutex_unlock(&sdias_mutex);
+	return rc;
+}
+
+/*
+ * Copy from HSA to absolute storage (not reentrant):
+ *
+ * @dest     : Address of buffer where data should be copied
+ * @start_blk: Start Block (beginning with 1)
+ * @nr_blks  : Number of 4K blocks to copy
+ *
+ * Return Value: 0 : Requested 'number' of blocks of data copied
+ *		 <0: ERROR - negative event status
+ */
+int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
+{
+	struct sclp_req request;
+	int rc;
+
+	mutex_lock(&sdias_mutex);
+
+	/* sccb and sdias_evbuf are file-scope statics; the mutex serializes
+	 * their use (hence "not reentrant"). */
+	memset(&sccb, 0, sizeof(sccb));
+	memset(&request, 0, sizeof(request));
+
+	sccb.hdr.length = sizeof(sccb);
+	sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
+	sccb.evbuf.hdr.type = EVTYP_SDIAS;
+	sccb.evbuf.hdr.flags = 0;
+	sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA;
+	sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
+	sccb.evbuf.event_id = 4712;	/* same request tag as sclp_sdias_blk_count() */
+	sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64;
+	sccb.evbuf.event_status = 0;
+	sccb.evbuf.blk_cnt = nr_blks;
+	sccb.evbuf.asa = (unsigned long)dest;
+	sccb.evbuf.fbn = start_blk;
+	sccb.evbuf.lbn = 0;
+	sccb.evbuf.dbs = 1;
+
+	request.sccb	 = &sccb;
+	request.command  = SCLP_CMDW_WRITE_EVENT_DATA;
+	request.status	 = SCLP_REQ_FILLED;
+	request.callback = sdias_callback;
+
+	rc = sdias_sclp_send(&request);
+	if (rc) {
+		pr_err("sclp_send failed: %x\n", rc);
+		goto out;
+	}
+	if (sccb.hdr.response_code != 0x0020) {
+		TRACE("copy failed: %x\n", sccb.hdr.response_code);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* Result arrives in sdias_evbuf (sync or via receiver callback). */
+	switch (sdias_evbuf.event_status) {
+	case SDIAS_EVSTATE_ALL_STORED:
+		TRACE("all stored\n");
+		break;
+	case SDIAS_EVSTATE_PART_STORED:
+		TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
+		break;
+	case SDIAS_EVSTATE_NO_DATA:
+		TRACE("no data\n");
+		/* fall through */
+	default:
+		pr_err("Error from SCLP while copying hsa. Event status = %x\n",
+		       sdias_evbuf.event_status);
+		rc = -EIO;
+	}
+out:
+	mutex_unlock(&sdias_mutex);
+	return rc;
+}
+
+/*
+ * Register the SDIAS event listener and verify that the HSA actually has
+ * blocks available; unregister and report -ENODEV when it does not.
+ * NOTE(review): a negative error from sclp_sdias_blk_count() is not == 0
+ * and therefore counts as success here - confirm that is intended.
+ */
+static int __init sclp_sdias_register_check(void)
+{
+	int rc;
+
+	rc = sclp_register(&sclp_sdias_register);
+	if (rc)
+		return rc;
+	if (sclp_sdias_blk_count() == 0) {
+		sclp_unregister(&sclp_sdias_register);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/* Probe the synchronous interface: no receive mask, no receiver callback. */
+static int __init sclp_sdias_init_sync(void)
+{
+	TRACE("Try synchronous mode\n");
+	sclp_sdias_register.receive_mask = 0;
+	sclp_sdias_register.receiver_fn = NULL;
+	return sclp_sdias_register_check();
+}
+
+/* Probe the asynchronous interface: responses delivered via receiver_fn. */
+static int __init sclp_sdias_init_async(void)
+{
+	TRACE("Try asynchronous mode\n");
+	sclp_sdias_register.receive_mask = EVTYP_SDIAS_MASK;
+	sclp_sdias_register.receiver_fn = sclp_sdias_receiver_fn;
+	return sclp_sdias_register_check();
+}
+
+/*
+ * Initialize the SDIAS interface. Only active for FCP dump IPLs; tries the
+ * synchronous event interface first, then falls back to asynchronous mode.
+ * Returns 0 on success (or when not an FCP dump), -ENODEV otherwise.
+ */
+int __init sclp_sdias_init(void)
+{
+	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+		return 0;
+	/* NOTE(review): debug_register() result is not checked before use -
+	 * confirm the debug_* helpers tolerate a NULL debug_info pointer. */
+	sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
+	debug_register_view(sdias_dbf, &debug_sprintf_view);
+	debug_set_level(sdias_dbf, 6);
+	if (sclp_sdias_init_sync() == 0)
+		goto out;
+	if (sclp_sdias_init_async() == 0)
+		goto out;
+	TRACE("init failed\n");
+	return -ENODEV;
+out:
+	TRACE("init done\n");
+	return 0;
+}
+
+/* Tear down: drop the debug area and unregister the SDIAS event listener. */
+void __exit sclp_sdias_exit(void)
+{
+	debug_unregister(sdias_dbf);
+	sclp_unregister(&sclp_sdias_register);
+}
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_sdias.h b/src/kernel/linux/v4.14/drivers/s390/char/sclp_sdias.h
new file mode 100644
index 0000000..bc36cf8
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_sdias.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SCLP "store data in absolute storage"
+ *
+ * Copyright IBM Corp. 2003, 2013
+ */
+
+#ifndef SCLP_SDIAS_H
+#define SCLP_SDIAS_H
+
+#include "sclp.h"
+
+/* Event qualifiers (event_qual): store data vs. query HSA size. */
+#define SDIAS_EQ_STORE_DATA		0x0
+#define SDIAS_EQ_SIZE			0x1
+/* Data id: FCP dump data. */
+#define SDIAS_DI_FCP_DUMP		0x0
+/* Absolute storage address format (asa_size). */
+#define SDIAS_ASA_SIZE_32		0x0
+#define SDIAS_ASA_SIZE_64		0x1
+/* Event status values reported back in sdias_evbuf.event_status. */
+#define SDIAS_EVSTATE_ALL_STORED	0x0
+#define SDIAS_EVSTATE_NO_DATA		0x3
+#define SDIAS_EVSTATE_PART_STORED	0x10
+
+/* SDIAS event buffer (wire format, see usage in sclp_sdias.c). */
+struct sdias_evbuf {
+	struct	evbuf_header hdr;
+	u8	event_qual;	/* SDIAS_EQ_* */
+	u8	data_id;	/* SDIAS_DI_* */
+	u64	reserved2;
+	u32	event_id;	/* caller-chosen request tag */
+	u16	reserved3;
+	u8	asa_size;	/* SDIAS_ASA_SIZE_* */
+	u8	event_status;	/* SDIAS_EVSTATE_*, filled by SCLP */
+	u32	reserved4;
+	u32	blk_cnt;	/* number of 4K blocks */
+	u64	asa;		/* absolute storage address (destination) */
+	u32	reserved5;
+	u32	fbn;		/* first block number, starting at 1 */
+	u32	reserved6;
+	u32	lbn;		/* presumably last block number - set to 0 by callers */
+	u16	reserved7;
+	u16	dbs;
+} __packed;
+
+/* Complete SCCB carrying a single SDIAS event buffer. */
+struct sdias_sccb {
+	struct sccb_header	hdr;
+	struct sdias_evbuf	evbuf;
+} __packed;
+
+#endif /* SCLP_SDIAS_H */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_tty.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_tty.c
new file mode 100644
index 0000000..875628d
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_tty.c
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    SCLP line mode terminal driver.
+ *
+ *  S390 version
+ *    Copyright IBM Corp. 1999
+ *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kmod.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/gfp.h>
+#include <linux/uaccess.h>
+
+#include "ctrlchar.h"
+#include "sclp.h"
+#include "sclp_rw.h"
+#include "sclp_tty.h"
+
+/*
+ * size of a buffer that collects single characters coming in
+ * via sclp_tty_put_char()
+ */
+#define SCLP_TTY_BUF_SIZE 512
+
+/*
+ * There is exactly one SCLP terminal, so we can keep things simple
+ * and allocate all variables statically.
+ */
+
+/* Lock to guard over changes to global variables. */
+static spinlock_t sclp_tty_lock;
+/* List of free pages that can be used for console output buffering. */
+static struct list_head sclp_tty_pages;
+/* List of full struct sclp_buffer structures ready for output. */
+static struct list_head sclp_tty_outqueue;
+/* Counter how many buffers are emitted. */
+static int sclp_tty_buffer_count;
+/* Pointer to current console buffer. */
+static struct sclp_buffer *sclp_ttybuf;
+/* Timer for delayed output of console messages. */
+static struct timer_list sclp_tty_timer;
+
+/* The single tty port and the staging buffer for put_char() characters. */
+static struct tty_port sclp_port;
+static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
+static unsigned short int sclp_tty_chars_count;
+
+/* Exported driver handle, set by sclp_tty_init(). */
+struct tty_driver *sclp_tty_driver;
+
+/* Case-folding flag and line width; adjusted for z/VM in sclp_tty_init(). */
+static int sclp_tty_tolower;
+static int sclp_tty_columns = 80;
+
+#define SPACES_PER_TAB 8
+#define CASE_DELIMITER 0x6c /* to separate upper and lower case (% in EBCDIC) */
+
+/* This routine is called whenever we try to open a SCLP terminal. */
+static int
+sclp_tty_open(struct tty_struct *tty, struct file *filp)
+{
+	/* Attach the tty to the single static port; no per-open state. */
+	tty_port_tty_set(&sclp_port, tty);
+	tty->driver_data = NULL;
+	sclp_port.low_latency = 0;
+	return 0;
+}
+
+/* This routine is called when the SCLP terminal is closed. */
+static void
+sclp_tty_close(struct tty_struct *tty, struct file *filp)
+{
+	/* Detach from the port only on the final close. */
+	if (tty->count > 1)
+		return;
+	tty_port_tty_set(&sclp_port, NULL);
+}
+
+/*
+ * This routine returns the numbers of characters the tty driver
+ * will accept for queuing to be written.  This number is subject
+ * to change as output buffers get emptied, or if the output flow
+ * control is acted. This is not an exact number because not every
+ * character needs the same space in the sccb. The worst case is
+ * a string of newlines. Every newline creates a new message which
+ * needs 82 bytes.
+ */
+static int
+sclp_tty_write_room (struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct list_head *l;
+	int count;
+
+	spin_lock_irqsave(&sclp_tty_lock, flags);
+	count = 0;
+	/* Room left in the partially filled buffer, in message units ... */
+	if (sclp_ttybuf != NULL)
+		count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct msg_buf);
+	/* ... plus the capacity of every free page. */
+	list_for_each(l, &sclp_tty_pages)
+		count += NR_EMPTY_MSG_PER_SCCB;
+	spin_unlock_irqrestore(&sclp_tty_lock, flags);
+	return count;
+}
+
+/*
+ * Write-completion callback for an emitted buffer: recycle its page onto
+ * the free list, then start the next queued buffer - looping again if that
+ * emit fails immediately - and wake up writers waiting for room.
+ * @rc is ignored; console output is best-effort.
+ */
+static void
+sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
+{
+	unsigned long flags;
+	void *page;
+
+	do {
+		page = sclp_unmake_buffer(buffer);
+		spin_lock_irqsave(&sclp_tty_lock, flags);
+		/* Remove buffer from outqueue */
+		list_del(&buffer->list);
+		sclp_tty_buffer_count--;
+		list_add_tail((struct list_head *) page, &sclp_tty_pages);
+		/* Check if there is a pending buffer on the out queue. */
+		buffer = NULL;
+		if (!list_empty(&sclp_tty_outqueue))
+			buffer = list_entry(sclp_tty_outqueue.next,
+					    struct sclp_buffer, list);
+		spin_unlock_irqrestore(&sclp_tty_lock, flags);
+	} while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback));
+
+	tty_port_tty_wakeup(&sclp_port);
+}
+
+/*
+ * Queue @buffer on the outqueue. Only the first queued buffer is emitted
+ * here; subsequent ones are started by sclp_ttybuf_callback() when their
+ * predecessor completes. An immediate emit failure is routed through the
+ * callback so the page still gets recycled.
+ */
+static inline void
+__sclp_ttybuf_emit(struct sclp_buffer *buffer)
+{
+	unsigned long flags;
+	int count;
+	int rc;
+
+	spin_lock_irqsave(&sclp_tty_lock, flags);
+	list_add_tail(&buffer->list, &sclp_tty_outqueue);
+	count = sclp_tty_buffer_count++;
+	spin_unlock_irqrestore(&sclp_tty_lock, flags);
+	/* Nonzero count: another buffer is in flight; the callback chain
+	 * will pick this one up. */
+	if (count)
+		return;
+	rc = sclp_emit_buffer(buffer, sclp_ttybuf_callback);
+	if (rc)
+		sclp_ttybuf_callback(buffer, rc);
+}
+
+/*
+ * When this routine is called from the timer then we flush the
+ * temporary write buffer.
+ */
+static void
+sclp_tty_timeout(unsigned long data)
+{
+	unsigned long flags;
+	struct sclp_buffer *buf;
+
+	/* Detach the current buffer under the lock, emit it outside. */
+	spin_lock_irqsave(&sclp_tty_lock, flags);
+	buf = sclp_ttybuf;
+	sclp_ttybuf = NULL;
+	spin_unlock_irqrestore(&sclp_tty_lock, flags);
+
+	if (buf != NULL) {
+		__sclp_ttybuf_emit(buf);
+	}
+}
+
+/*
+ * Write a string to the sclp tty.
+ *
+ * @str:      characters to write (not NUL-terminated)
+ * @count:    number of characters in @str
+ * @may_fail: nonzero - return a short count when no free output page is
+ *	      available; zero - busy-wait via sclp_sync_wait() for a page
+ *
+ * Returns the number of characters accepted.
+ */
+static int sclp_tty_write_string(const unsigned char *str, int count, int may_fail)
+{
+	unsigned long flags;
+	void *page;
+	int written;
+	int overall_written;
+	struct sclp_buffer *buf;
+
+	if (count <= 0)
+		return 0;
+	overall_written = 0;
+	spin_lock_irqsave(&sclp_tty_lock, flags);
+	do {
+		/* Create a sclp output buffer if none exists yet */
+		if (sclp_ttybuf == NULL) {
+			while (list_empty(&sclp_tty_pages)) {
+				spin_unlock_irqrestore(&sclp_tty_lock, flags);
+				if (may_fail)
+					goto out;
+				else
+					sclp_sync_wait();
+				spin_lock_irqsave(&sclp_tty_lock, flags);
+			}
+			page = sclp_tty_pages.next;
+			list_del((struct list_head *) page);
+			sclp_ttybuf = sclp_make_buffer(page, sclp_tty_columns,
+						       SPACES_PER_TAB);
+		}
+		/* try to write the string to the current output buffer */
+		written = sclp_write(sclp_ttybuf, str, count);
+		overall_written += written;
+		if (written == count)
+			break;
+		/*
+		 * Not all characters could be written to the current
+		 * output buffer. Emit the buffer, create a new buffer
+		 * and then output the rest of the string.
+		 */
+		buf = sclp_ttybuf;
+		sclp_ttybuf = NULL;
+		spin_unlock_irqrestore(&sclp_tty_lock, flags);
+		__sclp_ttybuf_emit(buf);
+		spin_lock_irqsave(&sclp_tty_lock, flags);
+		str += written;
+		count -= written;
+	} while (count > 0);
+	/* Setup timer to output current console buffer after 1/10 second */
+	if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
+	    !timer_pending(&sclp_tty_timer)) {
+		init_timer(&sclp_tty_timer);
+		sclp_tty_timer.function = sclp_tty_timeout;
+		sclp_tty_timer.data = 0UL;
+		sclp_tty_timer.expires = jiffies + HZ/10;
+		add_timer(&sclp_tty_timer);
+	}
+	spin_unlock_irqrestore(&sclp_tty_lock, flags);
+out:
+	return overall_written;
+}
+
+/*
+ * This routine is called by the kernel to write a series of characters to the
+ * tty device. The characters may come from user space or kernel space. This
+ * routine will return the number of characters actually accepted for writing.
+ */
+static int
+sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	/* Flush characters staged by sclp_tty_put_char() first so output
+	 * stays in order; that flush must not fail (may_fail = 0). */
+	if (sclp_tty_chars_count > 0) {
+		sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
+		sclp_tty_chars_count = 0;
+	}
+	/* The actual write may return a short count when no page is free. */
+	return sclp_tty_write_string(buf, count, 1);
+}
+
+/*
+ * This routine is called by the kernel to write a single character to the tty
+ * device. If the kernel uses this routine, it must call the flush_chars()
+ * routine (if defined) when it is done stuffing characters into the driver.
+ *
+ * Characters provided to sclp_tty_put_char() are buffered by the SCLP driver.
+ * If the given character is a '\n' the contents of the SCLP write buffer
+ * - including previous characters from sclp_tty_put_char() and strings from
+ * sclp_write() without final '\n' - will be written.
+ */
+static int
+sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	sclp_tty_chars[sclp_tty_chars_count++] = ch;
+	/* Flush on newline or when the staging buffer is full. */
+	if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
+		sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
+		sclp_tty_chars_count = 0;
+	}
+	/* Always reports the character as accepted. */
+	return 1;
+}
+
+/*
+ * This routine is called by the kernel after it has written a series of
+ * characters to the tty device using put_char().
+ */
+static void
+sclp_tty_flush_chars(struct tty_struct *tty)
+{
+	if (sclp_tty_chars_count > 0) {
+		sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
+		sclp_tty_chars_count = 0;
+	}
+}
+
+/*
+ * This routine returns the number of characters in the write buffer of the
+ * SCLP driver. The provided number includes all characters that are stored
+ * in the SCCB (will be written next time the SCLP is not busy) as well as
+ * characters in the write buffer (will not be written as long as there is a
+ * final line feed missing).
+ */
+static int
+sclp_tty_chars_in_buffer(struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct list_head *l;
+	struct sclp_buffer *t;
+	int count;
+
+	spin_lock_irqsave(&sclp_tty_lock, flags);
+	count = 0;
+	/* Characters in the buffer being filled plus all queued buffers. */
+	if (sclp_ttybuf != NULL)
+		count = sclp_chars_in_buffer(sclp_ttybuf);
+	list_for_each(l, &sclp_tty_outqueue) {
+		t = list_entry(l, struct sclp_buffer, list);
+		count += sclp_chars_in_buffer(t);
+	}
+	spin_unlock_irqrestore(&sclp_tty_lock, flags);
+	return count;
+}
+
+/*
+ * removes all content from buffers of low level driver
+ * NOTE(review): despite the name, this writes the staged put_char()
+ * characters out rather than discarding them - confirm that is intended.
+ */
+static void
+sclp_tty_flush_buffer(struct tty_struct *tty)
+{
+	if (sclp_tty_chars_count > 0) {
+		sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
+		sclp_tty_chars_count = 0;
+	}
+}
+
+/*
+ * push input to tty
+ *
+ * @buf:   ASCII input characters (already converted from EBCDIC)
+ * @count: number of characters in @buf
+ */
+static void
+sclp_tty_input(unsigned char* buf, unsigned int count)
+{
+	struct tty_struct *tty = tty_port_tty_get(&sclp_port);
+	unsigned int cchar;
+
+	/*
+	 * If this tty driver is currently closed
+	 * then throw the received input away.
+	 */
+	if (tty == NULL)
+		return;
+	cchar = ctrlchar_handle(buf, count, tty);
+	switch (cchar & CTRLCHAR_MASK) {
+	case CTRLCHAR_SYSRQ:
+		/* already handled by ctrlchar_handle() */
+		break;
+	case CTRLCHAR_CTRL:
+		tty_insert_flip_char(&sclp_port, cchar, TTY_NORMAL);
+		tty_flip_buffer_push(&sclp_port);
+		break;
+	case CTRLCHAR_NONE:
+		/* send (normal) input to line discipline */
+		/* a trailing "^n" (or "\252n", presumably an alternate
+		 * encoding of '^' - TODO confirm) suppresses the auto '\n' */
+		if (count < 2 ||
+		    (strncmp((const char *) buf + count - 2, "^n", 2) &&
+		     strncmp((const char *) buf + count - 2, "\252n", 2))) {
+			/* add the auto \n */
+			tty_insert_flip_string(&sclp_port, buf, count);
+			tty_insert_flip_char(&sclp_port, '\n', TTY_NORMAL);
+		} else
+			tty_insert_flip_string(&sclp_port, buf, count - 2);
+		tty_flip_buffer_push(&sclp_port);
+		break;
+	}
+	tty_kref_put(tty);
+}
+
+/*
+ * get an EBCDIC string in upper/lower case,
+ * find out characters in lower/upper case separated by a special character,
+ * modifies the original string in place,
+ * returns length of resulting string
+ */
+static int sclp_switch_cases(unsigned char *buf, int count)
+{
+	unsigned char *ip, *op;
+	int toggle;
+
+	/* initially changing case is off */
+	toggle = 0;
+	ip = op = buf;
+	while (count-- > 0) {
+		/* compare with special character */
+		if (*ip == CASE_DELIMITER) {
+			/* followed by another special character? */
+			if (count && ip[1] == CASE_DELIMITER) {
+				/*
+				 * ... then put a single copy of the special
+				 * character to the output string
+				 */
+				*op++ = *ip++;
+				count--;
+			} else
+				/*
+				 * ... special character followed by a normal
+				 * character toggles the case change behaviour
+				 */
+				toggle = ~toggle;
+			/* skip special character */
+			ip++;
+		} else
+			/* not the special character */
+			if (toggle)
+				/* but case switching is on */
+				if (sclp_tty_tolower)
+					/* switch to uppercase */
+					*op++ = _ebc_toupper[(int) *ip++];
+				else
+					/* switch to lowercase */
+					*op++ = _ebc_tolower[(int) *ip++];
+			else
+				/* no case switching, copy the character */
+				*op++ = *ip++;
+	}
+	/* return length of reformatted string. */
+	return op - buf;
+}
+
+/*
+ * Process one self-defined text message subvector: optionally lowercase,
+ * apply the case-toggle delimiter, convert EBCDIC to ASCII in place, then
+ * hand the result to the tty input path.
+ */
+static void sclp_get_input(struct gds_subvector *sv)
+{
+	unsigned char *str;
+	int count;
+
+	/* payload immediately follows the subvector header */
+	str = (unsigned char *) (sv + 1);
+	count = sv->length - sizeof(*sv);
+	if (sclp_tty_tolower)
+		EBC_TOLOWER(str, count);
+	count = sclp_switch_cases(str, count);
+	/* convert EBCDIC to ASCII (modify original input in SCCB) */
+	sclp_ebcasc_str(str, count);
+
+	/* transfer input to high level driver */
+	sclp_tty_input(str, count);
+}
+
+/* Walk the subvectors of a self-defined text message; key 0x30 carries the
+ * actual text payload handed to sclp_get_input(). */
+static inline void sclp_eval_selfdeftextmsg(struct gds_subvector *sv)
+{
+	void *end;
+
+	end = (void *) sv + sv->length;
+	for (sv = sv + 1; (void *) sv < end; sv = (void *) sv + sv->length)
+		if (sv->key == 0x30)
+			sclp_get_input(sv);
+}
+
+/* Walk a text command vector looking for self-defined text messages. */
+static inline void sclp_eval_textcmd(struct gds_vector *v)
+{
+	struct gds_subvector *sv;
+	void *end;
+
+	end = (void *) v + v->length;
+	for (sv = (struct gds_subvector *) (v + 1);
+	     (void *) sv < end; sv = (void *) sv + sv->length)
+		if (sv->key == GDS_KEY_SELFDEFTEXTMSG)
+			sclp_eval_selfdeftextmsg(sv);
+
+}
+
+/* Walk a CPMSU vector looking for text command vectors. */
+static inline void sclp_eval_cpmsu(struct gds_vector *v)
+{
+	void *end;
+
+	end = (void *) v + v->length;
+	for (v = v + 1; (void *) v < end; v = (void *) v + v->length)
+		if (v->gds_id == GDS_ID_TEXTCMD)
+			sclp_eval_textcmd(v);
+}
+
+
+/* Locate the CPMSU vector inside an MDSMU vector and process it. */
+static inline void sclp_eval_mdsmu(struct gds_vector *v)
+{
+	v = sclp_find_gds_vector(v + 1, (void *) v + v->length, GDS_ID_CPMSU);
+	if (v)
+		sclp_eval_cpmsu(v);
+}
+
+/* Entry point for incoming SCLP events: find the MDSMU vector in the event
+ * buffer and descend through the GDS vector hierarchy to the input text. */
+static void sclp_tty_receiver(struct evbuf_header *evbuf)
+{
+	struct gds_vector *v;
+
+	v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
+				 GDS_ID_MDSMU);
+	if (v)
+		sclp_eval_mdsmu(v);
+}
+
+/* Intentionally empty: state changes need no action for this driver. */
+static void
+sclp_tty_state_change(struct sclp_register *reg)
+{
+}
+
+/* Registration for operator command / priority message input events. */
+static struct sclp_register sclp_input_event =
+{
+	.receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK,
+	.state_change_fn = sclp_tty_state_change,
+	.receiver_fn = sclp_tty_receiver
+};
+
+/* tty operations implemented by this driver. */
+static const struct tty_operations sclp_ops = {
+	.open = sclp_tty_open,
+	.close = sclp_tty_close,
+	.write = sclp_tty_write,
+	.put_char = sclp_tty_put_char,
+	.flush_chars = sclp_tty_flush_chars,
+	.write_room = sclp_tty_write_room,
+	.chars_in_buffer = sclp_tty_chars_in_buffer,
+	.flush_buffer = sclp_tty_flush_buffer,
+};
+
+/*
+ * Initialize the SCLP line-mode tty: allocate output buffering pages,
+ * register the input event handler and the tty driver. Runs as a device
+ * initcall and does nothing unless the SCLP console is active.
+ *
+ * Bug fix vs. the original: every error path now frees the buffering
+ * pages already taken from get_zeroed_page() (they were leaked before),
+ * and a failed tty_register_driver() also unregisters sclp_input_event.
+ */
+static int __init
+sclp_tty_init(void)
+{
+	struct tty_driver *driver;
+	void *page;
+	int i;
+	int rc;
+
+	if (!CONSOLE_IS_SCLP)
+		return 0;
+	driver = alloc_tty_driver(1);
+	if (!driver)
+		return -ENOMEM;
+
+	rc = sclp_rw_init();
+	if (rc) {
+		put_tty_driver(driver);
+		return rc;
+	}
+	/* Allocate pages for output buffering */
+	INIT_LIST_HEAD(&sclp_tty_pages);
+	for (i = 0; i < MAX_KMEM_PAGES; i++) {
+		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		if (page == NULL) {
+			rc = -ENOMEM;
+			goto out_pages;
+		}
+		list_add_tail((struct list_head *) page, &sclp_tty_pages);
+	}
+	INIT_LIST_HEAD(&sclp_tty_outqueue);
+	spin_lock_init(&sclp_tty_lock);
+	init_timer(&sclp_tty_timer);
+	sclp_ttybuf = NULL;
+	sclp_tty_buffer_count = 0;
+	if (MACHINE_IS_VM) {
+		/*
+		 * save 4 characters for the CPU number
+		 * written at start of each line by VM/CP
+		 */
+		sclp_tty_columns = 76;
+		/* case input lines to lowercase */
+		sclp_tty_tolower = 1;
+	}
+	sclp_tty_chars_count = 0;
+
+	rc = sclp_register(&sclp_input_event);
+	if (rc)
+		goto out_pages;
+
+	tty_port_init(&sclp_port);
+
+	driver->driver_name = "sclp_line";
+	driver->name = "sclp_line";
+	driver->major = TTY_MAJOR;
+	driver->minor_start = 64;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	driver->init_termios.c_iflag = IGNBRK | IGNPAR;
+	driver->init_termios.c_oflag = ONLCR;
+	driver->init_termios.c_lflag = ISIG | ECHO;
+	driver->flags = TTY_DRIVER_REAL_RAW;
+	tty_set_operations(driver, &sclp_ops);
+	tty_port_link_device(&sclp_port, driver, 0);
+	rc = tty_register_driver(driver);
+	if (rc) {
+		tty_port_destroy(&sclp_port);
+		sclp_unregister(&sclp_input_event);
+		goto out_pages;
+	}
+	sclp_tty_driver = driver;
+	return 0;
+
+out_pages:
+	/* Release all buffering pages queued so far. */
+	while (!list_empty(&sclp_tty_pages)) {
+		page = sclp_tty_pages.next;
+		list_del((struct list_head *) page);
+		free_page((unsigned long) page);
+	}
+	put_tty_driver(driver);
+	return rc;
+}
+device_initcall(sclp_tty_init);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_tty.h b/src/kernel/linux/v4.14/drivers/s390/char/sclp_tty.h
new file mode 100644
index 0000000..0fa2d59
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_tty.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    interface to the SCLP-read/write driver
+ *
+ *  S390 version
+ *    Copyright IBM Corp. 1999
+ *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __SCLP_TTY_H__
+#define __SCLP_TTY_H__
+
+#include <linux/tty_driver.h>
+
+/* tty driver handle; set by sclp_tty_init() in sclp_tty.c. */
+extern struct tty_driver *sclp_tty_driver;
+
+#endif	/* __SCLP_TTY_H__ */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/sclp_vt220.c b/src/kernel/linux/v4.14/drivers/s390/char/sclp_vt220.c
new file mode 100644
index 0000000..e84395d
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/sclp_vt220.c
@@ -0,0 +1,900 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCLP VT220 terminal driver.
+ *
+ * Copyright IBM Corp. 2003, 2009
+ *
+ * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/kdev_t.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+#include <linux/uaccess.h>
+#include "sclp.h"
+#include "ctrlchar.h"
+
+#define SCLP_VT220_MAJOR		TTY_MAJOR
+#define SCLP_VT220_MINOR		65
+#define SCLP_VT220_DRIVER_NAME		"sclp_vt220"
+#define SCLP_VT220_DEVICE_NAME		"ttysclp"
+#define SCLP_VT220_CONSOLE_NAME		"ttyS"
+#define SCLP_VT220_CONSOLE_INDEX	1	/* console=ttyS1 */
+
+/* Representation of a single write request */
+struct sclp_vt220_request {
+	struct list_head list;
+	struct sclp_req sclp_req;
+	int retry_count;
+};
+
+/* VT220 SCCB */
+struct sclp_vt220_sccb {
+	struct sccb_header header;
+	struct evbuf_header evbuf;
+};
+
+#define SCLP_VT220_MAX_CHARS_PER_BUFFER	(PAGE_SIZE - \
+					 sizeof(struct sclp_vt220_request) - \
+					 sizeof(struct sclp_vt220_sccb))
+
+/* Structures and data needed to register tty driver */
+static struct tty_driver *sclp_vt220_driver;
+
+static struct tty_port sclp_vt220_port;
+
+/* Lock to protect internal data from concurrent access */
+static spinlock_t sclp_vt220_lock;
+
+/* List of empty pages to be used as write request buffers */
+static struct list_head sclp_vt220_empty;
+
+/* List of pending requests */
+static struct list_head sclp_vt220_outqueue;
+
+/* Suspend mode flag */
+static int sclp_vt220_suspended;
+
+/* Flag that output queue is currently running */
+static int sclp_vt220_queue_running;
+
+/* Timer used for delaying write requests to merge subsequent messages into
+ * a single buffer */
+static struct timer_list sclp_vt220_timer;
+
+/* Pointer to current request buffer which has been partially filled but not
+ * yet sent */
+static struct sclp_vt220_request *sclp_vt220_current_request;
+
+/* Number of characters in current request buffer */
+static int sclp_vt220_buffered_chars;
+
+/* Counter controlling core driver initialization. */
+static int __initdata sclp_vt220_init_count;
+
+/* Flag indicating that sclp_vt220_current_request should really
+ * have been already queued but wasn't because the SCLP was processing
+ * another buffer */
+static int sclp_vt220_flush_later;
+
+static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
+static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
+				   enum sclp_pm_event sclp_pm_event);
+static int __sclp_vt220_emit(struct sclp_vt220_request *request);
+static void sclp_vt220_emit_current(void);
+
+/* Registration structure for SCLP output event buffers */
+static struct sclp_register sclp_vt220_register = {
+	.send_mask		= EVTYP_VT220MSG_MASK,
+	.pm_event_fn		= sclp_vt220_pm_event_fn,
+};
+
+/* Registration structure for SCLP input event buffers */
+static struct sclp_register sclp_vt220_register_input = {
+	.receive_mask		= EVTYP_VT220MSG_MASK,
+	.receiver_fn		= sclp_vt220_receiver_fn,
+};
+
+
+/*
+ * Put the just-completed request's page back on the empty list and keep
+ * emitting queued buffers until one is accepted by the SCLP core or the
+ * out queue is drained.  Runs from the request callback context.
+ */
+static void
+sclp_vt220_process_queue(struct sclp_vt220_request *request)
+{
+	unsigned long flags;
+	void *page;
+
+	do {
+		/* Put buffer back to list of empty buffers */
+		page = request->sclp_req.sccb;
+		spin_lock_irqsave(&sclp_vt220_lock, flags);
+		/* Move request from outqueue to empty queue */
+		list_del(&request->list);
+		/* The page itself doubles as the list node when free. */
+		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
+		/* Check if there is a pending buffer on the out queue. */
+		request = NULL;
+		if (!list_empty(&sclp_vt220_outqueue))
+			request = list_entry(sclp_vt220_outqueue.next,
+					     struct sclp_vt220_request, list);
+		/* Stop the queue when nothing is pending or while suspended. */
+		if (!request || sclp_vt220_suspended) {
+			sclp_vt220_queue_running = 0;
+			spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	} while (__sclp_vt220_emit(request));	/* loop again if emit failed */
+	/* Queue drained: push out a partially filled buffer that was held back. */
+	if (request == NULL && sclp_vt220_flush_later)
+		sclp_vt220_emit_current();
+	/* Buffers were freed - let writers blocked on write_room continue. */
+	tty_port_tty_wakeup(&sclp_vt220_port);
+}
+
+#define SCLP_BUFFER_MAX_RETRY		1
+
+/*
+ * Callback through which the result of a write request is reported by the
+ * SCLP.  Retries recoverable equipment checks up to SCLP_BUFFER_MAX_RETRY
+ * times; in all terminal cases the request is recycled via
+ * sclp_vt220_process_queue().
+ */
+static void
+sclp_vt220_callback(struct sclp_req *request, void *data)
+{
+	struct sclp_vt220_request *vt220_request;
+	struct sclp_vt220_sccb *sccb;
+
+	vt220_request = (struct sclp_vt220_request *) data;
+	/* Transport-level failure: no response code to inspect, just recycle. */
+	if (request->status == SCLP_REQ_FAILED) {
+		sclp_vt220_process_queue(vt220_request);
+		return;
+	}
+	sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;
+
+	/* Check SCLP response code and choose suitable action	*/
+	switch (sccb->header.response_code) {
+	case 0x0020 :	/* normal completion */
+		break;
+
+	case 0x05f0: /* Target resource in improper state */
+		break;
+
+	case 0x0340: /* Contained SCLP equipment check */
+		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
+			break;
+		/* Remove processed buffers and requeue rest */
+		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
+			/* Not all buffers were processed */
+			sccb->header.response_code = 0x0000;
+			vt220_request->sclp_req.status = SCLP_REQ_FILLED;
+			/* Resubmitted successfully - callback will fire again. */
+			if (sclp_add_request(request) == 0)
+				return;
+		}
+		break;
+
+	case 0x0040: /* SCLP equipment check */
+		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
+			break;
+		/* Retry the whole buffer once more. */
+		sccb->header.response_code = 0x0000;
+		vt220_request->sclp_req.status = SCLP_REQ_FILLED;
+		if (sclp_add_request(request) == 0)
+			return;
+		break;
+
+	default:
+		break;
+	}
+	sclp_vt220_process_queue(vt220_request);
+}
+
+/*
+ * Hand a vt220 request buffer over to the SCLP core.  Returns zero when
+ * the request was accepted for processing, non-zero otherwise.
+ */
+static int
+__sclp_vt220_emit(struct sclp_vt220_request *request)
+{
+	struct sclp_req *req = &request->sclp_req;
+
+	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
+	req->status = SCLP_REQ_FILLED;
+	req->callback = sclp_vt220_callback;
+	req->callback_data = (void *) request;
+
+	return sclp_add_request(req);
+}
+
+/*
+ * Queue the partially filled current buffer (if it has content) and, if
+ * the output queue is idle and not suspended, start emitting its head.
+ */
+static void
+sclp_vt220_emit_current(void)
+{
+	unsigned long flags;
+	struct sclp_vt220_request *request;
+	struct sclp_vt220_sccb *sccb;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	if (sclp_vt220_current_request) {
+		sccb = (struct sclp_vt220_sccb *) 
+				sclp_vt220_current_request->sclp_req.sccb;
+		/* Only emit buffers with content */
+		if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
+			list_add_tail(&sclp_vt220_current_request->list,
+				      &sclp_vt220_outqueue);
+			sclp_vt220_current_request = NULL;
+			/* Buffer queued now - no need for the delayed flush. */
+			if (timer_pending(&sclp_vt220_timer))
+				del_timer(&sclp_vt220_timer);
+		}
+		sclp_vt220_flush_later = 0;
+	}
+	/* The running queue drains itself via the request callback. */
+	if (sclp_vt220_queue_running || sclp_vt220_suspended)
+		goto out_unlock;
+	if (list_empty(&sclp_vt220_outqueue))
+		goto out_unlock;
+	request = list_first_entry(&sclp_vt220_outqueue,
+				   struct sclp_vt220_request, list);
+	sclp_vt220_queue_running = 1;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+
+	/* On emit failure, recycle the buffer and try the next one. */
+	if (__sclp_vt220_emit(request))
+		sclp_vt220_process_queue(request);
+	return;
+out_unlock:
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+}
+
+#define SCLP_NORMAL_WRITE	0x00
+
+/*
+ * Turn a free page into a fresh output buffer: the SCCB (header plus one
+ * event buffer) starts at the beginning of the page, while the request
+ * bookkeeping structure occupies the very end of the page.
+ */
+static struct sclp_vt220_request *
+sclp_vt220_initialize_page(void *page)
+{
+	struct sclp_vt220_sccb *sccb = (struct sclp_vt220_sccb *) page;
+	struct sclp_vt220_request *request;
+
+	/* The request structure sits in the last bytes of the page. */
+	request = (struct sclp_vt220_request *)
+			((addr_t) page + PAGE_SIZE) - 1;
+	request->retry_count = 0;
+	request->sclp_req.sccb = page;
+	/* Start with an empty SCCB: header plus event buffer, no payload. */
+	memset(sccb, 0, sizeof(*sccb));
+	sccb->header.length = sizeof(*sccb);
+	sccb->header.function_code = SCLP_NORMAL_WRITE;
+	sccb->header.response_code = 0x0000;
+	sccb->evbuf.type = EVTYP_VT220MSG;
+	sccb->evbuf.length = sizeof(struct evbuf_header);
+
+	return request;
+}
+
+/* Number of payload bytes that still fit into REQUEST's page. */
+static inline unsigned int
+sclp_vt220_space_left(struct sclp_vt220_request *request)
+{
+	struct sclp_vt220_sccb *sccb =
+		(struct sclp_vt220_sccb *) request->sclp_req.sccb;
+
+	return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
+	       sccb->header.length;
+}
+
+/* Number of payload bytes already stored in REQUEST's event buffer. */
+static inline unsigned int
+sclp_vt220_chars_stored(struct sclp_vt220_request *request)
+{
+	struct sclp_vt220_sccb *sccb =
+		(struct sclp_vt220_sccb *) request->sclp_req.sccb;
+
+	return sccb->evbuf.length - sizeof(struct evbuf_header);
+}
+
+/*
+ * Add msg to the buffer associated with request.  With CONVERTLF set,
+ * each 0x0a is expanded to the pair 0x0a 0x0d; a LF that would not fit
+ * together with its CR is not emitted at all.  Return the number of
+ * *source* characters consumed (which may be less than stored bytes
+ * when conversion expanded the data).
+ */
+static int
+sclp_vt220_add_msg(struct sclp_vt220_request *request,
+		   const unsigned char *msg, int count, int convertlf)
+{
+	struct sclp_vt220_sccb *sccb;
+	void *buffer;
+	unsigned char c;
+	int from;
+	int to;
+
+	/* Clamp to the remaining capacity of this page. */
+	if (count > sclp_vt220_space_left(request))
+		count = sclp_vt220_space_left(request);
+	if (count <= 0)
+		return 0;
+
+	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
+	/* Payload is appended directly behind the current SCCB contents. */
+	buffer = (void *) ((addr_t) sccb + sccb->header.length);
+
+	if (convertlf) {
+		/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
+		for (from=0, to=0;
+		     (from < count) && (to < sclp_vt220_space_left(request));
+		     from++) {
+			/* Retrieve character */
+			c = msg[from];
+			/* Perform conversion */
+			if (c == 0x0a) {
+				/* Emit LF+CR only if both bytes fit. */
+				if (to + 1 < sclp_vt220_space_left(request)) {
+					((unsigned char *) buffer)[to++] = c;
+					((unsigned char *) buffer)[to++] = 0x0d;
+				} else
+					break;
+
+			} else
+				((unsigned char *) buffer)[to++] = c;
+		}
+		/* Account for the (possibly expanded) bytes actually stored. */
+		sccb->header.length += to;
+		sccb->evbuf.length += to;
+		return from;
+	} else {
+		memcpy(buffer, (const void *) msg, count);
+		sccb->header.length += count;
+		sccb->evbuf.length += count;
+		return count;
+	}
+}
+
+/*
+ * Timer expiry handler: emit the buffer after having waited long enough
+ * for more data to arrive (see BUFFER_MAX_DELAY).
+ */
+static void
+sclp_vt220_timeout(unsigned long data)
+{
+	sclp_vt220_emit_current();
+}
+
+#define BUFFER_MAX_DELAY	HZ/20
+
+/*
+ * Drop the oldest *not yet in I/O* console buffer if sclp_console_drop is
+ * set.  Returns 1 if a buffer was reclaimed onto the empty list, 0 if
+ * dropping is disabled or nothing can be dropped.
+ */
+static int
+sclp_vt220_drop_buffer(void)
+{
+	struct list_head *list;
+	struct sclp_vt220_request *request;
+	void *page;
+
+	if (!sclp_console_drop)
+		return 0;
+	list = sclp_vt220_outqueue.next;
+	if (sclp_vt220_queue_running)
+		/* The first element is in I/O */
+		list = list->next;
+	/* Queue empty (or only the in-flight request) - nothing to drop. */
+	if (list == &sclp_vt220_outqueue)
+		return 0;
+	list_del(list);
+	request = list_entry(list, struct sclp_vt220_request, list);
+	page = request->sclp_req.sccb;
+	list_add_tail((struct list_head *) page, &sclp_vt220_empty);
+	return 1;
+}
+
+/* 
+ * Internal implementation of the write function. Write COUNT bytes of data
+ * from memory at BUF
+ * to the SCLP interface. In case that the data does not fit into the current
+ * write buffer, emit the current one and allocate a new one. If there are no
+ * more empty buffers available, wait until one gets emptied. If DO_SCHEDULE
+ * is non-zero, the buffer will be scheduled for emitting after a timeout -
+ * otherwise the user has to explicitly call the flush function.
+ * A non-zero CONVERTLF parameter indicates that 0x0a characters in the message
+ * buffer should be converted to 0x0a 0x0d. After completion, return the number
+ * of bytes written.  With MAY_FAIL set the function returns early instead of
+ * busy-waiting when no buffer is available (tty path); the console path
+ * passes MAY_FAIL=0 and spins in sclp_sync_wait().
+ */
+static int
+__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
+		   int convertlf, int may_fail)
+{
+	unsigned long flags;
+	void *page;
+	int written;
+	int overall_written;
+
+	if (count <= 0)
+		return 0;
+	overall_written = 0;
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	do {
+		/* Create an sclp output buffer if none exists yet */
+		if (sclp_vt220_current_request == NULL) {
+			/* Statistics: count events where we ran full. */
+			if (list_empty(&sclp_vt220_empty))
+				sclp_console_full++;
+			while (list_empty(&sclp_vt220_empty)) {
+				if (may_fail || sclp_vt220_suspended)
+					goto out;
+				if (sclp_vt220_drop_buffer())
+					break;
+				/* Drop the lock while polling the SCLP. */
+				spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+
+				sclp_sync_wait();
+				spin_lock_irqsave(&sclp_vt220_lock, flags);
+			}
+			page = (void *) sclp_vt220_empty.next;
+			list_del((struct list_head *) page);
+			sclp_vt220_current_request =
+				sclp_vt220_initialize_page(page);
+		}
+		/* Try to write the string to the current request buffer */
+		written = sclp_vt220_add_msg(sclp_vt220_current_request,
+					     buf, count, convertlf);
+		overall_written += written;
+		if (written == count)
+			break;
+		/*
+		 * Not all characters could be written to the current
+		 * output buffer. Emit the buffer, create a new buffer
+		 * and then output the rest of the string.
+		 */
+		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+		sclp_vt220_emit_current();
+		spin_lock_irqsave(&sclp_vt220_lock, flags);
+		buf += written;
+		count -= written;
+	} while (count > 0);
+	/* Setup timer to output current console buffer after some time */
+	if (sclp_vt220_current_request != NULL &&
+	    !timer_pending(&sclp_vt220_timer) && do_schedule) {
+		sclp_vt220_timer.function = sclp_vt220_timeout;
+		sclp_vt220_timer.data = 0UL;
+		sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
+		add_timer(&sclp_vt220_timer);
+	}
+out:
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	return overall_written;
+}
+
+/*
+ * This routine is called by the kernel to write a series of
+ * characters to the tty device.  The characters may come from
+ * user space or kernel space.  This routine will return the
+ * number of characters actually accepted for writing.
+ * (scheduled flush, no LF conversion, may fail without blocking)
+ */
+static int
+sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	return __sclp_vt220_write(buf, count, 1, 0, 1);
+}
+
+#define SCLP_VT220_SESSION_ENDED	0x01
+#define	SCLP_VT220_SESSION_STARTED	0x80
+#define SCLP_VT220_SESSION_DATA		0x00
+
+#ifdef CONFIG_MAGIC_SYSRQ
+
+static int sysrq_pressed;
+static struct sysrq_work sysrq;
+
+/* Forget a half-typed sysrq sequence when the terminal session changes. */
+static void sclp_vt220_reset_session(void)
+{
+	sysrq_pressed = 0;
+}
+
+/*
+ * Push received characters into the tty flip buffer, intercepting the
+ * CTRL-O <key> magic-sysrq sequence (CONFIG_MAGIC_SYSRQ variant).
+ */
+static void sclp_vt220_handle_input(const char *buffer, unsigned int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		/* Handle magic sys request */
+		if (buffer[i] == ('O' ^ 0100)) { /* CTRL-O */
+			/*
+			 * If pressed again, reset sysrq_pressed
+			 * and flip CTRL-O character
+			 */
+			sysrq_pressed = !sysrq_pressed;
+			/* First CTRL-O arms sysrq and is swallowed. */
+			if (sysrq_pressed)
+				continue;
+		} else if (sysrq_pressed) {
+			/* Armed: next key triggers the sysrq handler. */
+			sysrq.key = buffer[i];
+			schedule_sysrq_work(&sysrq);
+			sysrq_pressed = 0;
+			continue;
+		}
+		tty_insert_flip_char(&sclp_vt220_port, buffer[i], 0);
+	}
+}
+
+#else
+
+/* No sysrq state to reset when CONFIG_MAGIC_SYSRQ is disabled. */
+static void sclp_vt220_reset_session(void)
+{
+}
+
+/* Without sysrq support, input goes straight into the flip buffer. */
+static void sclp_vt220_handle_input(const char *buffer, unsigned int count)
+{
+	tty_insert_flip_string(&sclp_vt220_port, buffer, count);
+}
+
+#endif
+
+/*
+ * Called by the SCLP to report incoming event buffers.  The first payload
+ * byte is a session opcode; only SESSION_DATA carries terminal input.
+ */
+static void
+sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
+{
+	char *buffer;
+	unsigned int count;
+
+	/* Payload follows directly behind the event buffer header. */
+	buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
+	count = evbuf->length - sizeof(struct evbuf_header);
+
+	switch (*buffer) {
+	case SCLP_VT220_SESSION_ENDED:
+	case SCLP_VT220_SESSION_STARTED:
+		sclp_vt220_reset_session();
+		break;
+	case SCLP_VT220_SESSION_DATA:
+		/* Send input to line discipline */
+		buffer++;
+		count--;	/* skip the opcode byte */
+		sclp_vt220_handle_input(buffer, count);
+		tty_flip_buffer_push(&sclp_vt220_port);
+		break;
+	}
+}
+
+/*
+ * Called when the tty device is opened.  On the first open the tty is
+ * attached to the port and a default window size is supplied if none
+ * was set yet.
+ */
+static int
+sclp_vt220_open(struct tty_struct *tty, struct file *filp)
+{
+	if (tty->count != 1)
+		return 0;
+	tty_port_tty_set(&sclp_vt220_port, tty);
+	sclp_vt220_port.low_latency = 0;
+	if (tty->winsize.ws_row == 0 && tty->winsize.ws_col == 0) {
+		tty->winsize.ws_row = 24;
+		tty->winsize.ws_col = 80;
+	}
+	return 0;
+}
+
+/*
+ * Called when the tty device is closed.  The last close detaches the
+ * tty from the port.
+ */
+static void
+sclp_vt220_close(struct tty_struct *tty, struct file *filp)
+{
+	if (tty->count != 1)
+		return;
+	tty_port_tty_set(&sclp_vt220_port, NULL);
+}
+
+/*
+ * This routine is called by the kernel to write a single
+ * character to the tty device.  If the kernel uses this routine,
+ * it must call the flush_chars() routine (if defined) when it is
+ * done stuffing characters into the driver.
+ * (no scheduled flush - flush_chars() pushes the buffer out)
+ */
+static int
+sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	return __sclp_vt220_write(&ch, 1, 0, 0, 1);
+}
+
+/*
+ * Called by the kernel after a series of put_char() calls.  Emits the
+ * accumulated buffer, or defers the flush to the request callback when
+ * the output queue is currently busy.
+ */
+static void
+sclp_vt220_flush_chars(struct tty_struct *tty)
+{
+	if (sclp_vt220_queue_running)
+		sclp_vt220_flush_later = 1;
+	else
+		sclp_vt220_emit_current();
+}
+
+/*
+ * This routine returns the numbers of characters the tty driver
+ * will accept for queuing to be written.  This number is subject
+ * to change as output buffers get emptied, or if the output flow
+ * control is acted.  Counts the free space in the current buffer
+ * plus the full capacity of every empty page.
+ */
+static int
+sclp_vt220_write_room(struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct list_head *l;
+	int count;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	count = 0;
+	if (sclp_vt220_current_request != NULL)
+		count = sclp_vt220_space_left(sclp_vt220_current_request);
+	/* Each empty page contributes its full payload capacity. */
+	list_for_each(l, &sclp_vt220_empty)
+		count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	return count;
+}
+
+/*
+ * Return number of buffered chars: what is stored in the current
+ * (unqueued) buffer plus everything waiting on the out queue.
+ */
+static int
+sclp_vt220_chars_in_buffer(struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct list_head *l;
+	struct sclp_vt220_request *r;
+	int count;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	count = 0;
+	if (sclp_vt220_current_request != NULL)
+		count = sclp_vt220_chars_stored(sclp_vt220_current_request);
+	list_for_each(l, &sclp_vt220_outqueue) {
+		r = list_entry(l, struct sclp_vt220_request, list);
+		count += sclp_vt220_chars_stored(r);
+	}
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	return count;
+}
+
+/*
+ * tty flush_buffer operation: queue the current buffer for output.
+ * Note this does not wait for completion - the request callback
+ * drains the queue asynchronously.
+ */
+static void
+sclp_vt220_flush_buffer(struct tty_struct *tty)
+{
+	sclp_vt220_emit_current();
+}
+
+/* Return every page on the empty list to the page allocator. */
+static void __init __sclp_vt220_free_pages(void)
+{
+	struct list_head *cur, *next;
+
+	list_for_each_safe(cur, next, &sclp_vt220_empty) {
+		list_del(cur);
+		free_page((unsigned long) cur);
+	}
+}
+
+/* Release memory and unregister from the sclp core.  Controlled by init
+ * counting - only the last invoker actually performs these actions. */
+static void __init __sclp_vt220_cleanup(void)
+{
+	if (--sclp_vt220_init_count)
+		return;
+	sclp_unregister(&sclp_vt220_register);
+	__sclp_vt220_free_pages();
+	tty_port_destroy(&sclp_vt220_port);
+}
+
+/* Allocate buffer pages and register with sclp core. Controlled by init
+ * counting - only the first invoker will actually perform these actions.
+ * Returns 0 on success or a negative errno; on failure the counter is
+ * rolled back so a later invoker may retry. */
+static int __init __sclp_vt220_init(int num_pages)
+{
+	void *page;
+	int i;
+	int rc;
+
+	sclp_vt220_init_count++;
+	if (sclp_vt220_init_count != 1)
+		return 0;
+	spin_lock_init(&sclp_vt220_lock);
+	INIT_LIST_HEAD(&sclp_vt220_empty);
+	INIT_LIST_HEAD(&sclp_vt220_outqueue);
+	init_timer(&sclp_vt220_timer);
+	tty_port_init(&sclp_vt220_port);
+	sclp_vt220_current_request = NULL;
+	sclp_vt220_buffered_chars = 0;
+	sclp_vt220_flush_later = 0;
+
+	/* Allocate pages for output buffering */
+	rc = -ENOMEM;
+	for (i = 0; i < num_pages; i++) {
+		/* GFP_DMA: the SCLP hardware needs 31-bit addressable SCCBs. */
+		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		if (!page)
+			goto out;
+		list_add_tail(page, &sclp_vt220_empty);
+	}
+	rc = sclp_register(&sclp_vt220_register);
+out:
+	if (rc) {
+		/* Undo partial setup so the counter stays consistent. */
+		__sclp_vt220_free_pages();
+		sclp_vt220_init_count--;
+		tty_port_destroy(&sclp_vt220_port);
+	}
+	return rc;
+}
+
+static const struct tty_operations sclp_vt220_ops = {
+	.open = sclp_vt220_open,
+	.close = sclp_vt220_close,
+	.write = sclp_vt220_write,
+	.put_char = sclp_vt220_put_char,
+	.flush_chars = sclp_vt220_flush_chars,
+	.write_room = sclp_vt220_write_room,
+	.chars_in_buffer = sclp_vt220_chars_in_buffer,
+	.flush_buffer = sclp_vt220_flush_buffer,
+};
+
+/*
+ * Register driver with SCLP and Linux and initialize internal tty
+ * structures.  Unwinds in reverse order on any failure.
+ */
+static int __init sclp_vt220_tty_init(void)
+{
+	struct tty_driver *driver;
+	int rc;
+
+	/* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
+	 * symmetry between VM and LPAR systems regarding ttyS1. */
+	driver = alloc_tty_driver(1);
+	if (!driver)
+		return -ENOMEM;
+	/* May be a no-op if the console init already ran (init counting). */
+	rc = __sclp_vt220_init(MAX_KMEM_PAGES);
+	if (rc)
+		goto out_driver;
+
+	driver->driver_name = SCLP_VT220_DRIVER_NAME;
+	driver->name = SCLP_VT220_DEVICE_NAME;
+	driver->major = SCLP_VT220_MAJOR;
+	driver->minor_start = SCLP_VT220_MINOR;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	driver->flags = TTY_DRIVER_REAL_RAW;
+	tty_set_operations(driver, &sclp_vt220_ops);
+	tty_port_link_device(&sclp_vt220_port, driver, 0);
+
+	rc = tty_register_driver(driver);
+	if (rc)
+		goto out_init;
+	rc = sclp_register(&sclp_vt220_register_input);
+	if (rc)
+		goto out_reg;
+	sclp_vt220_driver = driver;
+	return 0;
+
+out_reg:
+	tty_unregister_driver(driver);
+out_init:
+	__sclp_vt220_cleanup();
+out_driver:
+	put_tty_driver(driver);
+	return rc;
+}
+__initcall(sclp_vt220_tty_init);
+
+/*
+ * Synchronously flush all pending output: queue the current buffer,
+ * cancel the delayed-flush timer and busy-wait (via sclp_sync_wait)
+ * until the output queue has fully drained.  Used on suspend and on
+ * panic/reboot where no scheduling is possible.
+ */
+static void __sclp_vt220_flush_buffer(void)
+{
+	unsigned long flags;
+
+	sclp_vt220_emit_current();
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	if (timer_pending(&sclp_vt220_timer))
+		del_timer(&sclp_vt220_timer);
+	while (sclp_vt220_queue_running) {
+		/* Poll the SCLP with the lock dropped. */
+		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+		sclp_sync_wait();
+		spin_lock_irqsave(&sclp_vt220_lock, flags);
+	}
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+}
+
+/*
+ * Resume console: clear the suspend flag, then emit any messages that
+ * were cached while suspended.
+ */
+static void sclp_vt220_resume(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	sclp_vt220_suspended = 0;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	sclp_vt220_emit_current();
+}
+
+/*
+ * Suspend console: set the suspend flag (new output is cached from now
+ * on) and synchronously flush everything already buffered.
+ */
+static void sclp_vt220_suspend(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	sclp_vt220_suspended = 1;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	__sclp_vt220_flush_buffer();
+}
+
+/* Power-management callback from the sclp core: map PM events onto
+ * suspend/resume of the console output path. */
+static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
+				   enum sclp_pm_event sclp_pm_event)
+{
+	switch (sclp_pm_event) {
+	case SCLP_PM_EVENT_FREEZE:
+		sclp_vt220_suspend();
+		break;
+	case SCLP_PM_EVENT_RESTORE:
+	case SCLP_PM_EVENT_THAW:
+		sclp_vt220_resume();
+		break;
+	}
+}
+
+#ifdef CONFIG_SCLP_VT220_CONSOLE
+
+/* Console write: scheduled flush, LF->CRLF conversion, must not fail
+ * (blocks until buffer space is available). */
+static void
+sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
+{
+	__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
+}
+
+/* Map the console to its tty driver (single device, index 0). */
+static struct tty_driver *
+sclp_vt220_con_device(struct console *c, int *index)
+{
+	*index = 0;
+	return sclp_vt220_driver;
+}
+
+/* Panic/reboot notifier: synchronously push out all buffered console
+ * output before the system goes down. */
+static int
+sclp_vt220_notify(struct notifier_block *self,
+			  unsigned long event, void *data)
+{
+	__sclp_vt220_flush_buffer();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+	.notifier_call = sclp_vt220_notify,
+	.priority = 1,
+};
+
+static struct notifier_block on_reboot_nb = {
+	.notifier_call = sclp_vt220_notify,
+	.priority = 1,
+};
+
+/* Structure needed to register with printk */
+static struct console sclp_vt220_console =
+{
+	.name = SCLP_VT220_CONSOLE_NAME,
+	.write = sclp_vt220_con_write,
+	.device = sclp_vt220_con_device,
+	.flags = CON_PRINTBUFFER,
+	.index = SCLP_VT220_CONSOLE_INDEX
+};
+
+/* Early console setup: allocate buffers via the shared init path, then
+ * hook panic/reboot notifiers and register with printk. */
+static int __init
+sclp_vt220_con_init(void)
+{
+	int rc;
+
+	rc = __sclp_vt220_init(sclp_console_pages);
+	if (rc)
+		return rc;
+	/* Attach linux console */
+	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+	register_reboot_notifier(&on_reboot_nb);
+	register_console(&sclp_vt220_console);
+	return 0;
+}
+
+console_initcall(sclp_vt220_con_init);
+#endif /* CONFIG_SCLP_VT220_CONSOLE */
+
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/tape.h b/src/kernel/linux/v4.14/drivers/s390/char/tape.h
new file mode 100644
index 0000000..997b25f
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/tape.h
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    tape device driver for 3480/3490E/3590 tapes.
+ *
+ *  S390 and zSeries version
+ *    Copyright IBM Corp. 2001, 2009
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *		 Stefan Bader <shbader@de.ibm.com>
+ */
+
+#ifndef _TAPE_H
+#define _TAPE_H
+
+#include <asm/ccwdev.h>
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtio.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+struct gendisk;
+
+/*
+ * Define DBF_LIKE_HELL for lots of messages in the debug feature.
+ */
+#define DBF_LIKE_HELL
+#ifdef  DBF_LIKE_HELL
+#define DBF_LH(level, str, ...) \
+do { \
+	debug_sprintf_event(TAPE_DBF_AREA, level, str, ## __VA_ARGS__); \
+} while (0)
+#else
+#define DBF_LH(level, str, ...) do {} while(0)
+#endif
+
+/*
+ * macros s390 debug feature (dbf)
+ */
+#define DBF_EVENT(d_level, d_str...) \
+do { \
+	debug_sprintf_event(TAPE_DBF_AREA, d_level, d_str); \
+} while (0)
+
+#define DBF_EXCEPTION(d_level, d_str...) \
+do { \
+	debug_sprintf_exception(TAPE_DBF_AREA, d_level, d_str); \
+} while (0)
+
+#define TAPE_VERSION_MAJOR 2
+#define TAPE_VERSION_MINOR 0
+#define TAPE_MAGIC "tape"
+
+#define TAPE_MINORS_PER_DEV 2	    /* two minors per device */
+#define TAPEBLOCK_HSEC_SIZE	2048
+#define TAPEBLOCK_HSEC_S2B	2
+#define TAPEBLOCK_RETRIES	5
+
+enum tape_medium_state {
+	MS_UNKNOWN,
+	MS_LOADED,
+	MS_UNLOADED,
+	MS_SIZE
+};
+
+enum tape_state {
+	TS_UNUSED=0,
+	TS_IN_USE,
+	TS_BLKUSE,
+	TS_INIT,
+	TS_NOT_OPER,
+	TS_SIZE
+};
+
+enum tape_op {
+	TO_BLOCK,	/* Block read */
+	TO_BSB,		/* Backward space block */
+	TO_BSF,		/* Backward space filemark */
+	TO_DSE,		/* Data security erase */
+	TO_FSB,		/* Forward space block */
+	TO_FSF,		/* Forward space filemark */
+	TO_LBL,		/* Locate block label */
+	TO_NOP,		/* No operation */
+	TO_RBA,		/* Read backward */
+	TO_RBI,		/* Read block information */
+	TO_RFO,		/* Read forward */
+	TO_REW,		/* Rewind tape */
+	TO_RUN,		/* Rewind and unload tape */
+	TO_WRI,		/* Write block */
+	TO_WTM,		/* Write tape mark */
+	TO_MSEN,	/* Medium sense */
+	TO_LOAD,	/* Load tape */
+	TO_READ_CONFIG, /* Read configuration data */
+	TO_READ_ATTMSG, /* Read attention message */
+	TO_DIS,		/* Tape display */
+	TO_ASSIGN,	/* Assign tape to channel path */
+	TO_UNASSIGN,	/* Unassign tape from channel path */
+	TO_CRYPT_ON,	/* Enable encrpytion */
+	TO_CRYPT_OFF,	/* Disable encrpytion */
+	TO_KEKL_SET,	/* Set KEK label */
+	TO_KEKL_QUERY,	/* Query KEK label */
+	TO_RDC,		/* Read device characteristics */
+	TO_SIZE,	/* #entries in tape_op_t */
+};
+
+/* Forward declaration */
+struct tape_device;
+
+/* tape_request->status can be: */
+enum tape_request_status {
+	TAPE_REQUEST_INIT,	/* request is ready to be processed */
+	TAPE_REQUEST_QUEUED,	/* request is queued to be processed */
+	TAPE_REQUEST_IN_IO,	/* request is currently in IO */
+	TAPE_REQUEST_DONE,	/* request is completed. */
+	TAPE_REQUEST_CANCEL,	/* request should be canceled. */
+	TAPE_REQUEST_LONG_BUSY, /* request has to be restarted after long busy */
+};
+
+/* Tape CCW request */
+struct tape_request {
+	struct list_head list;		/* list head for request queueing. */
+	struct tape_device *device;	/* tape device of this request */
+	struct ccw1 *cpaddr;		/* address of the channel program. */
+	void *cpdata;			/* pointer to ccw data. */
+	enum tape_request_status status;/* status of this request */
+	int options;			/* options for execution. */
+	int retries;			/* retry counter for error recovery. */
+	int rescnt;			/* residual count from devstat. */
+
+	/* Callback for delivering final status. */
+	void (*callback)(struct tape_request *, void *);
+	void *callback_data;
+
+	enum tape_op op;
+	int rc;
+};
+
+/* Function type for magnetic tape commands */
+typedef int (*tape_mtop_fn)(struct tape_device *, int);
+
+/* Size of the array containing the mtops for a discipline */
+#define TAPE_NR_MTOPS (MTMKPART+1)
+
+/* Tape Discipline */
+struct tape_discipline {
+	struct module *owner;
+	int  (*setup_device)(struct tape_device *);
+	void (*cleanup_device)(struct tape_device *);
+	int (*irq)(struct tape_device *, struct tape_request *, struct irb *);
+	struct tape_request *(*read_block)(struct tape_device *, size_t);
+	struct tape_request *(*write_block)(struct tape_device *, size_t);
+	void (*process_eov)(struct tape_device*);
+	/* ioctl function for additional ioctls. */
+	int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long);
+	/* Array of tape commands with TAPE_NR_MTOPS entries */
+	tape_mtop_fn *mtop_array;
+};
+
+/*
+ * The discipline irq function either returns an error code (<0) which
+ * means that the request has failed with an error or one of the following:
+ */
+#define TAPE_IO_SUCCESS		0	/* request successful */
+#define TAPE_IO_PENDING		1	/* request still running */
+#define TAPE_IO_RETRY		2	/* retry to current request */
+#define TAPE_IO_STOP		3	/* stop the running request */
+#define TAPE_IO_LONG_BUSY	4	/* delay the running request */
+
+/* Char Frontend Data */
+struct tape_char_data {
+	struct idal_buffer *idal_buf;	/* idal buffer for user char data */
+	int block_size;			/*   of size block_size. */
+};
+
+/* Tape Info */
+struct tape_device {
+	/* entry in tape_device_list */
+	struct list_head		node;
+
+	int				cdev_id;
+	struct ccw_device *		cdev;
+	struct tape_class_device *	nt;
+	struct tape_class_device *	rt;
+
+	/* Device mutex to serialize tape commands. */
+	struct mutex			mutex;
+
+	/* Device discipline information. */
+	struct tape_discipline *	discipline;
+	void *				discdata;
+
+	/* Generic status flags */
+	long				tape_generic_status;
+
+	/* Device state information. */
+	wait_queue_head_t		state_change_wq;
+	enum tape_state			tape_state;
+	enum tape_medium_state		medium_state;
+	unsigned char *			modeset_byte;
+
+	/* Reference count. */
+	atomic_t			ref_count;
+
+	/* Request queue. */
+	struct list_head		req_queue;
+
+	/* Request wait queue. */
+	wait_queue_head_t		wait_queue;
+
+	/* Each tape device has (currently) two minor numbers. */
+	int				first_minor;
+
+	/* Number of tapemarks required for correct termination. */
+	int				required_tapemarks;
+
+	/* Block ID of the BOF */
+	unsigned int			bof;
+
+	/* Character device frontend data */
+	struct tape_char_data		char_data;
+
+	/* Function to start or stop the next request later. */
+	struct delayed_work		tape_dnr;
+
+	/* Timer for long busy */
+	struct timer_list		lb_timeout;
+
+};
+
+/* Externals from tape_core.c */
+extern struct tape_request *tape_alloc_request(int cplength, int datasize);
+extern void tape_free_request(struct tape_request *);
+extern int tape_do_io(struct tape_device *, struct tape_request *);
+extern int tape_do_io_async(struct tape_device *, struct tape_request *);
+extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
+extern int tape_cancel_io(struct tape_device *, struct tape_request *);
+void tape_hotplug_event(struct tape_device *, int major, int action);
+
+/* Perform a synchronous tape I/O request and release the request
+ * afterwards, regardless of the outcome.  Returns the I/O result. */
+static inline int
+tape_do_io_free(struct tape_device *device, struct tape_request *request)
+{
+	int rc = tape_do_io(device, request);
+
+	tape_free_request(request);
+	return rc;
+}
+
+/* Fire-and-forget asynchronous I/O: the completion callback frees the
+ * request.  NOTE(review): casting tape_free_request to the callback type
+ * relies on the extra void* argument being ignored - works on s390's ABI
+ * but is technically an incompatible function-pointer cast. */
+static inline void
+tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
+{
+	request->callback = (void *) tape_free_request;
+	request->callback_data = NULL;
+	tape_do_io_async(device, request);
+}
+
+extern int tape_oper_handler(int irq, int status);
+extern void tape_noper_handler(int irq, int status);
+extern int tape_open(struct tape_device *);
+extern int tape_release(struct tape_device *);
+extern int tape_mtop(struct tape_device *, int, int);
+extern void tape_state_set(struct tape_device *, enum tape_state);
+
+extern int tape_generic_online(struct tape_device *, struct tape_discipline *);
+extern int tape_generic_offline(struct ccw_device *);
+extern int tape_generic_pm_suspend(struct ccw_device *);
+
+/* Externals from tape_devmap.c */
+extern int tape_generic_probe(struct ccw_device *);
+extern void tape_generic_remove(struct ccw_device *);
+
+extern struct tape_device *tape_find_device(int devindex);
+extern struct tape_device *tape_get_device(struct tape_device *);
+extern void tape_put_device(struct tape_device *);
+
+/* Externals from tape_char.c */
+extern int tapechar_init(void);
+extern void tapechar_exit(void);
+extern int  tapechar_setup_device(struct tape_device *);
+extern void tapechar_cleanup_device(struct tape_device *);
+
+/* tape initialisation functions */
+#ifdef CONFIG_PROC_FS
+extern void tape_proc_init (void);
+extern void tape_proc_cleanup (void);
+#else
+static inline void tape_proc_init (void) {;}
+static inline void tape_proc_cleanup (void) {;}
+#endif
+
+/* a function for dumping device sense info */
+extern void tape_dump_sense_dbf(struct tape_device *, struct tape_request *,
+				struct irb *);
+
+/* functions for handling the status of a device */
+extern void tape_med_state_set(struct tape_device *, enum tape_medium_state);
+
+/* The debug area */
+extern debug_info_t *TAPE_DBF_AREA;
+
+/* functions for building ccws */
+/* Fill in one CCW with command chaining set and return a pointer to the
+ * next CCW slot, so calls can be strung together to build a program. */
+static inline struct ccw1 *
+tape_ccw_cc(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags = CCW_FLAG_CC;
+	ccw->count = memsize;
+	ccw->cda = (__u32)(addr_t) cda;
+	return ccw + 1;
+}
+
+/* Fill in the final CCW of a chain (no command chaining flag) and return
+ * the next slot. */
+static inline struct ccw1 *
+tape_ccw_end(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags = 0;
+	ccw->count = memsize;
+	ccw->cda = (__u32)(addr_t) cda;
+	return ccw + 1;
+}
+
+/* Fill in a data-less CCW; the data address points at the command code
+ * itself so the channel has a valid (unused) address.  Returns the next
+ * slot. */
+static inline struct ccw1 *
+tape_ccw_cmd(struct ccw1 *ccw, __u8 cmd_code)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags = 0;
+	ccw->count = 0;
+	ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
+	return ccw + 1;
+}
+
+/* Emit COUNT identical chained data-less CCWs and return the slot that
+ * follows the last one written. */
+static inline struct ccw1 *
+tape_ccw_repeat(struct ccw1 *ccw, __u8 cmd_code, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++, ccw++) {
+		ccw->cmd_code = cmd_code;
+		ccw->flags = CCW_FLAG_CC;
+		ccw->count = 0;
+		ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
+	}
+	return ccw;
+}
+
+/* Fill in a chained CCW whose data address is an IDAL buffer.
+ * NOTE(review): `return ccw++` post-increments a by-value parameter and
+ * therefore returns the CURRENT slot, not the next one - unlike
+ * tape_ccw_cc/tape_ccw_end above.  This matches the upstream kernel
+ * source; confirm callers do not chain off the return value. */
+static inline struct ccw1 *
+tape_ccw_cc_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags    = CCW_FLAG_CC;
+	idal_buffer_set_cda(idal, ccw);
+	return ccw++;
+}
+
+/* Fill in a final (unchained) CCW whose data address is an IDAL buffer.
+ * NOTE(review): as with tape_ccw_cc_idal, `return ccw++` returns the
+ * unincremented pointer - matches upstream; verify callers. */
+static inline struct ccw1 *
+tape_ccw_end_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags    = 0;
+	idal_buffer_set_cda(idal, ccw);
+	return ccw++;
+}
+
+/* Global vars */
+extern const char *tape_state_verbose[];
+extern const char *tape_op_verbose[];
+
+#endif /* for ifdef tape.h */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/tape_34xx.c b/src/kernel/linux/v4.14/drivers/s390/char/tape_34xx.c
new file mode 100644
index 0000000..de69f0d
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/tape_34xx.c
@@ -0,0 +1,1232 @@
+/*
+ *    tape device discipline for 3480/3490 tapes.
+ *
+ *    Copyright IBM Corp. 2001, 2009
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "tape_34xx"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#define TAPE_DBF_AREA	tape_34xx_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+
+/*
+ * Pointer to debug area.
+ */
+debug_info_t *TAPE_DBF_AREA = NULL;
+EXPORT_SYMBOL(TAPE_DBF_AREA);
+
+#define TAPE34XX_FMT_3480	0
+#define TAPE34XX_FMT_3480_2_XF	1
+#define TAPE34XX_FMT_3480_XF	2
+
+/*
+ * 32-bit hardware block ID as used by the LOCATE channel command.
+ * wrap/segment appear to be the physical position hint the control unit
+ * uses to accelerate seeks (see tape_34xx_merge_sbid) — layout per
+ * 3480/3490 hardware; format selects one of the TAPE34XX_FMT_* codes.
+ */
+struct tape_34xx_block_id {
+	unsigned int	wrap		: 1;	/* physical wrap (position hint) */
+	unsigned int	segment		: 7;	/* segment within the wrap */
+	unsigned int	format		: 2;	/* TAPE34XX_FMT_* recording format */
+	unsigned int	block		: 22;	/* logical block number (max 0x3fffff) */
+};
+
+/*
+ * A list of block IDs is kept per device to speed up seeking blocks:
+ * each entry caches the hardware position data for a logical block.
+ */
+struct tape_34xx_sbid {
+	struct list_head		list;	/* link in device->discdata list */
+	struct tape_34xx_block_id	bid;	/* cached block ID with position data */
+};
+
+static void tape_34xx_delete_sbid_from(struct tape_device *, int);
+
+/*
+ * Medium sense for 34xx tapes. There is no 'real' medium sense call.
+ * So we just do a normal sense.
+ *
+ * Completion part: evaluates the sense bytes to track medium state and
+ * write protection, then frees the request.  Used both directly on the
+ * synchronous path and as the request callback on the async path, so it
+ * always consumes (frees) the request.
+ */
+static void __tape_34xx_medium_sense(struct tape_request *request)
+{
+	struct tape_device *device = request->device;
+	unsigned char *sense;
+
+	if (request->rc == 0) {
+		sense = request->cpdata;
+
+		/*
+		 * This isn't quite correct. But since INTERVENTION_REQUIRED
+		 * means that the drive is 'neither ready nor on-line' it is
+		 * only slightly inaccurate to say there is no tape loaded if
+		 * the drive isn't online...
+		 */
+		if (sense[0] & SENSE_INTERVENTION_REQUIRED)
+			tape_med_state_set(device, MS_UNLOADED);
+		else
+			tape_med_state_set(device, MS_LOADED);
+
+		/* Mirror the hardware write-protect bit into the MTIO status. */
+		if (sense[1] & SENSE_WRITE_PROTECT)
+			device->tape_generic_status |= GMT_WR_PROT(~0);
+		else
+			device->tape_generic_status &= ~GMT_WR_PROT(~0);
+	} else
+		DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
+			request->rc);
+	tape_free_request(request);
+}
+
+/*
+ * Synchronous medium sense: issue a 32-byte SENSE and evaluate the
+ * result.  The request is freed by __tape_34xx_medium_sense() in all
+ * cases; the returned rc is that of the I/O itself (0 on success,
+ * negative errno from allocation or tape_do_io_interruptible).
+ */
+static int tape_34xx_medium_sense(struct tape_device *device)
+{
+	struct tape_request *request;
+	int rc;
+
+	/* One CCW, 32 bytes of sense data. */
+	request = tape_alloc_request(1, 32);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "MSEN fail\n");
+		return PTR_ERR(request);
+	}
+
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+	rc = tape_do_io_interruptible(device, request);
+	__tape_34xx_medium_sense(request);
+	return rc;
+}
+
+/*
+ * Fire-and-forget medium sense, used from the work handler (we must not
+ * wait for I/O there — see the comment above tape_34xx_work_handler).
+ * The evaluation and the freeing of the request happen in the callback.
+ */
+static void tape_34xx_medium_sense_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(1, 32);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "MSEN fail\n");
+		return;
+	}
+
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+	/* Callback consumes the request; cast adapts the signature. */
+	request->callback = (void *) __tape_34xx_medium_sense;
+	request->callback_data = NULL;
+	tape_do_io_async(device, request);
+}
+
+/* Deferred-operation descriptor queued on the system workqueue. */
+struct tape_34xx_work {
+	struct tape_device	*device;	/* holds a reference (tape_get_device) */
+	enum tape_op		 op;		/* operation to run, e.g. TO_MSEN */
+	struct work_struct	 work;		/* workqueue linkage */
+};
+
+/*
+ * These functions are currently used only to schedule a medium_sense for
+ * later execution. This is because we get an interrupt whenever a medium
+ * is inserted but cannot call tape_do_io* from an interrupt context.
+ * Maybe that's useful for other actions we want to start from the
+ * interrupt handler.
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
+ */
+/*
+ * Workqueue handler: run the deferred operation, then drop the device
+ * reference taken by tape_34xx_schedule_work() and free the descriptor.
+ */
+static void
+tape_34xx_work_handler(struct work_struct *work)
+{
+	struct tape_34xx_work *p =
+		container_of(work, struct tape_34xx_work, work);
+	struct tape_device *device = p->device;
+
+	if (p->op == TO_MSEN)
+		tape_34xx_medium_sense_async(device);
+	else
+		DBF_EVENT(3, "T34XX: internal error: unknown work\n");
+	tape_put_device(device);
+	kfree(p);
+}
+
+/*
+ * Queue a tape operation for execution in process context.  Takes a
+ * device reference that the work handler releases.  GFP_ATOMIC because
+ * this is called from the interrupt handler.
+ */
+static int
+tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
+{
+	struct tape_34xx_work *p;
+
+	p = kzalloc(sizeof(*p), GFP_ATOMIC);
+	if (!p)
+		return -ENOMEM;
+	INIT_WORK(&p->work, tape_34xx_work_handler);
+	p->device = tape_get_device(device);
+	p->op = op;
+	schedule_work(&p->work);
+	return 0;
+}
+
+/*
+ * Done handler, called when dev stat = DEVICE-END (successful operation).
+ * Operations that move or rewrite the tape invalidate the cached search
+ * block IDs, so the whole list is flushed for them.
+ */
+static inline int
+tape_34xx_done(struct tape_request *request)
+{
+	enum tape_op op = request->op;
+
+	DBF_EVENT(6, "%s done\n", tape_op_verbose[op]);
+
+	if (op == TO_DSE || op == TO_RUN || op == TO_WRI ||
+	    op == TO_WTM || op == TO_ASSIGN || op == TO_UNASSIGN)
+		tape_34xx_delete_sbid_from(request->device, 0);
+	return TAPE_IO_SUCCESS;
+}
+
+static inline int
+tape_34xx_erp_failed(struct tape_request *request, int rc)
+{
+	DBF_EVENT(3, "Error recovery failed for %s (RC=%d)\n",
+		  tape_op_verbose[request->op], rc);
+	return rc;
+}
+
+/* Recovery worked: log it and finish the request as a normal success. */
+static inline int
+tape_34xx_erp_succeeded(struct tape_request *request)
+{
+	const char *op_name = tape_op_verbose[request->op];
+
+	DBF_EVENT(3, "Error Recovery successful for %s\n", op_name);
+	return tape_34xx_done(request);
+}
+
+/* Ask the tape core to reissue the channel program. */
+static inline int
+tape_34xx_erp_retry(struct tape_request *request)
+{
+	const char *op_name = tape_op_verbose[request->op];
+
+	DBF_EVENT(3, "xerp retr %s\n", op_name);
+	return TAPE_IO_RETRY;
+}
+
+/*
+ * This function is called, when no request is outstanding and we get an
+ * interrupt.  A device status of 0x85 signals that the drive became
+ * READY (a medium was inserted); anything else is merely logged.
+ */
+static int
+tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
+{
+	if (irb->scsw.cmd.dstat == 0x85) { /* READY */
+		/* A medium was inserted in the drive. */
+		DBF_EVENT(6, "xuud med\n");
+		/* Old position data is void; re-sense the medium later
+		 * from process context (can't do I/O from an irq). */
+		tape_34xx_delete_sbid_from(device, 0);
+		tape_34xx_schedule_work(device, TO_MSEN);
+	} else {
+		DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
+		tape_dump_sense_dbf(device, NULL, irb);
+	}
+	return TAPE_IO_SUCCESS;
+}
+
+/*
+ * Read Opposite Error Recovery Function:
+ * Used when Read Forward does not work.  The failed read forward is
+ * converted in place into a read backward and retried; if the request
+ * was not a read forward, both directions have already failed.
+ */
+static int
+tape_34xx_erp_read_opposite(struct tape_device *device,
+			    struct tape_request *request)
+{
+	if (request->op != TO_RFO)
+		/* Forward and backward both tried without success. */
+		return tape_34xx_erp_failed(request, -EIO);
+
+	/* Transform the request into a read backward and try again. */
+	tape_std_read_backward(device, request);
+	return tape_34xx_erp_retry(request);
+}
+
+/*
+ * Report a sense condition the driver does not expect ('no' identifies
+ * the spot) and fail the request.  ASSIGN is exempt from the console
+ * message because unexpected conditions are normal there.
+ */
+static int
+tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request,
+		  struct irb *irb, int no)
+{
+	if (request->op == TO_ASSIGN)
+		return tape_34xx_erp_failed(request, -EIO);
+
+	dev_err(&device->cdev->dev, "An unexpected condition %d "
+		"occurred in tape error recovery\n", no);
+	tape_dump_sense_dbf(device, request, irb);
+	return tape_34xx_erp_failed(request, -EIO);
+}
+
+/*
+ * Handle data overrun between cu and drive. The channel speed might
+ * be too slow.  Only erpa code 0x40 is a genuine overrun; any other
+ * value here is an inconsistency in the sense data.
+ */
+static int
+tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request,
+		      struct irb *irb)
+{
+	if (irb->ecw[3] != 0x40)
+		return tape_34xx_erp_bug(device, request, irb, -1);
+
+	dev_warn (&device->cdev->dev, "A data overrun occurred between"
+		" the control unit and tape unit\n");
+	return tape_34xx_erp_failed(request, -EIO);
+}
+
+/*
+ * Handle record sequence error: the control unit detected an incorrect
+ * block-id sequence on the tape (erpa 0x41).  The sequence-error sense
+ * bit set together with any other erpa value is an inconsistency.
+ */
+static int
+tape_34xx_erp_sequence(struct tape_device *device,
+		       struct tape_request *request, struct irb *irb)
+{
+	if (irb->ecw[3] != 0x41)
+		return tape_34xx_erp_bug(device, request, irb, -2);
+
+	dev_warn (&device->cdev->dev, "The block ID sequence on the "
+		"tape is incorrect\n");
+	return tape_34xx_erp_failed(request, -EIO);
+}
+
+/*
+ * This function analyses the tape's sense-data in case of a unit-check.
+ * If possible, it tries to recover from the error. Else the user is
+ * informed about the problem.
+ *
+ * Returns a TAPE_IO_* action code (success/retry) or the negative errno
+ * produced via tape_34xx_erp_failed().  The big switch at the end is
+ * keyed on sense[3], the erpa (error recovery procedure action) code.
+ */
+static int
+tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
+		     struct irb *irb)
+{
+	int inhibit_cu_recovery;
+	__u8* sense;
+
+	/* Bit 0x80 of the mode-set byte disables control-unit recovery. */
+	inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0;
+	sense = irb->ecw;
+
+	if (
+		sense[0] & SENSE_COMMAND_REJECT &&
+		sense[1] & SENSE_WRITE_PROTECT
+	) {
+		if (
+			request->op == TO_DSE ||
+			request->op == TO_WRI ||
+			request->op == TO_WTM
+		) {
+			/* medium is write protected */
+			return tape_34xx_erp_failed(request, -EACCES);
+		} else {
+			return tape_34xx_erp_bug(device, request, irb, -3);
+		}
+	}
+
+	/*
+	 * Special cases for various tape-states when reaching
+	 * end of recorded area
+	 *
+	 * FIXME: Maybe a special case of the special case:
+	 *        sense[0] == SENSE_EQUIPMENT_CHECK &&
+	 *        sense[1] == SENSE_DRIVE_ONLINE    &&
+	 *        sense[3] == 0x47 (Volume Fenced)
+	 *
+	 *        This was caused by continued FSF or FSR after an
+	 *        'End Of Data'.
+	 */
+	if ((
+		sense[0] == SENSE_DATA_CHECK      ||
+		sense[0] == SENSE_EQUIPMENT_CHECK ||
+		sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK
+	) && (
+		sense[1] == SENSE_DRIVE_ONLINE ||
+		sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE
+	)) {
+		switch (request->op) {
+		/*
+		 * sense[0] == SENSE_DATA_CHECK   &&
+		 * sense[1] == SENSE_DRIVE_ONLINE
+		 * sense[3] == 0x36 (End Of Data)
+		 *
+		 * Further seeks might return a 'Volume Fenced'.
+		 */
+		case TO_FSF:
+		case TO_FSB:
+			/* Trying to seek beyond end of recorded area */
+			return tape_34xx_erp_failed(request, -ENOSPC);
+		case TO_BSB:
+			return tape_34xx_erp_retry(request);
+
+		/*
+		 * sense[0] == SENSE_DATA_CHECK   &&
+		 * sense[1] == SENSE_DRIVE_ONLINE &&
+		 * sense[3] == 0x36 (End Of Data)
+		 */
+		case TO_LBL:
+			/* Block could not be located. */
+			tape_34xx_delete_sbid_from(device, 0);
+			return tape_34xx_erp_failed(request, -EIO);
+
+		case TO_RFO:
+			/* Read beyond end of recorded area -> 0 bytes read */
+			return tape_34xx_erp_failed(request, 0);
+
+		/*
+		 * sense[0] == SENSE_EQUIPMENT_CHECK &&
+		 * sense[1] == SENSE_DRIVE_ONLINE    &&
+		 * sense[3] == 0x38 (Physical End Of Volume)
+		 */
+		case TO_WRI:
+			/* Writing at physical end of volume */
+			return tape_34xx_erp_failed(request, -ENOSPC);
+		default:
+			return tape_34xx_erp_failed(request, 0);
+		}
+	}
+
+	/* Sensing special bits */
+	if (sense[0] & SENSE_BUS_OUT_CHECK)
+		return tape_34xx_erp_retry(request);
+
+	if (sense[0] & SENSE_DATA_CHECK) {
+		/*
+		 * hardware failure, damaged tape or improper
+		 * operating conditions
+		 */
+		switch (sense[3]) {
+		case 0x23:
+			/* a read data check occurred */
+			if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
+			    inhibit_cu_recovery)
+				// data check is not permanent, may be
+				// recovered. We always use async-mode with
+				// cu-recovery, so this should *never* happen.
+				return tape_34xx_erp_bug(device, request,
+							 irb, -4);
+
+			/* data check is permanent, CU recovery has failed */
+			dev_warn (&device->cdev->dev, "A read error occurred "
+				"that cannot be recovered\n");
+			return tape_34xx_erp_failed(request, -EIO);
+		case 0x25:
+			// a write data check occurred
+			if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
+			    inhibit_cu_recovery)
+				// data check is not permanent, may be
+				// recovered. We always use async-mode with
+				// cu-recovery, so this should *never* happen.
+				return tape_34xx_erp_bug(device, request,
+							 irb, -5);
+
+			// data check is permanent, cu-recovery has failed
+			dev_warn (&device->cdev->dev, "A write error on the "
+				"tape cannot be recovered\n");
+			return tape_34xx_erp_failed(request, -EIO);
+		case 0x26:
+			/* Data Check (read opposite) occurred. */
+			return tape_34xx_erp_read_opposite(device, request);
+		case 0x28:
+			/* ID-Mark at tape start couldn't be written */
+			dev_warn (&device->cdev->dev, "Writing the ID-mark "
+				"failed\n");
+			return tape_34xx_erp_failed(request, -EIO);
+		case 0x31:
+			/* Tape void. Tried to read beyond end of device. */
+			dev_warn (&device->cdev->dev, "Reading the tape beyond"
+				" the end of the recorded area failed\n");
+			return tape_34xx_erp_failed(request, -ENOSPC);
+		case 0x41:
+			/* Record sequence error. */
+			dev_warn (&device->cdev->dev, "The tape contains an "
+				"incorrect block ID sequence\n");
+			return tape_34xx_erp_failed(request, -EIO);
+		default:
+			/* all data checks for 3480 should result in one of
+			 * the above erpa-codes. For 3490, other data-check
+			 * conditions do exist. */
+			if (device->cdev->id.driver_info == tape_3480)
+				return tape_34xx_erp_bug(device, request,
+							 irb, -6);
+		}
+	}
+
+	if (sense[0] & SENSE_OVERRUN)
+		return tape_34xx_erp_overrun(device, request, irb);
+
+	if (sense[1] & SENSE_RECORD_SEQUENCE_ERR)
+		return tape_34xx_erp_sequence(device, request, irb);
+
+	/* Sensing erpa codes */
+	switch (sense[3]) {
+	case 0x00:
+		/* Unit check with erpa code 0. Report and ignore. */
+		return TAPE_IO_SUCCESS;
+	case 0x21:
+		/*
+		 * Data streaming not operational. CU will switch to
+		 * interlock mode. Reissue the command.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x22:
+		/*
+		 * Path equipment check. Might be drive adapter error, buffer
+		 * error on the lower interface, internal path not usable,
+		 * or error during cartridge load.
+		 */
+		dev_warn (&device->cdev->dev, "A path equipment check occurred"
+			" for the tape device\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x24:
+		/*
+		 * Load display check. A load display command was issued,
+		 * but the drive is displaying a drive check message. Can
+		 * be treated as "device end".
+		 */
+		return tape_34xx_erp_succeeded(request);
+	case 0x27:
+		/*
+		 * Command reject. May indicate illegal channel program or
+		 * buffer over/underrun. Since all channel programs are
+		 * issued by this driver and ought to be correct, we assume a
+		 * over/underrun situation and retry the channel program.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x29:
+		/*
+		 * Function incompatible. Either the tape is idrc compressed
+		 * but the hardware isn't capable to do idrc, or a perform
+		 * subsystem func is issued and the CU is not on-line.
+		 */
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x2a:
+		/*
+		 * Unsolicited environmental data. An internal counter
+		 * overflows, we can ignore this and reissue the cmd.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x2b:
+		/*
+		 * Environmental data present. Indicates either unload
+		 * completed ok or read buffered log command completed ok.
+		 */
+		if (request->op == TO_RUN) {
+			/* Rewind unload completed ok. */
+			tape_med_state_set(device, MS_UNLOADED);
+			return tape_34xx_erp_succeeded(request);
+		}
+		/* tape_34xx doesn't use read buffered log commands. */
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x2c:
+		/*
+		 * Permanent equipment check. CU has tried recovery, but
+		 * did not succeed.
+		 */
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x2d:
+		/* Data security erase failure. */
+		if (request->op == TO_DSE)
+			return tape_34xx_erp_failed(request, -EIO);
+		/* Data security erase failure, but no such command issued. */
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x2e:
+		/*
+		 * Not capable. This indicates either that the drive fails
+		 * reading the format id mark or that the format specified
+		 * is not supported by the drive.
+		 */
+		dev_warn (&device->cdev->dev, "The tape unit cannot process "
+			"the tape format\n");
+		return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
+	case 0x30:
+		/* The medium is write protected. */
+		dev_warn (&device->cdev->dev, "The tape medium is write-"
+			"protected\n");
+		return tape_34xx_erp_failed(request, -EACCES);
+	case 0x32:
+		// Tension loss. We cannot recover this, it's an I/O error.
+		dev_warn (&device->cdev->dev, "The tape does not have the "
+			"required tape tension\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x33:
+		/*
+		 * Load Failure. The cartridge was not inserted correctly or
+		 * the tape is not threaded correctly.
+		 */
+		dev_warn (&device->cdev->dev, "The tape unit failed to load"
+			" the cartridge\n");
+		tape_34xx_delete_sbid_from(device, 0);
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x34:
+		/*
+		 * Unload failure. The drive cannot maintain tape tension
+		 * and control tape movement during an unload operation.
+		 */
+		dev_warn (&device->cdev->dev, "Automatic unloading of the tape"
+			" cartridge failed\n");
+		if (request->op == TO_RUN)
+			return tape_34xx_erp_failed(request, -EIO);
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x35:
+		/*
+		 * Drive equipment check. One of the following:
+		 * - cu cannot recover from a drive detected error
+		 * - a check code message is shown on drive display
+		 * - the cartridge loader does not respond correctly
+		 * - a failure occurs during an index, load, or unload cycle
+		 */
+		dev_warn (&device->cdev->dev, "An equipment check has occurred"
+			" on the tape unit\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x36:
+		if (device->cdev->id.driver_info == tape_3490)
+			/* End of data. */
+			return tape_34xx_erp_failed(request, -EIO);
+		/* This erpa is reserved for 3480 */
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x37:
+		/*
+		 * Tape length error. The tape is shorter than reported in
+		 * the beginning-of-tape data.
+		 */
+		dev_warn (&device->cdev->dev, "The tape information states an"
+			" incorrect length\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x38:
+		/*
+		 * Physical end of tape. A read/write operation reached
+		 * the physical end of tape.
+		 */
+		if (request->op==TO_WRI ||
+		    request->op==TO_DSE ||
+		    request->op==TO_WTM)
+			return tape_34xx_erp_failed(request, -ENOSPC);
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x39:
+		/* Backward at Beginning of tape. */
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x3a:
+		/* Drive switched to not ready. */
+		dev_warn (&device->cdev->dev, "The tape unit is not ready\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x3b:
+		/* Manual rewind or unload. This causes an I/O error. */
+		dev_warn (&device->cdev->dev, "The tape medium has been "
+			"rewound or unloaded manually\n");
+		tape_34xx_delete_sbid_from(device, 0);
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x42:
+		/*
+		 * Degraded mode. A condition that can cause degraded
+		 * performance is detected.
+		 */
+		dev_warn (&device->cdev->dev, "The tape subsystem is running "
+			"in degraded mode\n");
+		return tape_34xx_erp_retry(request);
+	case 0x43:
+		/* Drive not ready. */
+		tape_34xx_delete_sbid_from(device, 0);
+		tape_med_state_set(device, MS_UNLOADED);
+		/* Some commands are successful even in this case */
+		if (sense[1] & SENSE_DRIVE_ONLINE) {
+			switch(request->op) {
+				case TO_ASSIGN:
+				case TO_UNASSIGN:
+				case TO_DIS:
+				case TO_NOP:
+					return tape_34xx_done(request);
+					break;
+				default:
+					break;
+			}
+		}
+		return tape_34xx_erp_failed(request, -ENOMEDIUM);
+	case 0x44:
+		/* Locate Block unsuccessful. */
+		if (request->op != TO_BLOCK && request->op != TO_LBL)
+			/* No locate block was issued. */
+			return tape_34xx_erp_bug(device, request,
+						 irb, sense[3]);
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x45:
+		/* The drive is assigned to a different channel path. */
+		dev_warn (&device->cdev->dev, "The tape unit is already "
+			"assigned\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x46:
+		/*
+		 * Drive not on-line. Drive may be switched offline,
+		 * the power supply may be switched off or
+		 * the drive address may not be set correctly.
+		 */
+		dev_warn (&device->cdev->dev, "The tape unit is not online\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x47:
+		/* Volume fenced. CU reports volume integrity is lost. */
+		dev_warn (&device->cdev->dev, "The control unit has fenced "
+			"access to the tape volume\n");
+		tape_34xx_delete_sbid_from(device, 0);
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x48:
+		/* Log sense data and retry request. */
+		return tape_34xx_erp_retry(request);
+	case 0x49:
+		/* Bus out check. A parity check error on the bus was found. */
+		dev_warn (&device->cdev->dev, "A parity error occurred on the "
+			"tape bus\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x4a:
+		/* Control unit erp failed. */
+		dev_warn (&device->cdev->dev, "I/O error recovery failed on "
+			"the tape control unit\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x4b:
+		/*
+		 * CU and drive incompatible. The drive requests micro-program
+		 * patches, which are not available on the CU.
+		 */
+		dev_warn (&device->cdev->dev, "The tape unit requires a "
+			"firmware update\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x4c:
+		/*
+		 * Recovered Check-One failure. Cu develops a hardware error,
+		 * but is able to recover.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x4d:
+		if (device->cdev->id.driver_info == tape_3490)
+			/*
+			 * Resetting event received. Since the driver does
+			 * not support resetting event recovery (which has to
+			 * be handled by the I/O Layer), retry our command.
+			 */
+			return tape_34xx_erp_retry(request);
+		/* This erpa is reserved for 3480. */
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x4e:
+		if (device->cdev->id.driver_info == tape_3490) {
+			/*
+			 * Maximum block size exceeded. This indicates, that
+			 * the block to be written is larger than allowed for
+			 * buffered mode.
+			 */
+			dev_warn (&device->cdev->dev, "The maximum block size"
+				" for buffered mode is exceeded\n");
+			return tape_34xx_erp_failed(request, -ENOBUFS);
+		}
+		/* This erpa is reserved for 3480. */
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x50:
+		/*
+		 * Read buffered log (Overflow). CU is running in extended
+		 * buffered log mode, and a counter overflows. This should
+		 * never happen, since we're never running in extended
+		 * buffered log mode.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x51:
+		/*
+		 * Read buffered log (EOV). EOF processing occurs while the
+		 * CU is in extended buffered log mode. This should never
+		 * happen, since we're never running in extended buffered
+		 * log mode.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x52:
+		/* End of Volume complete. Rewind unload completed ok. */
+		if (request->op == TO_RUN) {
+			tape_med_state_set(device, MS_UNLOADED);
+			tape_34xx_delete_sbid_from(device, 0);
+			return tape_34xx_erp_succeeded(request);
+		}
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x53:
+		/* Global command intercept. */
+		return tape_34xx_erp_retry(request);
+	case 0x54:
+		/* Channel interface recovery (temporary). */
+		return tape_34xx_erp_retry(request);
+	case 0x55:
+		/* Channel interface recovery (permanent). */
+		dev_warn (&device->cdev->dev, "A channel interface error cannot be"
+			" recovered\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x56:
+		/* Channel protocol error. */
+		dev_warn (&device->cdev->dev, "A channel protocol error "
+			"occurred\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x57:
+		/*
+		 * 3480: Attention intercept.
+		 * 3490: Global status intercept.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x5a:
+		/*
+		 * Tape length incompatible. The tape inserted is too long,
+		 * which could cause damage to the tape or the drive.
+		 */
+		dev_warn (&device->cdev->dev, "The tape unit does not support "
+			"the tape length\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x5b:
+		/* Format 3480 XF incompatible */
+		if (sense[1] & SENSE_BEGINNING_OF_TAPE)
+			/* The tape will get overwritten. */
+			return tape_34xx_erp_retry(request);
+		dev_warn (&device->cdev->dev, "The tape unit does not support"
+			" format 3480 XF\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x5c:
+		/* Format 3480-2 XF incompatible */
+		dev_warn (&device->cdev->dev, "The tape unit does not support tape "
+			"format 3480-2 XF\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x5d:
+		/* Tape length violation. */
+		dev_warn (&device->cdev->dev, "The tape unit does not support"
+			" the current tape length\n");
+		return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
+	case 0x5e:
+		/* Compaction algorithm incompatible. */
+		dev_warn (&device->cdev->dev, "The tape unit does not support"
+			" the compaction algorithm\n");
+		return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
+
+		/* The following erpas should have been covered earlier. */
+	case 0x23: /* Read data check. */
+	case 0x25: /* Write data check. */
+	case 0x26: /* Data check (read opposite). */
+	case 0x28: /* Write id mark check. */
+	case 0x31: /* Tape void. */
+	case 0x40: /* Overrun error. */
+	case 0x41: /* Record sequence error. */
+		/* All other erpas are reserved for future use. */
+	default:
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	}
+}
+
+/*
+ * 3480/3490 interrupt handler: dispatch on the device status in the irb.
+ * Without a pending request the interrupt is unsolicited; a unit check
+ * goes to sense analysis; device end completes the request.
+ */
+static int
+tape_34xx_irq(struct tape_device *device, struct tape_request *request,
+	      struct irb *irb)
+{
+	if (request == NULL)
+		return tape_34xx_unsolicited_irq(device, irb);
+
+	/* Unit exception + device end on a write means end of volume. */
+	if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
+	    (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
+	    (request->op == TO_WRI)) {
+		/* Write at end of volume */
+		return tape_34xx_erp_failed(request, -ENOSPC);
+	}
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+		return tape_34xx_unit_check(device, request, irb);
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+		/*
+		 * A unit exception occurs on skipping over a tapemark block.
+		 */
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
+			if (request->op == TO_BSB || request->op == TO_FSB)
+				request->rescnt++;
+			else
+				DBF_EVENT(5, "Unit Exception!\n");
+		}
+		return tape_34xx_done(request);
+	}
+
+	DBF_EVENT(6, "xunknownirq\n");
+	tape_dump_sense_dbf(device, request, irb);
+	return TAPE_IO_STOP;
+}
+
+/*
+ * ioctl_overload: discipline-specific ioctls.  Only TAPE390_DISPLAY is
+ * implemented; it copies a display_struct from user space and shows it
+ * on the drive's message display.
+ */
+static int
+tape_34xx_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
+{
+	struct display_struct disp;
+
+	if (cmd != TAPE390_DISPLAY)
+		return -EINVAL;
+	if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)) != 0)
+		return -EFAULT;
+	return tape_std_display(device, &disp);
+}
+
+/*
+ * Insert a freshly allocated search-block-ID entry after 'l'.
+ * Allocation failure is silently tolerated — the list is only a
+ * seek-acceleration cache.  GFP_ATOMIC: may run in irq context.
+ */
+static inline void
+tape_34xx_append_new_sbid(struct tape_34xx_block_id bid, struct list_head *l)
+{
+	struct tape_34xx_sbid *entry;
+
+	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+	if (entry) {
+		entry->bid = bid;
+		list_add(&entry->list, l);
+	}
+}
+
+/*
+ * Build up the search block ID list. The block ID consists of a logical
+ * block number and a hardware specific part. The hardware specific part
+ * helps the tape drive to speed up searching for a specific block.
+ *
+ * The list (device->discdata) is kept sorted by logical block number
+ * with at most one entry per wrap/segment combination.
+ */
+static void
+tape_34xx_add_sbid(struct tape_device *device, struct tape_34xx_block_id bid)
+{
+	struct list_head *	sbid_list;
+	struct tape_34xx_sbid *	sbid;
+	struct list_head *	l;
+
+	/*
+	 * immediately return if there is no list at all or the block to add
+	 * is located in segment 1 of wrap 0 because this position is used
+	 * if no hardware position data is supplied.
+	 */
+	sbid_list = (struct list_head *) device->discdata;
+	if (!sbid_list || (bid.segment < 2 && bid.wrap == 0))
+		return;
+
+	/*
+	 * Search the position where to insert the new entry. Hardware
+	 * acceleration uses only the segment and wrap number. So we
+	 * need only one entry for a specific wrap/segment combination.
+	 * If there is a block with a lower number but the same hard-
+	 * ware position data we just update the block number in the
+	 * existing entry.
+	 */
+	list_for_each(l, sbid_list) {
+		sbid = list_entry(l, struct tape_34xx_sbid, list);
+
+		if (
+			(sbid->bid.segment == bid.segment) &&
+			(sbid->bid.wrap    == bid.wrap)
+		) {
+			/* Same hardware position: keep the lower block number. */
+			if (bid.block < sbid->bid.block)
+				sbid->bid = bid;
+			else return;
+			break;
+		}
+
+		/* Sort in according to logical block number. */
+		if (bid.block < sbid->bid.block) {
+			tape_34xx_append_new_sbid(bid, l->prev);
+			break;
+		}
+	}
+	/* List empty or new block bigger than last entry.
+	 * (l only equals sbid_list when the loop above ran to completion
+	 * without breaking, i.e. no insertion point was found.) */
+	if (l == sbid_list)
+		tape_34xx_append_new_sbid(bid, l->prev);
+
+	DBF_LH(4, "Current list is:\n");
+	list_for_each(l, sbid_list) {
+		sbid = list_entry(l, struct tape_34xx_sbid, list);
+		DBF_LH(4, "%d:%03d@%05d\n",
+			sbid->bid.wrap,
+			sbid->bid.segment,
+			sbid->bid.block
+		);
+	}
+}
+
+/*
+ * Delete all entries from the search block ID list that belong to tape blocks
+ * equal or higher than the given number.  Called with from == 0 whenever
+ * the tape position becomes untrusted (rewind, unload, write, fence, ...).
+ */
+static void
+tape_34xx_delete_sbid_from(struct tape_device *device, int from)
+{
+	struct list_head *	sbid_list;
+	struct tape_34xx_sbid *	sbid;
+	struct list_head *	l;
+	struct list_head *	n;
+
+	/* No cache allocated for this device — nothing to do. */
+	sbid_list = (struct list_head *) device->discdata;
+	if (!sbid_list)
+		return;
+
+	/* _safe variant: entries are removed while walking the list. */
+	list_for_each_safe(l, n, sbid_list) {
+		sbid = list_entry(l, struct tape_34xx_sbid, list);
+		if (sbid->bid.block >= from) {
+			DBF_LH(4, "Delete sbid %d:%03d@%05d\n",
+				sbid->bid.wrap,
+				sbid->bid.segment,
+				sbid->bid.block
+			);
+			list_del(l);
+			kfree(sbid);
+		}
+	}
+}
+
+/*
+ * Merge hardware position data into a block id.  Picks the cached entry
+ * with the highest logical block number below bid->block; if none is
+ * found, wrap 0 / segment 1 is used as the "no position data" default
+ * (see tape_34xx_add_sbid).
+ */
+static void
+tape_34xx_merge_sbid(
+	struct tape_device *		device,
+	struct tape_34xx_block_id *	bid
+) {
+	struct tape_34xx_sbid *	sbid;
+	struct tape_34xx_sbid *	sbid_to_use;
+	struct list_head *	sbid_list;
+	struct list_head *	l;
+
+	sbid_list = (struct list_head *) device->discdata;
+	/* Default position: used when no cached entry applies. */
+	bid->wrap    = 0;
+	bid->segment = 1;
+
+	if (!sbid_list || list_empty(sbid_list))
+		return;
+
+	/* The list is sorted by block number; remember the last entry
+	 * that still lies before the target block. */
+	sbid_to_use = NULL;
+	list_for_each(l, sbid_list) {
+		sbid = list_entry(l, struct tape_34xx_sbid, list);
+
+		if (sbid->bid.block >= bid->block)
+			break;
+		sbid_to_use = sbid;
+	}
+	if (sbid_to_use) {
+		bid->wrap    = sbid_to_use->bid.wrap;
+		bid->segment = sbid_to_use->bid.segment;
+		DBF_LH(4, "Use %d:%03d@%05d for %05d\n",
+			sbid_to_use->bid.wrap,
+			sbid_to_use->bid.segment,
+			sbid_to_use->bid.block,
+			bid->block
+		);
+	}
+}
+
+/*
+ * Discipline setup: assign the drive, sense the medium and allocate the
+ * search-block-ID cache head.  Failure to allocate the cache is not an
+ * error — the driver simply runs without seek acceleration.
+ */
+static int
+tape_34xx_setup_device(struct tape_device * device)
+{
+	struct list_head *	discdata;
+	int			rc;
+
+	DBF_EVENT(6, "34xx device setup\n");
+	rc = tape_std_assign(device);
+	if (rc == 0) {
+		rc = tape_34xx_medium_sense(device);
+		if (rc != 0)
+			DBF_LH(3, "34xx medium sense returned %d\n", rc);
+	}
+	discdata = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+	if (discdata) {
+		INIT_LIST_HEAD(discdata);
+		device->discdata = discdata;
+	}
+	return rc;
+}
+
+/*
+ * Discipline teardown: unassign the drive and release the search-block-ID
+ * cache (entries first, then the list head).
+ */
+static void
+tape_34xx_cleanup_device(struct tape_device *device)
+{
+	struct list_head *sbid_list = device->discdata;
+
+	tape_std_unassign(device);
+
+	if (sbid_list) {
+		tape_34xx_delete_sbid_from(device, 0);
+		device->discdata = NULL;
+		kfree(sbid_list);
+	}
+}
+
+
+/*
+ * MTTELL: Tell block. Return the number of block relative to current file.
+ * The mt_count argument is unused; it exists only to match tape_mtop_fn.
+ * Read Block ID yields channel and device block IDs back to back; only
+ * the channel one (cbid) is used and also cached for faster seeks.
+ */
+static int
+tape_34xx_mttell(struct tape_device *device, int mt_count)
+{
+	struct {
+		struct tape_34xx_block_id	cbid;	/* channel block ID */
+		struct tape_34xx_block_id	dbid;	/* device block ID (unused) */
+	} __attribute__ ((packed)) block_id;
+	int rc;
+
+	rc = tape_std_read_block_id(device, (__u64 *) &block_id);
+	if (rc)
+		return rc;
+
+	/* Cache the hardware position for later tape_34xx_mtseek calls. */
+	tape_34xx_add_sbid(device, block_id.cbid);
+	return block_id.cbid.block;
+}
+
+/*
+ * MTSEEK: seek to the specified block.  Builds a MODE SET + LOCATE + NOP
+ * channel program; the block ID is enriched with cached hardware position
+ * data so the drive can fast-locate.
+ */
+static int
+tape_34xx_mtseek(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct tape_34xx_block_id *	bid;
+
+	/* The block field of the ID is only 22 bits wide. */
+	if (mt_count > 0x3fffff) {
+		DBF_EXCEPTION(6, "xsee parm\n");
+		return -EINVAL;
+	}
+	/* Three CCWs, 4 bytes of data (the block ID). */
+	request = tape_alloc_request(3, 4);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+
+	/* setup ccws */
+	request->op = TO_LBL;
+	bid         = (struct tape_34xx_block_id *) request->cpdata;
+	/* Mode-set bit 0x08 selects the 3480-XF recording format. */
+	bid->format = (*device->modeset_byte & 0x08) ?
+			TAPE34XX_FMT_3480_XF : TAPE34XX_FMT_3480;
+	bid->block  = mt_count;
+	tape_34xx_merge_sbid(device, bid);
+
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * List of 3480/3490 magnetic tape commands.
+ */
+/* NULL entries have no 34xx implementation — presumably rejected by the
+ * common mtop dispatch; verify in tape_core. */
+static tape_mtop_fn tape_34xx_mtop[TAPE_NR_MTOPS] = {
+	[MTRESET]	 = tape_std_mtreset,
+	[MTFSF]		 = tape_std_mtfsf,
+	[MTBSF]		 = tape_std_mtbsf,
+	[MTFSR]		 = tape_std_mtfsr,
+	[MTBSR]		 = tape_std_mtbsr,
+	[MTWEOF]	 = tape_std_mtweof,
+	[MTREW]		 = tape_std_mtrew,
+	[MTOFFL]	 = tape_std_mtoffl,
+	[MTNOP]		 = tape_std_mtnop,
+	[MTRETEN]	 = tape_std_mtreten,
+	[MTBSFM]	 = tape_std_mtbsfm,
+	[MTFSFM]	 = tape_std_mtfsfm,
+	[MTEOM]		 = tape_std_mteom,
+	[MTERASE]	 = tape_std_mterase,
+	[MTRAS1]	 = NULL,
+	[MTRAS2]	 = NULL,
+	[MTRAS3]	 = NULL,
+	[MTSETBLK]	 = tape_std_mtsetblk,
+	[MTSETDENSITY]	 = NULL,
+	[MTSEEK]	 = tape_34xx_mtseek,
+	[MTTELL]	 = tape_34xx_mttell,
+	[MTSETDRVBUFFER] = NULL,
+	[MTFSS]		 = NULL,
+	[MTBSS]		 = NULL,
+	[MTWSM]		 = NULL,
+	[MTLOCK]	 = NULL,
+	[MTUNLOCK]	 = NULL,
+	[MTLOAD]	 = tape_std_mtload,
+	[MTUNLOAD]	 = tape_std_mtunload,
+	[MTCOMPRESSION]	 = tape_std_mtcompression,
+	[MTSETPART]	 = NULL,
+	[MTMKPART]	 = NULL
+};
+
+/*
+ * Tape discipline structure for 3480 and 3490.
+ */
+static struct tape_discipline tape_discipline_34xx = {
+	.owner = THIS_MODULE,
+	.setup_device = tape_34xx_setup_device,
+	.cleanup_device = tape_34xx_cleanup_device,
+	.process_eov = tape_std_process_eov,
+	.irq = tape_34xx_irq,
+	.read_block = tape_std_read_block,
+	.write_block = tape_std_write_block,
+	.ioctl_fn = tape_34xx_ioctl,
+	.mtop_array = tape_34xx_mtop
+};
+
+/* Devices handled by this driver: 3480 and 3490 control unit/device. */
+static struct ccw_device_id tape_34xx_ids[] = {
+	{ CCW_DEVICE_DEVTYPE(0x3480, 0, 0x3480, 0), .driver_info = tape_3480},
+	{ CCW_DEVICE_DEVTYPE(0x3490, 0, 0x3490, 0), .driver_info = tape_3490},
+	{ /* end of list */ },
+};
+
+/* set_online callback: bind the generic tape device to this discipline. */
+static int
+tape_34xx_online(struct ccw_device *cdev)
+{
+	return tape_generic_online(
+		dev_get_drvdata(&cdev->dev),
+		&tape_discipline_34xx
+	);
+}
+
+/* CCW bus glue; everything except set_online is handled generically. */
+static struct ccw_driver tape_34xx_driver = {
+	.driver = {
+		.name = "tape_34xx",
+		.owner = THIS_MODULE,
+	},
+	.ids = tape_34xx_ids,
+	.probe = tape_generic_probe,
+	.remove = tape_generic_remove,
+	.set_online = tape_34xx_online,
+	.set_offline = tape_generic_offline,
+	.freeze = tape_generic_pm_suspend,
+	.int_class = IRQIO_TAP,
+};
+
+/*
+ * Module initialization: create the s390 debug feature area and
+ * register the CCW driver for 3480/3490 tape devices.
+ * Returns 0 on success or the error from ccw_driver_register().
+ */
+static int
+tape_34xx_init (void)
+{
+	int rc;
+
+	TAPE_DBF_AREA = debug_register ( "tape_34xx", 2, 2, 4*sizeof(long));
+	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
+#ifdef DBF_LIKE_HELL
+	debug_set_level(TAPE_DBF_AREA, 6);
+#endif
+
+	DBF_EVENT(3, "34xx init\n");
+	/* Register driver for 3480/3490 tapes. */
+	rc = ccw_driver_register(&tape_34xx_driver);
+	if (rc) {
+		DBF_EVENT(3, "34xx init failed\n");
+		/*
+		 * Module init fails and tape_34xx_exit() will never run,
+		 * so the debug area must be released here or it leaks.
+		 */
+		debug_unregister(TAPE_DBF_AREA);
+	} else
+		DBF_EVENT(3, "34xx registered\n");
+	return rc;
+}
+
+/* Module exit: unregister the driver, then tear down the debug area. */
+static void
+tape_34xx_exit(void)
+{
+	ccw_driver_unregister(&tape_34xx_driver);
+
+	debug_unregister(TAPE_DBF_AREA);
+}
+
+MODULE_DEVICE_TABLE(ccw, tape_34xx_ids);
+MODULE_AUTHOR("(C) 2001-2002 IBM Deutschland Entwicklung GmbH");
+MODULE_DESCRIPTION("Linux on zSeries channel attached 3480 tape device driver");
+MODULE_LICENSE("GPL");
+
+module_init(tape_34xx_init);
+module_exit(tape_34xx_exit);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/tape_3590.c b/src/kernel/linux/v4.14/drivers/s390/char/tape_3590.c
new file mode 100644
index 0000000..e352047
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/tape_3590.c
@@ -0,0 +1,1701 @@
+/*
+ *    tape device discipline for 3590 tapes.
+ *
+ *    Copyright IBM Corp. 2001, 2009
+ *    Author(s): Stefan Bader <shbader@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "tape_3590"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <asm/ebcdic.h>
+
+#define TAPE_DBF_AREA	tape_3590_dbf
+#define BUFSIZE 512	/* size of buffers for dynamic generated messages */
+
+#include "tape.h"
+#include "tape_std.h"
+#include "tape_3590.h"
+
+/* Work queue used to run follow-up requests scheduled from irq context. */
+static struct workqueue_struct *tape_3590_wq;
+
+/*
+ * Pointer to debug area.
+ */
+debug_info_t *TAPE_DBF_AREA = NULL;
+EXPORT_SYMBOL(TAPE_DBF_AREA);
+
+/*******************************************************************
+ * Error Recovery functions:
+ * - Read Opposite:		 implemented
+ * - Read Device (buffered) log: BRA
+ * - Read Library log:		 BRA
+ * - Swap Devices:		 BRA
+ * - Long Busy:			 implemented
+ * - Special Intercept:		 BRA
+ * - Read Alternate:		 implemented
+ *******************************************************************/
+
+/* Human-readable text for 3590 message codes, indexed by code byte. */
+static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
+	[0x00] = "",
+	[0x10] = "Lost Sense",
+	[0x11] = "Assigned Elsewhere",
+	[0x12] = "Allegiance Reset",
+	[0x13] = "Shared Access Violation",
+	[0x20] = "Command Reject",
+	[0x21] = "Configuration Error",
+	[0x22] = "Protection Exception",
+	[0x23] = "Write Protect",
+	[0x24] = "Write Length",
+	[0x25] = "Read-Only Format",
+	[0x31] = "Beginning of Partition",
+	[0x33] = "End of Partition",
+	[0x34] = "End of Data",
+	[0x35] = "Block not found",
+	[0x40] = "Device Intervention",
+	[0x41] = "Loader Intervention",
+	[0x42] = "Library Intervention",
+	[0x50] = "Write Error",
+	[0x51] = "Erase Error",
+	[0x52] = "Formatting Error",
+	[0x53] = "Read Error",
+	[0x54] = "Unsupported Format",
+	[0x55] = "No Formatting",
+	[0x56] = "Positioning lost",
+	[0x57] = "Read Length",
+	[0x60] = "Unsupported Medium",
+	[0x61] = "Medium Length Error",
+	[0x62] = "Medium removed",
+	[0x64] = "Load Check",
+	[0x65] = "Unload Check",
+	[0x70] = "Equipment Check",
+	[0x71] = "Bus out Check",
+	[0x72] = "Protocol Error",
+	[0x73] = "Interface Error",
+	[0x74] = "Overrun",
+	[0x75] = "Halt Signal",
+	[0x90] = "Device fenced",
+	[0x91] = "Device Path fenced",
+	[0xa0] = "Volume misplaced",
+	[0xa1] = "Volume inaccessible",
+	[0xa2] = "Volume in input",
+	[0xa3] = "Volume ejected",
+	[0xa4] = "All categories reserved",
+	[0xa5] = "Duplicate Volume",
+	[0xa6] = "Library Manager Offline",
+	[0xa7] = "Library Output Station full",
+	[0xa8] = "Vision System non-operational",
+	[0xa9] = "Library Manager Equipment Check",
+	[0xaa] = "Library Equipment Check",
+	[0xab] = "All Library Cells full",
+	[0xac] = "No Cleaner Volumes in Library",
+	[0xad] = "I/O Station door open",
+	[0xae] = "Subsystem environmental alert",
+};
+
+/* Does the drive support hardware encryption? (per-device crypt info) */
+static int crypt_supported(struct tape_device *device)
+{
+	return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device));
+}
+
+/* Is hardware encryption currently switched on for this device? */
+static int crypt_enabled(struct tape_device *device)
+{
+	return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device));
+}
+
+/*
+ * Convert a user-space (external) KEKL into the on-wire (internal)
+ * format: encode the hash/label types as flag bits, blank-pad the
+ * label to its full width and convert it from ASCII to EBCDIC.
+ */
+static void ext_to_int_kekl(struct tape390_kekl *in,
+			    struct tape3592_kekl *out)
+{
+	int i;
+
+	memset(out, 0, sizeof(*out));
+	if (in->type == TAPE390_KEKL_TYPE_HASH)
+		out->flags |= 0x40;
+	if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH)
+		out->flags |= 0x80;
+	/* strncpy may not terminate, but the pad loop below fills the
+	 * remainder with blanks, so no terminator is needed. */
+	strncpy(out->label, in->label, 64);
+	for (i = strlen(in->label); i < sizeof(out->label); i++)
+		out->label[i] = ' ';
+	ASCEBC(out->label, sizeof(out->label));
+}
+
+/*
+ * Convert an on-wire (internal) KEKL into the user-space (external)
+ * format: decode the type flag bits, convert the label from EBCDIC
+ * to ASCII and strip the blank padding.
+ */
+static void int_to_ext_kekl(struct tape3592_kekl *in,
+			    struct tape390_kekl *out)
+{
+	memset(out, 0, sizeof(*out));
+	if(in->flags & 0x40)
+		out->type = TAPE390_KEKL_TYPE_HASH;
+	else
+		out->type = TAPE390_KEKL_TYPE_LABEL;
+	if(in->flags & 0x80)
+		out->type_on_tape = TAPE390_KEKL_TYPE_HASH;
+	else
+		out->type_on_tape = TAPE390_KEKL_TYPE_LABEL;
+	memcpy(out->label, in->label, sizeof(in->label));
+	EBCASC(out->label, sizeof(in->label));
+	strim(out->label);
+}
+
+/*
+ * Convert a pair of internal KEKLs to external format.  Missing
+ * entries (count < 2) are reported as TAPE390_KEKL_TYPE_NONE.
+ * NOTE(review): printk lacks a KERN_* level, and BUG() here fires on
+ * a count that appears to come from subsystem data — confirm whether
+ * a graceful error would be more appropriate.
+ */
+static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
+				 struct tape390_kekl_pair *out)
+{
+	if (in->count == 0) {
+		out->kekl[0].type = TAPE390_KEKL_TYPE_NONE;
+		out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE;
+		out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
+		out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
+	} else if (in->count == 1) {
+		int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
+		out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
+		out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
+	} else if (in->count == 2) {
+		int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
+		int_to_ext_kekl(&in->kekl[1], &out->kekl[1]);
+	} else {
+		printk("Invalid KEKL number: %d\n", in->count);
+		BUG();
+	}
+}
+
+/*
+ * Validate a single external KEKL: both type fields must be a known,
+ * non-NONE value, and a hashed key may not be stored as a label on
+ * tape.  Returns 0 if valid, -EINVAL otherwise.
+ */
+static int check_ext_kekl(struct tape390_kekl *kekl)
+{
+	if (kekl->type == TAPE390_KEKL_TYPE_NONE ||
+	    kekl->type > TAPE390_KEKL_TYPE_HASH ||
+	    kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE ||
+	    kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH ||
+	    (kekl->type == TAPE390_KEKL_TYPE_HASH &&
+	     kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL))
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Validate a pair of external KEKLs; both entries must pass
+ * check_ext_kekl().  Returns 0 if valid, -EINVAL otherwise.
+ */
+static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls)
+{
+	if (check_ext_kekl(&kekls->kekl[0]) ||
+	    check_ext_kekl(&kekls->kekl[1]))
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Query KEKLs
+ */
+static int tape_3592_kekl_query(struct tape_device *device,
+				struct tape390_kekl_pair *ext_kekls)
+{
+	struct tape_request *request;
+	struct tape3592_kekl_query_order *order;
+	struct tape3592_kekl_query_data *int_kekls;
+	int rc;
+
+	DBF_EVENT(6, "tape3592_kekl_query\n");
+	/* Response buffer is a channel target, hence GFP_DMA. */
+	int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA);
+	if (!int_kekls)
+		return -ENOMEM;
+	request = tape_alloc_request(2, sizeof(*order));
+	if (IS_ERR(request)) {
+		rc = PTR_ERR(request);
+		goto fail_malloc;
+	}
+	/* Subsystem order 0xe2: query up to two KEKLs. */
+	order = request->cpdata;
+	memset(order,0,sizeof(*order));
+	order->code = 0xe2;
+	order->max_count = 2;
+	request->op = TO_KEKL_QUERY;
+	tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
+	tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls),
+		     int_kekls);
+	rc = tape_do_io(device, request);
+	if (rc)
+		goto fail_request;
+	/* Translate the device answer into user-space representation. */
+	int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls);
+
+	rc = 0;
+fail_request:
+	tape_free_request(request);
+fail_malloc:
+	kfree(int_kekls);
+	return rc;
+}
+
+/*
+ * IOCTL: Query KEKLs
+ */
+static int tape_3592_ioctl_kekl_query(struct tape_device *device,
+				      unsigned long arg)
+{
+	int rc;
+	struct tape390_kekl_pair *ext_kekls;
+
+	DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n");
+	/* Reject early if the drive cannot do / has not enabled crypto. */
+	if (!crypt_supported(device))
+		return -ENOSYS;
+	if (!crypt_enabled(device))
+		return -EUNATCH;
+	ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
+	if (!ext_kekls)
+		return -ENOMEM;
+	rc = tape_3592_kekl_query(device, ext_kekls);
+	if (rc != 0)
+		goto fail;
+	if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) {
+		rc = -EFAULT;
+		goto fail;
+	}
+	rc = 0;
+fail:
+	kfree(ext_kekls);
+	return rc;
+}
+
+static int tape_3590_mttell(struct tape_device *device, int mt_count);
+
+/*
+ * Set KEKLs
+ */
+static int tape_3592_kekl_set(struct tape_device *device,
+			      struct tape390_kekl_pair *ext_kekls)
+{
+	struct tape_request *request;
+	struct tape3592_kekl_set_order *order;
+
+	DBF_EVENT(6, "tape3592_kekl_set\n");
+	if (check_ext_kekl_pair(ext_kekls)) {
+		DBF_EVENT(6, "invalid kekls\n");
+		return -EINVAL;
+	}
+	/* KEKLs may only be set at the start of the tape (block 0). */
+	if (tape_3590_mttell(device, 0) != 0)
+		return -EBADSLT;
+	request = tape_alloc_request(1, sizeof(*order));
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	/* Subsystem order 0xe3: set both KEKLs in internal format. */
+	order = request->cpdata;
+	memset(order, 0, sizeof(*order));
+	order->code = 0xe3;
+	order->kekls.count = 2;
+	ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]);
+	ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]);
+	request->op = TO_KEKL_SET;
+	tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
+
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * IOCTL: Set KEKLs
+ */
+static int tape_3592_ioctl_kekl_set(struct tape_device *device,
+				    unsigned long arg)
+{
+	int rc;
+	struct tape390_kekl_pair *ext_kekls;
+
+	DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n");
+	if (!crypt_supported(device))
+		return -ENOSYS;
+	if (!crypt_enabled(device))
+		return -EUNATCH;
+	/* memdup_user copies the user buffer into a fresh kernel copy. */
+	ext_kekls = memdup_user((char __user *)arg, sizeof(*ext_kekls));
+	if (IS_ERR(ext_kekls))
+		return PTR_ERR(ext_kekls);
+	rc = tape_3592_kekl_set(device, ext_kekls);
+	kfree(ext_kekls);
+	return rc;
+}
+
+/*
+ * Enable encryption
+ */
+/*
+ * Build (but do not start) the two-CCW mode-set chain that switches
+ * hardware encryption on.  Returns the request or an ERR_PTR.
+ */
+static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device)
+{
+	struct tape_request *request;
+	char *data;
+
+	DBF_EVENT(6, "tape_3592_enable_crypt\n");
+	if (!crypt_supported(device))
+		return ERR_PTR(-ENOSYS);
+	request = tape_alloc_request(2, 72);
+	if (IS_ERR(request))
+		return request;
+	data = request->cpdata;
+	memset(data,0,72);
+
+	/* Two 36-byte MODE SET CB payloads; byte values are the
+	 * documented enable-encryption pattern — see drive docs. */
+	data[0]       = 0x05;
+	data[36 + 0]  = 0x03;
+	data[36 + 1]  = 0x03;
+	data[36 + 4]  = 0x40;
+	data[36 + 6]  = 0x01;
+	data[36 + 14] = 0x2f;
+	data[36 + 18] = 0xc3;
+	data[36 + 35] = 0x72;
+	request->op = TO_CRYPT_ON;
+	tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
+	tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
+	return request;
+}
+
+/* Synchronous variant: build the enable chain and wait for completion. */
+static int tape_3592_enable_crypt(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_enable_crypt(device);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	return tape_do_io_free(device, request);
+}
+
+/* Async variant, used from the work handler; errors are dropped. */
+static void tape_3592_enable_crypt_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_enable_crypt(device);
+	if (!IS_ERR(request))
+		tape_do_io_async_free(device, request);
+}
+
+/*
+ * Disable encryption
+ */
+/*
+ * Build (but do not start) the two-CCW mode-set chain that switches
+ * hardware encryption off.  Returns the request or an ERR_PTR.
+ */
+static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device)
+{
+	struct tape_request *request;
+	char *data;
+
+	DBF_EVENT(6, "tape_3592_disable_crypt\n");
+	if (!crypt_supported(device))
+		return ERR_PTR(-ENOSYS);
+	request = tape_alloc_request(2, 72);
+	if (IS_ERR(request))
+		return request;
+	data = request->cpdata;
+	memset(data,0,72);
+
+	/* Documented disable-encryption mode-set pattern. */
+	data[0]       = 0x05;
+	data[36 + 0]  = 0x03;
+	data[36 + 1]  = 0x03;
+	data[36 + 35] = 0x32;
+
+	request->op = TO_CRYPT_OFF;
+	tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
+	tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
+
+	return request;
+}
+
+/* Synchronous variant: build the disable chain and wait for completion. */
+static int tape_3592_disable_crypt(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_disable_crypt(device);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	return tape_do_io_free(device, request);
+}
+
+/* Async variant, used from the work handler; errors are dropped. */
+static void tape_3592_disable_crypt_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_disable_crypt(device);
+	if (!IS_ERR(request))
+		tape_do_io_async_free(device, request);
+}
+
+/*
+ * IOCTL: Set encryption status
+ */
+static int tape_3592_ioctl_crypt_set(struct tape_device *device,
+				     unsigned long arg)
+{
+	struct tape390_crypt_info info;
+
+	DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n");
+	if (!crypt_supported(device))
+		return -ENOSYS;
+	if (copy_from_user(&info, (char __user *)arg, sizeof(info)))
+		return -EFAULT;
+	/* Only the on/off bit may be set by user space. */
+	if (info.status & ~TAPE390_CRYPT_ON_MASK)
+		return -EINVAL;
+	if (info.status & TAPE390_CRYPT_ON_MASK)
+		return tape_3592_enable_crypt(device);
+	else
+		return tape_3592_disable_crypt(device);
+}
+
+static int tape_3590_sense_medium(struct tape_device *device);
+
+/*
+ * IOCTL: Query encryption status
+ */
+static int tape_3592_ioctl_crypt_query(struct tape_device *device,
+				       unsigned long arg)
+{
+	DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n");
+	if (!crypt_supported(device))
+		return -ENOSYS;
+	/* Refresh the cached crypt info via medium sense.
+	 * NOTE(review): the return value is ignored, so a failed sense
+	 * silently reports possibly stale information. */
+	tape_3590_sense_medium(device);
+	if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device),
+		sizeof(TAPE_3590_CRYPT_INFO(device))))
+		return -EFAULT;
+	else
+		return 0;
+}
+
+/*
+ * 3590 IOCTL Overload
+ */
+/*
+ * Discipline ioctl hook: handles the 3590-specific ioctls; everything
+ * else is rejected with -EINVAL so the common code can take over.
+ */
+static int
+tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case TAPE390_DISPLAY: {
+		struct display_struct disp;
+
+		if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)))
+			return -EFAULT;
+
+		return tape_std_display(device, &disp);
+	}
+	case TAPE390_KEKL_SET:
+		return tape_3592_ioctl_kekl_set(device, arg);
+	case TAPE390_KEKL_QUERY:
+		return tape_3592_ioctl_kekl_query(device, arg);
+	case TAPE390_CRYPT_SET:
+		return tape_3592_ioctl_crypt_set(device, arg);
+	case TAPE390_CRYPT_QUERY:
+		return tape_3592_ioctl_crypt_query(device, arg);
+	default:
+		return -EINVAL;	/* no additional ioctls */
+	}
+}
+
+/*
+ * SENSE Medium: Get Sense data about medium state
+ */
+/* Issue a 128-byte MEDIUM_SENSE and wait; result is processed in
+ * tape_3590_done() via tape_3590_med_state_set(). */
+static int tape_3590_sense_medium(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(1, 128);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
+	return tape_do_io_free(device, request);
+}
+
+/* Same as above but fire-and-forget, for use from the work handler. */
+static void tape_3590_sense_medium_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(1, 128);
+	if (IS_ERR(request))
+		return;
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
+	tape_do_io_async_free(device, request);
+}
+
+/*
+ * MTTELL: Tell block. Return the number of block relative to current file.
+ */
+static int
+tape_3590_mttell(struct tape_device *device, int mt_count)
+{
+	__u64 block_id;
+	int rc;
+
+	rc = tape_std_read_block_id(device, &block_id);
+	if (rc)
+		return rc;
+	/* The block number is carried in the upper 32 bits. */
+	return block_id >> 32;
+}
+
+/*
+ * MTSEEK: seek to the specified block.
+ */
+static int
+tape_3590_mtseek(struct tape_device *device, int count)
+{
+	struct tape_request *request;
+
+	DBF_EVENT(6, "xsee id: %x\n", count);
+	request = tape_alloc_request(3, 4);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_LBL;
+	/* Chain: mode set, locate block, terminating no-op. */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	*(__u32 *) request->cpdata = count;
+	tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * Read Opposite Error Recovery Function:
+ * Used, when Read Forward does not work
+ */
+static void
+tape_3590_read_opposite(struct tape_device *device,
+			struct tape_request *request)
+{
+	struct tape_3590_disc_data *data;
+
+	/*
+	 * We have allocated 4 ccws in tape_std_read, so we can now
+	 * transform the request to a read backward, followed by a
+	 * forward space block.
+	 */
+	request->op = TO_RBA;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	/* read_back_op is READ_PREVIOUS or READ_BACKWARD, toggled by
+	 * tape_3590_erp_read_alternate() when one is unsupported. */
+	data = device->discdata;
+	tape_ccw_cc_idal(request->cpaddr + 1, data->read_back_op,
+			 device->char_data.idal_buf);
+	tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
+	tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
+	DBF_EVENT(6, "xrop ccwg\n");
+}
+
+/*
+ * Read Attention Msg
+ * This should be done after an interrupt with attention bit (0x80)
+ * in device state.
+ *
+ * After a "read attention message" request there are two possible
+ * results:
+ *
+ * 1. A unit check is presented, when attention sense is present (e.g. when
+ * a medium has been unloaded). The attention sense comes then
+ * together with the unit check. The recovery action is either "retry"
+ * (in case there is an attention message pending) or "permanent error".
+ *
+ * 2. The attention msg is written to the "read subsystem data" buffer.
+ * In this case we probably should print it to the console.
+ */
+static void tape_3590_read_attmsg_async(struct tape_device *device)
+{
+	struct tape_request *request;
+	char *buf;
+
+	request = tape_alloc_request(3, 4096);
+	if (IS_ERR(request))
+		return;		/* best effort; nothing to report to */
+	request->op = TO_READ_ATTMSG;
+	/* First 12 bytes are the prepare-order, rest receives the data. */
+	buf = request->cpdata;
+	buf[0] = PREP_RD_SS_DATA;
+	buf[6] = RD_ATTMSG;	/* read att msg */
+	tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf);
+	tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+	tape_do_io_async_free(device, request);
+}
+
+/*
+ * These functions are used to schedule follow-up actions from within an
+ * interrupt context (like unsolicited interrupts).
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchrounous, otherwise
+ * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
+ */
+struct work_handler_data {
+	struct tape_device *device;	/* ref held via tape_get_device */
+	enum tape_op        op;		/* follow-up operation to start */
+	struct work_struct  work;
+};
+
+static void
+tape_3590_work_handler(struct work_struct *work)
+{
+	struct work_handler_data *p =
+		container_of(work, struct work_handler_data, work);
+
+	/* Dispatch the deferred operation; all variants are async. */
+	switch (p->op) {
+	case TO_MSEN:
+		tape_3590_sense_medium_async(p->device);
+		break;
+	case TO_READ_ATTMSG:
+		tape_3590_read_attmsg_async(p->device);
+		break;
+	case TO_CRYPT_ON:
+		tape_3592_enable_crypt_async(p->device);
+		break;
+	case TO_CRYPT_OFF:
+		tape_3592_disable_crypt_async(p->device);
+		break;
+	default:
+		DBF_EVENT(3, "T3590: work handler undefined for "
+			  "operation 0x%02x\n", p->op);
+	}
+	/* Drop the reference taken in tape_3590_schedule_work(). */
+	tape_put_device(p->device);
+	kfree(p);
+}
+
+/*
+ * Queue a follow-up operation; safe from interrupt context
+ * (GFP_ATOMIC).  Returns 0 or -ENOMEM.
+ */
+static int
+tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
+{
+	struct work_handler_data *p;
+
+	if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
+		return -ENOMEM;
+
+	INIT_WORK(&p->work, tape_3590_work_handler);
+
+	p->device = tape_get_device(device);
+	p->op = op;
+
+	queue_work(tape_3590_wq, &p->work);
+	return 0;
+}
+
+/*
+ * Evaluate a medium-sense result: update the generic medium state and
+ * the cached encryption status of the loaded cartridge.
+ */
+static void tape_3590_med_state_set(struct tape_device *device,
+				    struct tape_3590_med_sense *sense)
+{
+	struct tape390_crypt_info *c_info;
+
+	c_info = &TAPE_3590_CRYPT_INFO(device);
+
+	DBF_EVENT(6, "medium state: %x:%x\n", sense->macst, sense->masst);
+	switch (sense->macst) {
+	case 0x04:
+	case 0x05:
+	case 0x06:
+		/* No cartridge: also forget any medium crypt status. */
+		tape_med_state_set(device, MS_UNLOADED);
+		TAPE_3590_CRYPT_INFO(device).medium_status = 0;
+		return;
+	case 0x08:
+	case 0x09:
+		tape_med_state_set(device, MS_LOADED);
+		break;
+	default:
+		tape_med_state_set(device, MS_UNKNOWN);
+		return;
+	}
+	c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK;
+	if (sense->flags & MSENSE_CRYPT_MASK) {
+		DBF_EVENT(6, "Medium is encrypted (%04x)\n", sense->flags);
+		c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK;
+	} else	{
+		DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags);
+		c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK;
+	}
+}
+
+/*
+ * The done handler is called at device/channel end and wakes up the sleeping
+ * process
+ */
+static int
+tape_3590_done(struct tape_device *device, struct tape_request *request)
+{
+
+	DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
+
+	switch (request->op) {
+	case TO_BSB:
+	case TO_BSF:
+	case TO_DSE:
+	case TO_FSB:
+	case TO_FSF:
+	case TO_LBL:
+	case TO_RFO:
+	case TO_RBA:
+	case TO_REW:
+	case TO_WRI:
+	case TO_WTM:
+	case TO_BLOCK:
+	case TO_LOAD:
+		/* Any of these succeeding implies a loaded medium. */
+		tape_med_state_set(device, MS_LOADED);
+		break;
+	case TO_RUN:
+		/* Unload done: medium gone, schedule crypt switch-off. */
+		tape_med_state_set(device, MS_UNLOADED);
+		tape_3590_schedule_work(device, TO_CRYPT_OFF);
+		break;
+	case TO_MSEN:
+		tape_3590_med_state_set(device, request->cpdata);
+		break;
+	case TO_CRYPT_ON:
+		TAPE_3590_CRYPT_INFO(device).status
+			|= TAPE390_CRYPT_ON_MASK;
+		*(device->modeset_byte) |= 0x03;
+		break;
+	case TO_CRYPT_OFF:
+		TAPE_3590_CRYPT_INFO(device).status
+			&= ~TAPE390_CRYPT_ON_MASK;
+		*(device->modeset_byte) &= ~0x03;
+		break;
+	case TO_RBI:	/* RBI seems to succeed even without medium loaded. */
+	case TO_NOP:	/* Same to NOP. */
+	case TO_READ_CONFIG:
+	case TO_READ_ATTMSG:
+	case TO_DIS:
+	case TO_ASSIGN:
+	case TO_UNASSIGN:
+	case TO_SIZE:
+	case TO_KEKL_SET:
+	case TO_KEKL_QUERY:
+	case TO_RDC:
+		/* No medium-state information to derive. */
+		break;
+	}
+	return TAPE_IO_SUCCESS;
+}
+
+/*
+ * This function is called, when error recovery was successful
+ */
+static inline int
+tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request)
+{
+	/* (Name keeps the historic "succeded" spelling.) */
+	DBF_EVENT(3, "Error Recovery successful for %s\n",
+		  tape_op_verbose[request->op]);
+	/* Treat the request as completed normally. */
+	return tape_3590_done(device, request);
+}
+
+/*
+ * This function is called, when error recovery was not successful
+ */
+static inline int
+tape_3590_erp_failed(struct tape_device *device, struct tape_request *request,
+		     struct irb *irb, int rc)
+{
+	DBF_EVENT(3, "Error Recovery failed for %s\n",
+		  tape_op_verbose[request->op]);
+	tape_dump_sense_dbf(device, request, irb);
+	return rc;
+}
+
+/*
+ * Error Recovery do retry
+ */
+static inline int
+tape_3590_erp_retry(struct tape_device *device, struct tape_request *request,
+		    struct irb *irb)
+{
+	DBF_EVENT(2, "Retry: %s\n", tape_op_verbose[request->op]);
+	tape_dump_sense_dbf(device, request, irb);
+	return TAPE_IO_RETRY;
+}
+
+/*
+ * Handle unsolicited interrupts
+ */
+static int
+tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb)
+{
+	if (irb->scsw.cmd.dstat == DEV_STAT_CHN_END)
+		/* Probably result of halt ssch */
+		return TAPE_IO_PENDING;
+	else if (irb->scsw.cmd.dstat == 0x85)
+		/* Device Ready */
+		DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id);
+	else if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		/* Attention: fetch the pending message asynchronously. */
+		tape_3590_schedule_work(device, TO_READ_ATTMSG);
+	} else {
+		DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
+		tape_dump_sense_dbf(device, NULL, irb);
+	}
+	/* check medium state */
+	tape_3590_schedule_work(device, TO_MSEN);
+	return TAPE_IO_SUCCESS;
+}
+
+/*
+ * Basic Recovery routine
+ */
+static int
+tape_3590_erp_basic(struct tape_device *device, struct tape_request *request,
+		    struct irb *irb, int rc)
+{
+	struct tape_3590_sense *sense;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+
+	/* Dispatch on the Basic Recovery Action reported in the sense. */
+	switch (sense->bra) {
+	case SENSE_BRA_PER:
+		return tape_3590_erp_failed(device, request, irb, rc);
+	case SENSE_BRA_CONT:
+		return tape_3590_erp_succeded(device, request);
+	case SENSE_BRA_RE:
+		return tape_3590_erp_retry(device, request, irb);
+	case SENSE_BRA_DRE:
+		return tape_3590_erp_failed(device, request, irb, rc);
+	default:
+		BUG();
+		return TAPE_IO_STOP;
+	}
+}
+
+/*
+ *  RDL: Read Device (buffered) log
+ */
+static int
+tape_3590_erp_read_buf_log(struct tape_device *device,
+			   struct tape_request *request, struct irb *irb)
+{
+	/*
+	 * We just do the basic error recovery at the moment (retry).
+	 * Perhaps in the future, we read the log and dump it somewhere...
+	 */
+	return tape_3590_erp_basic(device, request, irb, -EIO);
+}
+
+/*
+ *  SWAP: Swap Devices
+ */
+static int
+tape_3590_erp_swap(struct tape_device *device, struct tape_request *request,
+		   struct irb *irb)
+{
+	/*
+	 * This error recovery should swap the tapes
+	 * if the original has a problem. The operation
+	 * should proceed with the new tape... this
+	 * should probably be done in user space!
+	 */
+	dev_warn (&device->cdev->dev, "The tape medium must be loaded into a "
+		"different tape unit\n");
+	return tape_3590_erp_basic(device, request, irb, -EIO);
+}
+
+/*
+ *  LBY: Long Busy
+ */
+static int
+tape_3590_erp_long_busy(struct tape_device *device,
+			struct tape_request *request, struct irb *irb)
+{
+	/* Let the common layer wait for the device to become free. */
+	DBF_EVENT(6, "Device is busy\n");
+	return TAPE_IO_LONG_BUSY;
+}
+
+/*
+ *  SPI: Special Intercept
+ */
+static int
+tape_3590_erp_special_interrupt(struct tape_device *device,
+				struct tape_request *request, struct irb *irb)
+{
+	/* No dedicated handling yet; fall back to basic recovery. */
+	return tape_3590_erp_basic(device, request, irb, -EIO);
+}
+
+/*
+ *  RDA: Read Alternate
+ */
+static int
+tape_3590_erp_read_alternate(struct tape_device *device,
+			     struct tape_request *request, struct irb *irb)
+{
+	struct tape_3590_disc_data *data;
+
+	/*
+	 * The issued Read Backward or Read Previous command is not
+	 * supported by the device
+	 * The recovery action should be to issue another command:
+	 * Read Previous: if Read Backward is not supported
+	 * Read Backward: if Read Previous is not supported
+	 */
+	data = device->discdata;
+	if (data->read_back_op == READ_PREVIOUS) {
+		DBF_EVENT(2, "(%08x): No support for READ_PREVIOUS command\n",
+			  device->cdev_id);
+		data->read_back_op = READ_BACKWARD;
+	} else {
+		DBF_EVENT(2, "(%08x): No support for READ_BACKWARD command\n",
+			  device->cdev_id);
+		data->read_back_op = READ_PREVIOUS;
+	}
+	/* Rebuild the channel program with the alternate command. */
+	tape_3590_read_opposite(device, request);
+	return tape_3590_erp_retry(device, request, irb);
+}
+
+/*
+ * Error Recovery read opposite
+ */
+/*
+ * Error recovery "read opposite": retry a failed read in the other
+ * direction, or give up if the opposite direction already failed too.
+ * Returns a TAPE_IO_* action code or a negative error.
+ */
+static int
+tape_3590_erp_read_opposite(struct tape_device *device,
+			    struct tape_request *request, struct irb *irb)
+{
+	switch (request->op) {
+	case TO_RFO:
+		/*
+		 * We did read forward, but the data could not be read.
+		 * We will read backward and then skip forward again.
+		 */
+		tape_3590_read_opposite(device, request);
+		return tape_3590_erp_retry(device, request, irb);
+	case TO_RBA:
+		/* We tried to read forward and backward, but had no success */
+		return tape_3590_erp_failed(device, request, irb, -EIO);
+	default:
+		return tape_3590_erp_failed(device, request, irb, -EIO);
+	}
+}
+
+/*
+ * Print an MIM (Media Information  Message) (message code f0)
+ */
+/*
+ * Decode a Media Information Message (sense format 0xf0) and warn the
+ * operator.  Buffers are GFP_ATOMIC since this may run in irq context;
+ * on allocation failure the message is silently dropped (best effort).
+ */
+static void
+tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb)
+{
+	struct tape_3590_sense *sense;
+	char *exception, *service;
+
+	exception = kmalloc(BUFSIZE, GFP_ATOMIC);
+	service = kmalloc(BUFSIZE, GFP_ATOMIC);
+
+	if (!exception || !service)
+		goto out_nomem;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+	/* Exception Message */
+	switch (sense->fmt.f70.emc) {
+	case 0x02:
+		snprintf(exception, BUFSIZE, "Data degraded");
+		break;
+	case 0x03:
+		/* was "partion" — fixed to match the 0x05 message */
+		snprintf(exception, BUFSIZE, "Data degraded in partition %i",
+			sense->fmt.f70.mp);
+		break;
+	case 0x04:
+		snprintf(exception, BUFSIZE, "Medium degraded");
+		break;
+	case 0x05:
+		snprintf(exception, BUFSIZE, "Medium degraded in partition %i",
+			sense->fmt.f70.mp);
+		break;
+	case 0x06:
+		snprintf(exception, BUFSIZE, "Block 0 Error");
+		break;
+	case 0x07:
+		snprintf(exception, BUFSIZE, "Medium Exception 0x%02x",
+			sense->fmt.f70.md);
+		break;
+	default:
+		snprintf(exception, BUFSIZE, "0x%02x",
+			sense->fmt.f70.emc);
+		break;
+	}
+	/* Service Message */
+	switch (sense->fmt.f70.smc) {
+	case 0x02:
+		snprintf(service, BUFSIZE, "Reference Media maintenance "
+			"procedure %i", sense->fmt.f70.md);
+		break;
+	default:
+		snprintf(service, BUFSIZE, "0x%02x",
+			sense->fmt.f70.smc);
+		break;
+	}
+
+	dev_warn (&device->cdev->dev, "Tape media information: exception %s, "
+		"service %s\n", exception, service);
+
+out_nomem:
+	/* kfree(NULL) is a no-op, so partial allocation is handled too. */
+	kfree(exception);
+	kfree(service);
+}
+
+/*
+ * Print an I/O Subsystem Service Information Message (message code f1)
+ *
+ * Decodes the exception (emc) and service (smc) codes of a format-0x71
+ * sense buffer into human readable text and emits one dev_warn line.
+ * Unknown codes are printed as raw hex via the switch defaults.
+ */
+static void
+tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb)
+{
+	struct tape_3590_sense *sense;
+	char *exception, *service;
+
+	/* GFP_ATOMIC: this can run from interrupt context. */
+	exception = kmalloc(BUFSIZE, GFP_ATOMIC);
+	service = kmalloc(BUFSIZE, GFP_ATOMIC);
+
+	if (!exception || !service)
+		goto out_nomem;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+	/* Exception Message */
+	switch (sense->fmt.f71.emc) {
+	case 0x01:
+		snprintf(exception, BUFSIZE, "Effect of failure is unknown");
+		break;
+	case 0x02:
+		snprintf(exception, BUFSIZE, "CU Exception - no performance "
+			"impact");
+		break;
+	case 0x03:
+		snprintf(exception, BUFSIZE, "CU Exception on channel "
+			"interface 0x%02x", sense->fmt.f71.md[0]);
+		break;
+	case 0x04:
+		snprintf(exception, BUFSIZE, "CU Exception on device path "
+			"0x%02x", sense->fmt.f71.md[0]);
+		break;
+	case 0x05:
+		snprintf(exception, BUFSIZE, "CU Exception on library path "
+			"0x%02x", sense->fmt.f71.md[0]);
+		break;
+	case 0x06:
+		snprintf(exception, BUFSIZE, "CU Exception on node 0x%02x",
+			sense->fmt.f71.md[0]);
+		break;
+	case 0x07:
+		snprintf(exception, BUFSIZE, "CU Exception on partition "
+			"0x%02x", sense->fmt.f71.md[0]);
+		break;
+	default:
+		snprintf(exception, BUFSIZE, "0x%02x",
+			sense->fmt.f71.emc);
+	}
+	/* Service Message; mdf selects single unit (md[1]) vs range
+	 * (md[1]-md[2]) wording. */
+	switch (sense->fmt.f71.smc) {
+	case 0x01:
+		snprintf(service, BUFSIZE, "Repair impact is unknown");
+		break;
+	case 0x02:
+		snprintf(service, BUFSIZE, "Repair will not impact cu "
+			"performance");
+		break;
+	case 0x03:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable node "
+				"0x%x on CU", sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"nodes (0x%x-0x%x) on CU", sense->fmt.f71.md[1],
+				sense->fmt.f71.md[2]);
+		break;
+	case 0x04:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"channel path 0x%x on CU",
+				sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable channel"
+				" paths (0x%x-0x%x) on CU",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x05:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable device"
+				" path 0x%x on CU", sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable device"
+				" paths (0x%x-0x%x) on CU",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x06:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"library path 0x%x on CU",
+				sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"library paths (0x%x-0x%x) on CU",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x07:
+		snprintf(service, BUFSIZE, "Repair will disable access to CU");
+		break;
+	default:
+		snprintf(service, BUFSIZE, "0x%02x",
+			sense->fmt.f71.smc);
+	}
+
+	dev_warn (&device->cdev->dev, "I/O subsystem information: exception"
+		" %s, service %s\n", exception, service);
+out_nomem:
+	/* kfree(NULL) is a no-op, so a partial allocation is safe here. */
+	kfree(exception);
+	kfree(service);
+}
+
+/*
+ * Print an Device Subsystem Service Information Message (message code f2)
+ *
+ * Same format-0x71 sense layout as the I/O subsystem message, but the
+ * codes refer to the device ("DV") rather than the control unit.
+ * NOTE(review): there is no case 0x06 in the service switch, so that
+ * code falls through to the raw-hex default - presumably intentional,
+ * confirm against the 3590 hardware reference.
+ */
+static void
+tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb)
+{
+	struct tape_3590_sense *sense;
+	char *exception, *service;
+
+	/* GFP_ATOMIC: this can run from interrupt context. */
+	exception = kmalloc(BUFSIZE, GFP_ATOMIC);
+	service = kmalloc(BUFSIZE, GFP_ATOMIC);
+
+	if (!exception || !service)
+		goto out_nomem;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+	/* Exception Message */
+	switch (sense->fmt.f71.emc) {
+	case 0x01:
+		snprintf(exception, BUFSIZE, "Effect of failure is unknown");
+		break;
+	case 0x02:
+		snprintf(exception, BUFSIZE, "DV Exception - no performance"
+			" impact");
+		break;
+	case 0x03:
+		snprintf(exception, BUFSIZE, "DV Exception on channel "
+			"interface 0x%02x", sense->fmt.f71.md[0]);
+		break;
+	case 0x04:
+		snprintf(exception, BUFSIZE, "DV Exception on loader 0x%02x",
+			sense->fmt.f71.md[0]);
+		break;
+	case 0x05:
+		snprintf(exception, BUFSIZE, "DV Exception on message display"
+			" 0x%02x", sense->fmt.f71.md[0]);
+		break;
+	case 0x06:
+		snprintf(exception, BUFSIZE, "DV Exception in tape path");
+		break;
+	case 0x07:
+		snprintf(exception, BUFSIZE, "DV Exception in drive");
+		break;
+	default:
+		snprintf(exception, BUFSIZE, "0x%02x",
+			sense->fmt.f71.emc);
+	}
+	/* Service Message; mdf selects single unit (md[1]) vs range
+	 * (md[1]-md[2]) wording. */
+	switch (sense->fmt.f71.smc) {
+	case 0x01:
+		snprintf(service, BUFSIZE, "Repair impact is unknown");
+		break;
+	case 0x02:
+		snprintf(service, BUFSIZE, "Repair will not impact device "
+			"performance");
+		break;
+	case 0x03:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"channel path 0x%x on DV",
+				sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"channel path (0x%x-0x%x) on DV",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x04:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"interface 0x%x on DV", sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"interfaces (0x%x-0x%x) on DV",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x05:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable loader"
+				" 0x%x on DV", sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable loader"
+				" (0x%x-0x%x) on DV",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x07:
+		snprintf(service, BUFSIZE, "Repair will disable access to DV");
+		break;
+	case 0x08:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"message display 0x%x on DV",
+				sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"message displays (0x%x-0x%x) on DV",
+				 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x09:
+		snprintf(service, BUFSIZE, "Clean DV");
+		break;
+	default:
+		snprintf(service, BUFSIZE, "0x%02x",
+			sense->fmt.f71.smc);
+	}
+
+	dev_warn (&device->cdev->dev, "Device subsystem information: exception"
+		" %s, service %s\n", exception, service);
+out_nomem:
+	/* kfree(NULL) is a no-op, so a partial allocation is safe here. */
+	kfree(exception);
+	kfree(service);
+}
+
+/*
+ * Print standard ERA Message
+ *
+ * Dispatch on the sense message code (mc):
+ *  - 0x01..TAPE_3590_MAX_MSG-1: look up a canned text in tape_3590_msg[]
+ *  - 0xf0: media information message (MIM), decoded by the f0 helper
+ *  - 0xf1: I/O subsystem SIM, decoded by the f1 helper
+ *  - 0xf2: device subsystem SIM, decoded by the f2 helper
+ *  - 0xf3: library SIM - intentionally silent
+ *  - anything else: warn about the unknown code
+ * mc == 0 means no message; return without output.
+ */
+static void
+tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
+{
+	struct tape_3590_sense *sense;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+	if (sense->mc == 0)
+		return;
+	if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) {
+		if (tape_3590_msg[sense->mc] != NULL)
+			dev_warn (&device->cdev->dev, "The tape unit has "
+				"issued sense message %s\n",
+				tape_3590_msg[sense->mc]);
+		else
+			dev_warn (&device->cdev->dev, "The tape unit has "
+				"issued an unknown sense message code 0x%x\n",
+				sense->mc);
+		return;
+	}
+	if (sense->mc == 0xf0) {
+		/* Standard Media Information Message */
+		dev_warn (&device->cdev->dev, "MIM SEV=%i, MC=%02x, ES=%x/%x, "
+			"RC=%02x-%04x-%02x\n", sense->fmt.f70.sev, sense->mc,
+			sense->fmt.f70.emc, sense->fmt.f70.smc,
+			sense->fmt.f70.refcode, sense->fmt.f70.mid,
+			sense->fmt.f70.fid);
+		tape_3590_print_mim_msg_f0(device, irb);
+		return;
+	}
+	if (sense->mc == 0xf1) {
+		/* Standard I/O Subsystem Service Information Message */
+		dev_warn (&device->cdev->dev, "IOSIM SEV=%i, DEVTYPE=3590/%02x,"
+			" MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
+			sense->fmt.f71.sev, device->cdev->id.dev_model,
+			sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc,
+			sense->fmt.f71.refcode1, sense->fmt.f71.refcode2,
+			sense->fmt.f71.refcode3);
+		tape_3590_print_io_sim_msg_f1(device, irb);
+		return;
+	}
+	if (sense->mc == 0xf2) {
+		/* Standard Device Service Information Message */
+		dev_warn (&device->cdev->dev, "DEVSIM SEV=%i, DEVTYPE=3590/%02x"
+			", MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
+			sense->fmt.f71.sev, device->cdev->id.dev_model,
+			sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc,
+			sense->fmt.f71.refcode1, sense->fmt.f71.refcode2,
+			sense->fmt.f71.refcode3);
+		tape_3590_print_dev_sim_msg_f2(device, irb);
+		return;
+	}
+	if (sense->mc == 0xf3) {
+		/* Standard Library Service Information Message */
+		return;
+	}
+	dev_warn (&device->cdev->dev, "The tape unit has issued an unknown "
+		"sense message code %x\n", sense->mc);
+}
+
+/*
+ * Map an encryption (EKM) failure reported in the sense data onto an
+ * errno and run basic error recovery with it.
+ *
+ * cu_rc is the control-unit reason code (sense byte 0); ekm_rc2 is the
+ * 16-bit EKM reason taken directly from sense bytes 10-11 (s390 is
+ * big-endian, so a raw load matches the wire format).
+ */
+static int tape_3590_crypt_error(struct tape_device *device,
+				 struct tape_request *request, struct irb *irb)
+{
+	u8 cu_rc;
+	u16 ekm_rc2;
+	char *sense;
+
+	sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data;
+	cu_rc = sense[0];
+	ekm_rc2 = *((u16*) &sense[10]);
+	if ((cu_rc == 0) && (ekm_rc2 == 0xee31))
+		/* key not defined on EKM */
+		return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED);
+	if ((cu_rc == 1) || (cu_rc == 2))
+		/* No connection to EKM */
+		return tape_3590_erp_basic(device, request, irb, -ENOTCONN);
+
+	dev_err (&device->cdev->dev, "The tape unit failed to obtain the "
+		"encryption key from EKM\n");
+
+	return tape_3590_erp_basic(device, request, irb, -ENOKEY);
+}
+
+/*
+ *  3590 error Recovery routine:
+ *  If possible, it tries to recover from the error. If this is not possible,
+ *  inform the user about the problem.
+ *
+ *  Dispatches on the 16-bit RC-RQC (reason code / reason qualifier)
+ *  field of the sense data; each case hands off to a specialised ERP
+ *  helper which returns the final TAPE_IO_* / errno result.
+ */
+static int
+tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
+		     struct irb *irb)
+{
+	struct tape_3590_sense *sense;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+
+	DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc);
+
+	/*
+	 * First check all RC-QRCs where we want to do something special
+	 *   - "break":     basic error recovery is done
+	 *   - "goto out:": just print error message if available
+	 */
+	switch (sense->rc_rqc) {
+
+	case 0x1110:
+		tape_3590_print_era_msg(device, irb);
+		return tape_3590_erp_read_buf_log(device, request, irb);
+
+	case 0x2011:
+		tape_3590_print_era_msg(device, irb);
+		return tape_3590_erp_read_alternate(device, request, irb);
+
+	case 0x2230:
+	case 0x2231:
+		tape_3590_print_era_msg(device, irb);
+		return tape_3590_erp_special_interrupt(device, request, irb);
+	case 0x2240:
+		return tape_3590_crypt_error(device, request, irb);
+
+	case 0x3010:
+		DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n",
+			  device->cdev_id);
+		return tape_3590_erp_basic(device, request, irb, -ENOSPC);
+	case 0x3012:
+		DBF_EVENT(2, "(%08x): Forward at End of Partition\n",
+			  device->cdev_id);
+		return tape_3590_erp_basic(device, request, irb, -ENOSPC);
+	case 0x3020:
+		DBF_EVENT(2, "(%08x): End of Data Mark\n", device->cdev_id);
+		return tape_3590_erp_basic(device, request, irb, -ENOSPC);
+
+	case 0x3122:
+		DBF_EVENT(2, "(%08x): Rewind Unload initiated\n",
+			  device->cdev_id);
+		return tape_3590_erp_basic(device, request, irb, -EIO);
+	case 0x3123:
+		DBF_EVENT(2, "(%08x): Rewind Unload complete\n",
+			  device->cdev_id);
+		/* Medium is gone: update state and drop crypto association. */
+		tape_med_state_set(device, MS_UNLOADED);
+		tape_3590_schedule_work(device, TO_CRYPT_OFF);
+		return tape_3590_erp_basic(device, request, irb, 0);
+
+	case 0x4010:
+		/*
+		 * print additional msg since default msg
+		 * "device intervention" is not very meaningful
+		 */
+		tape_med_state_set(device, MS_UNLOADED);
+		tape_3590_schedule_work(device, TO_CRYPT_OFF);
+		return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
+	case 0x4012:		/* Device Long Busy */
+		/* XXX: Also use long busy handling here? */
+		DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id);
+		tape_3590_print_era_msg(device, irb);
+		return tape_3590_erp_basic(device, request, irb, -EBUSY);
+	case 0x4014:
+		DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id);
+		return tape_3590_erp_long_busy(device, request, irb);
+
+	case 0x5010:
+		/* RAC (recovery action code) refines the 0x5010 case. */
+		if (sense->rac == 0xd0) {
+			/* Swap */
+			tape_3590_print_era_msg(device, irb);
+			return tape_3590_erp_swap(device, request, irb);
+		}
+		if (sense->rac == 0x26) {
+			/* Read Opposite */
+			tape_3590_print_era_msg(device, irb);
+			return tape_3590_erp_read_opposite(device, request,
+							   irb);
+		}
+		return tape_3590_erp_basic(device, request, irb, -EIO);
+	case 0x5020:
+	case 0x5021:
+	case 0x5022:
+	case 0x5040:
+	case 0x5041:
+	case 0x5042:
+		tape_3590_print_era_msg(device, irb);
+		return tape_3590_erp_swap(device, request, irb);
+
+	case 0x5110:
+	case 0x5111:
+		return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE);
+
+	case 0x5120:
+	case 0x1120:
+		tape_med_state_set(device, MS_UNLOADED);
+		tape_3590_schedule_work(device, TO_CRYPT_OFF);
+		return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
+
+	case 0x6020:
+		return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE);
+
+	case 0x8011:
+		return tape_3590_erp_basic(device, request, irb, -EPERM);
+	case 0x8013:
+		dev_warn (&device->cdev->dev, "A different host has privileged"
+			" access to the tape unit\n");
+		return tape_3590_erp_basic(device, request, irb, -EPERM);
+	default:
+		return tape_3590_erp_basic(device, request, irb, -EIO);
+	}
+}
+
+/*
+ * 3590 interrupt handler:
+ *
+ * Classifies the interrupt by the device-status bits in the IRB and
+ * either completes the request, starts unit-check error recovery, or
+ * keeps the request pending.  A NULL request means the interrupt is
+ * unsolicited.
+ */
+static int
+tape_3590_irq(struct tape_device *device, struct tape_request *request,
+	      struct irb *irb)
+{
+	if (request == NULL)
+		return tape_3590_unsolicited_irq(device, irb);
+
+	if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
+	    (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
+	    (request->op == TO_WRI)) {
+		/* Write at end of volume */
+		DBF_EVENT(2, "End of volume\n");
+		return tape_3590_erp_failed(device, request, irb, -ENOSPC);
+	}
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+		return tape_3590_unit_check(device, request, irb);
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+		/* Exact equality: unit exception with no other status bit. */
+		if (irb->scsw.cmd.dstat == DEV_STAT_UNIT_EXCEP) {
+			if (request->op == TO_FSB || request->op == TO_BSB)
+				request->rescnt++;
+			else
+				DBF_EVENT(5, "Unit Exception!\n");
+		}
+
+		return tape_3590_done(device, request);
+	}
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) {
+		DBF_EVENT(2, "channel end\n");
+		return TAPE_IO_PENDING;
+	}
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		DBF_EVENT(2, "Unit Attention when busy..\n");
+		return TAPE_IO_PENDING;
+	}
+
+	DBF_EVENT(6, "xunknownirq\n");
+	tape_dump_sense_dbf(device, request, irb);
+	return TAPE_IO_STOP;
+}
+
+
+/*
+ * Issue a Read Device Characteristics (RDC) channel command and copy
+ * the result into *rdc_data.  Returns 0 on success or a negative errno
+ * from request allocation / channel I/O.
+ */
+static int tape_3590_read_dev_chars(struct tape_device *device,
+				    struct tape_3590_rdc_data *rdc_data)
+{
+	int rc;
+	struct tape_request *request;
+
+	/* One CCW plus a DMA-capable data area of sizeof(*rdc_data). */
+	request = tape_alloc_request(1, sizeof(*rdc_data));
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_RDC;
+	tape_ccw_end(request->cpaddr, CCW_CMD_RDC, sizeof(*rdc_data),
+		     request->cpdata);
+	rc = tape_do_io(device, request);
+	if (rc == 0)
+		memcpy(rdc_data, request->cpdata, sizeof(*rdc_data));
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * Setup device function
+ *
+ * Allocates per-device discipline data, reads the device
+ * characteristics, assigns the drive to this host, probes for crypto
+ * capability (RDC byte 31 == 0x13) and senses whether a medium is
+ * loaded.  On any failure all allocations are released via the goto
+ * cleanup chain and a negative errno is returned.
+ */
+static int
+tape_3590_setup_device(struct tape_device *device)
+{
+	int rc;
+	struct tape_3590_disc_data *data;
+	struct tape_3590_rdc_data *rdc_data;
+
+	DBF_EVENT(6, "3590 device setup\n");
+	/* GFP_DMA: the discipline data is used as channel I/O buffer space. */
+	data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA);
+	if (data == NULL)
+		return -ENOMEM;
+	data->read_back_op = READ_PREVIOUS;
+	device->discdata = data;
+
+	rdc_data = kmalloc(sizeof(*rdc_data), GFP_KERNEL | GFP_DMA);
+	if (!rdc_data) {
+		rc = -ENOMEM;
+		goto fail_kmalloc;
+	}
+	rc = tape_3590_read_dev_chars(device, rdc_data);
+	if (rc) {
+		DBF_LH(3, "Read device characteristics failed!\n");
+		goto fail_rdc_data;
+	}
+	rc = tape_std_assign(device);
+	if (rc)
+		goto fail_rdc_data;
+	if (rdc_data->data[31] == 0x13) {
+		/* Crypto-capable drive: record it and start with crypto off. */
+		data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK;
+		tape_3592_disable_crypt(device);
+	} else {
+		DBF_EVENT(6, "Device has NO crypto support\n");
+	}
+	/* Try to find out if medium is loaded */
+	rc = tape_3590_sense_medium(device);
+	if (rc) {
+		DBF_LH(3, "3590 medium sense returned %d\n", rc);
+		goto fail_rdc_data;
+	}
+	return 0;
+
+fail_rdc_data:
+	kfree(rdc_data);
+fail_kmalloc:
+	kfree(data);
+	return rc;
+}
+
+/*
+ * Cleanup device function
+ *
+ * Counterpart of tape_3590_setup_device(): drains any pending crypto
+ * work items, unassigns the drive and frees the discipline data.
+ */
+static void
+tape_3590_cleanup_device(struct tape_device *device)
+{
+	flush_workqueue(tape_3590_wq);
+	tape_std_unassign(device);
+
+	kfree(device->discdata);
+	device->discdata = NULL;
+}
+
+/*
+ * List of 3590 magnetic tape commands.
+ *
+ * Indexed by the MTIOCTOP operation code; a NULL entry means the
+ * operation is not supported on 3590 hardware.
+ */
+static tape_mtop_fn tape_3590_mtop[TAPE_NR_MTOPS] = {
+	[MTRESET]	 = tape_std_mtreset,
+	[MTFSF]		 = tape_std_mtfsf,
+	[MTBSF]		 = tape_std_mtbsf,
+	[MTFSR]		 = tape_std_mtfsr,
+	[MTBSR]		 = tape_std_mtbsr,
+	[MTWEOF]	 = tape_std_mtweof,
+	[MTREW]		 = tape_std_mtrew,
+	[MTOFFL]	 = tape_std_mtoffl,
+	[MTNOP]		 = tape_std_mtnop,
+	[MTRETEN]	 = tape_std_mtreten,
+	[MTBSFM]	 = tape_std_mtbsfm,
+	[MTFSFM]	 = tape_std_mtfsfm,
+	[MTEOM]		 = tape_std_mteom,
+	[MTERASE]	 = tape_std_mterase,
+	[MTRAS1]	 = NULL,
+	[MTRAS2]	 = NULL,
+	[MTRAS3]	 = NULL,
+	[MTSETBLK]	 = tape_std_mtsetblk,
+	[MTSETDENSITY]	 = NULL,
+	[MTSEEK]	 = tape_3590_mtseek,
+	[MTTELL]	 = tape_3590_mttell,
+	[MTSETDRVBUFFER] = NULL,
+	[MTFSS]		 = NULL,
+	[MTBSS]		 = NULL,
+	[MTWSM]		 = NULL,
+	[MTLOCK]	 = NULL,
+	[MTUNLOCK]	 = NULL,
+	[MTLOAD]	 = tape_std_mtload,
+	[MTUNLOAD]	 = tape_std_mtunload,
+	[MTCOMPRESSION]	 = tape_std_mtcompression,
+	[MTSETPART]	 = NULL,
+	[MTMKPART]	 = NULL
+};
+
+/*
+ * Tape discipline structure for 3590.
+ *
+ * Hooks this driver's device setup/teardown, interrupt handling and
+ * MTIOCTOP dispatch table into the generic tape core.
+ */
+static struct tape_discipline tape_discipline_3590 = {
+	.owner = THIS_MODULE,
+	.setup_device = tape_3590_setup_device,
+	.cleanup_device = tape_3590_cleanup_device,
+	.process_eov = tape_std_process_eov,
+	.irq = tape_3590_irq,
+	.read_block = tape_std_read_block,
+	.write_block = tape_std_write_block,
+	.ioctl_fn = tape_3590_ioctl,
+	.mtop_array = tape_3590_mtop
+};
+
+/* CCW bus match table: 3590 and 3592 control unit / device types. */
+static struct ccw_device_id tape_3590_ids[] = {
+	{CCW_DEVICE_DEVTYPE(0x3590, 0, 0x3590, 0), .driver_info = tape_3590},
+	{CCW_DEVICE_DEVTYPE(0x3592, 0, 0x3592, 0), .driver_info = tape_3592},
+	{ /* end of list */ }
+};
+
+/* set_online callback: attach the 3590 discipline to the generic core. */
+static int
+tape_3590_online(struct ccw_device *cdev)
+{
+	return tape_generic_online(dev_get_drvdata(&cdev->dev),
+				   &tape_discipline_3590);
+}
+
+/* CCW driver glue; probe/remove/offline are handled by the tape core. */
+static struct ccw_driver tape_3590_driver = {
+	.driver = {
+		.name = "tape_3590",
+		.owner = THIS_MODULE,
+	},
+	.ids = tape_3590_ids,
+	.probe = tape_generic_probe,
+	.remove = tape_generic_remove,
+	.set_offline = tape_generic_offline,
+	.set_online = tape_3590_online,
+	.freeze = tape_generic_pm_suspend,
+	.int_class = IRQIO_TAP,
+};
+
+/*
+ * Setup discipline structure.
+ *
+ * Module init: registers the s390 debug-feature area, creates the
+ * workqueue used for deferred crypto state changes and registers the
+ * CCW driver.  The workqueue is destroyed again if driver registration
+ * fails.
+ * NOTE(review): the debug_register()/debug_register_view() results are
+ * not checked and the DBF area is not unregistered on failure - verify
+ * against other tape disciplines whether this is acceptable here.
+ */
+static int
+tape_3590_init(void)
+{
+	int rc;
+
+	TAPE_DBF_AREA = debug_register("tape_3590", 2, 2, 4 * sizeof(long));
+	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
+#ifdef DBF_LIKE_HELL
+	debug_set_level(TAPE_DBF_AREA, 6);
+#endif
+
+	DBF_EVENT(3, "3590 init\n");
+
+	tape_3590_wq = alloc_workqueue("tape_3590", 0, 0);
+	if (!tape_3590_wq)
+		return -ENOMEM;
+
+	/* Register driver for 3590 tapes. */
+	rc = ccw_driver_register(&tape_3590_driver);
+	if (rc) {
+		destroy_workqueue(tape_3590_wq);
+		DBF_EVENT(3, "3590 init failed\n");
+	} else
+		DBF_EVENT(3, "3590 registered\n");
+	return rc;
+}
+
+/* Module exit: undo tape_3590_init() in reverse order. */
+static void
+tape_3590_exit(void)
+{
+	ccw_driver_unregister(&tape_3590_driver);
+	destroy_workqueue(tape_3590_wq);
+	debug_unregister(TAPE_DBF_AREA);
+}
+
+MODULE_DEVICE_TABLE(ccw, tape_3590_ids);
+MODULE_AUTHOR("(C) 2001,2006 IBM Corporation");
+MODULE_DESCRIPTION("Linux on zSeries channel attached 3590 tape device driver");
+MODULE_LICENSE("GPL");
+
+module_init(tape_3590_init);
+module_exit(tape_3590_exit);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/tape_3590.h b/src/kernel/linux/v4.14/drivers/s390/char/tape_3590.h
new file mode 100644
index 0000000..b398d8a
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/tape_3590.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    tape device discipline for 3590 tapes.
+ *
+ *    Copyright IBM Corp. 2001, 2006
+ *    Author(s): Stefan Bader <shbader@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _TAPE_3590_H
+#define _TAPE_3590_H
+
+/* 3590 channel command codes. */
+#define MEDIUM_SENSE	0xc2
+#define READ_PREVIOUS	0x0a
+#define MODE_SENSE	0xcf
+#define PERFORM_SS_FUNC 0x77
+#define READ_SS_DATA	0x3e
+
+#define PREP_RD_SS_DATA 0x18
+#define RD_ATTMSG	0x3
+
+/* Basic recovery action codes from the sense "bra" field. */
+#define SENSE_BRA_PER  0
+#define SENSE_BRA_CONT 1
+#define SENSE_BRA_RE   2
+#define SENSE_BRA_DRE  3
+
+/* Values of the sense_fmt byte selecting the sense-data layout. */
+#define SENSE_FMT_LIBRARY	0x23
+#define SENSE_FMT_UNSOLICITED	0x40
+#define SENSE_FMT_COMMAND_REJ	0x41
+#define SENSE_FMT_COMMAND_EXEC0 0x50
+#define SENSE_FMT_COMMAND_EXEC1 0x51
+#define SENSE_FMT_EVENT0	0x60
+#define SENSE_FMT_EVENT1	0x61
+#define SENSE_FMT_MIM		0x70
+#define SENSE_FMT_SIM		0x71
+
+/* Medium-sense association states (macst field). */
+#define MSENSE_UNASSOCIATED	 0x00
+#define MSENSE_ASSOCIATED_MOUNT	 0x01
+#define MSENSE_ASSOCIATED_UMOUNT 0x02
+#define MSENSE_CRYPT_MASK	 0x00000010
+
+/* Highest message code with a canned text in tape_3590_msg[]. */
+#define TAPE_3590_MAX_MSG	 0xb0
+
+/* Datatypes */
+
+/* Per-device discipline state hung off tape_device->discdata. */
+struct tape_3590_disc_data {
+	struct tape390_crypt_info crypt_info;
+	int read_back_op;
+};
+
+/* Convenience accessors for the discipline data of a tape_device. */
+#define TAPE_3590_CRYPT_INFO(device) \
+	((struct tape_3590_disc_data*)(device->discdata))->crypt_info
+#define TAPE_3590_READ_BACK_OP(device) \
+	((struct tape_3590_disc_data*)(device->discdata))->read_back_op
+
+/*
+ * Layout of the 3590 sense data delivered in irb->ecw.  Bitfields map
+ * the hardware-defined sense bytes, so the structure is packed and the
+ * field order must not change.  The union holds the format-dependent
+ * part: f70 for media information messages (sense_fmt 0x70), f71 for
+ * service information messages (0x71), raw bytes otherwise.
+ */
+struct tape_3590_sense {
+
+	unsigned int command_rej:1;
+	unsigned int interv_req:1;
+	unsigned int bus_out_check:1;
+	unsigned int eq_check:1;
+	unsigned int data_check:1;
+	unsigned int overrun:1;
+	unsigned int def_unit_check:1;
+	unsigned int assgnd_elsew:1;
+
+	unsigned int locate_fail:1;
+	unsigned int inst_online:1;
+	unsigned int reserved:1;
+	unsigned int blk_seq_err:1;
+	unsigned int begin_part:1;
+	unsigned int wr_mode:1;
+	unsigned int wr_prot:1;
+	unsigned int not_cap:1;
+
+	unsigned int bra:2;	/* basic recovery action (SENSE_BRA_*) */
+	unsigned int lc:3;
+	unsigned int vlf_active:1;
+	unsigned int stm:1;
+	unsigned int med_pos:1;
+
+	unsigned int rac:8;	/* recovery action code */
+
+	unsigned int rc_rqc:16;	/* reason code / reason qualifier code */
+
+	unsigned int mc:8;	/* message code */
+
+	unsigned int sense_fmt:8;	/* selects the union layout below */
+
+	union {
+		struct {
+			unsigned int emc:4;	/* exception message code */
+			unsigned int smc:4;	/* service message code */
+			unsigned int sev:2;	/* severity */
+			unsigned int reserved:6;
+			unsigned int md:8;
+			unsigned int refcode:8;
+			unsigned int mid:16;
+			unsigned int mp:16;
+			unsigned char volid[6];
+			unsigned int fid:8;
+		} f70;
+		struct {
+			unsigned int emc:4;	/* exception message code */
+			unsigned int smc:4;	/* service message code */
+			unsigned int sev:2;	/* severity */
+			unsigned int reserved1:5;
+			unsigned int mdf:1;	/* md[1]/md[2] form a range */
+			unsigned char md[3];
+			unsigned int simid:8;
+			unsigned int uid:16;
+			unsigned int refcode1:16;
+			unsigned int refcode2:16;
+			unsigned int refcode3:16;
+			unsigned int reserved2:8;
+		} f71;
+		unsigned char data[14];
+	} fmt;
+	unsigned char pad[10];
+
+} __attribute__ ((packed));
+
+/* Result buffer of the MEDIUM_SENSE channel command. */
+struct tape_3590_med_sense {
+	unsigned int macst:4;	/* medium association state (MSENSE_*) */
+	unsigned int masst:4;
+	char pad1[7];
+	unsigned int flags;
+	char pad2[116];
+} __attribute__ ((packed));
+
+/* Raw Read Device Characteristics result (byte 31: crypto capability). */
+struct tape_3590_rdc_data {
+	char data[64];
+} __attribute__ ((packed));
+
+/* Datastructures for 3592 encryption support */
+
+/* One key-encrypting-key label as exchanged with the drive. */
+struct tape3592_kekl {
+	__u8 flags;
+	char label[64];
+} __attribute__ ((packed));
+
+struct tape3592_kekl_pair {
+	__u8 count;
+	struct tape3592_kekl kekl[2];
+} __attribute__ ((packed));
+
+/* Payload returned by a KEKL query (READ_SS_DATA). */
+struct tape3592_kekl_query_data {
+	__u16 len;
+	__u8  fmt;
+	__u8  mc;
+	__u32 id;
+	__u8  flags;
+	struct tape3592_kekl_pair kekls;
+	char reserved[116];
+} __attribute__ ((packed));
+
+/* Order block issued to query the currently set KEKLs. */
+struct tape3592_kekl_query_order {
+	__u8 code;
+	__u8 flags;
+	char reserved1[2];
+	__u8 max_count;
+	char reserved2[35];
+} __attribute__ ((packed));
+
+/* Order block issued to set new KEKLs on the drive. */
+struct tape3592_kekl_set_order {
+	__u8 code;
+	__u8 flags;
+	char reserved1[2];
+	__u8 op;
+	struct tape3592_kekl_pair kekls;
+	char reserved2[120];
+} __attribute__ ((packed));
+
+#endif /* _TAPE_3590_H */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/tape_char.c b/src/kernel/linux/v4.14/drivers/s390/char/tape_char.c
new file mode 100644
index 0000000..fc206c9
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/tape_char.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    character device frontend for tape device driver
+ *
+ *  S390 and zSeries version
+ *    Copyright IBM Corp. 2001, 2006
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/mtio.h>
+#include <linux/compat.h>
+
+#include <linux/uaccess.h>
+
+#define TAPE_DBF_AREA	tape_core_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+#include "tape_class.h"
+
+#define TAPECHAR_MAJOR		0	/* get dynamic major */
+
+/*
+ * file operation structure for tape character frontend
+ */
+static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
+static int tapechar_open(struct inode *,struct file *);
+static int tapechar_release(struct inode *,struct file *);
+static long tapechar_ioctl(struct file *, unsigned int, unsigned long);
+#ifdef CONFIG_COMPAT
+static long tapechar_compat_ioctl(struct file *, unsigned int, unsigned long);
+#endif
+
+/* Character-device operations shared by the rewinding and
+ * non-rewinding minor nodes of every tape device. */
+static const struct file_operations tape_fops =
+{
+	.owner = THIS_MODULE,
+	.read = tapechar_read,
+	.write = tapechar_write,
+	.unlocked_ioctl = tapechar_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = tapechar_compat_ioctl,
+#endif
+	.open = tapechar_open,
+	.release = tapechar_release,
+	.llseek = no_llseek,
+};
+
+/* Dynamic major number; filled in by tapechar_init(). */
+static int tapechar_major = TAPECHAR_MAJOR;
+
+/*
+ * This function is called for every new tapedevice
+ */
+/*
+ * This function is called for every new tapedevice.
+ *
+ * Registers the two character-device nodes per drive: "ntibmX"
+ * (non-rewinding, even minor) and "rtibmX" (rewinding, odd minor).
+ * register_tape_dev() returns an ERR_PTR on failure (see
+ * tape_class.c), so check both results instead of silently storing an
+ * error cookie and returning success.
+ */
+int
+tapechar_setup_device(struct tape_device * device)
+{
+	char	device_name[20];
+	int	rc;
+
+	sprintf(device_name, "ntibm%i", device->first_minor / 2);
+	device->nt = register_tape_dev(
+		&device->cdev->dev,
+		MKDEV(tapechar_major, device->first_minor),
+		&tape_fops,
+		device_name,
+		"non-rewinding"
+	);
+	if (IS_ERR(device->nt)) {
+		rc = PTR_ERR(device->nt);
+		device->nt = NULL;
+		return rc;
+	}
+	/* Same name with the leading 'n' replaced by 'r'. */
+	device_name[0] = 'r';
+	device->rt = register_tape_dev(
+		&device->cdev->dev,
+		MKDEV(tapechar_major, device->first_minor + 1),
+		&tape_fops,
+		device_name,
+		"rewinding"
+	);
+	if (IS_ERR(device->rt)) {
+		rc = PTR_ERR(device->rt);
+		device->rt = NULL;
+		/* Roll back the non-rewinding node registered above. */
+		unregister_tape_dev(&device->cdev->dev, device->nt);
+		device->nt = NULL;
+		return rc;
+	}
+
+	return 0;
+}
+
+/* Counterpart of tapechar_setup_device(): remove both device nodes. */
+void
+tapechar_cleanup_device(struct tape_device *device)
+{
+	unregister_tape_dev(&device->cdev->dev, device->rt);
+	device->rt = NULL;
+	unregister_tape_dev(&device->cdev->dev, device->nt);
+	device->nt = NULL;
+}
+
+/*
+ * Make sure the device's idal buffer matches block_size, reallocating
+ * it if necessary.  Returns 0 on success, -EINVAL for oversized blocks
+ * or -ENOMEM if allocation fails (the old buffer is kept in that case).
+ */
+static int
+tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
+{
+	struct idal_buffer *new;
+
+	if (device->char_data.idal_buf != NULL &&
+	    device->char_data.idal_buf->size == block_size)
+		return 0;
+
+	if (block_size > MAX_BLOCKSIZE) {
+		DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n",
+			block_size, MAX_BLOCKSIZE);
+		return -EINVAL;
+	}
+
+	/* The current idal buffer is not correct. Allocate a new one. */
+	new = idal_buffer_alloc(block_size, 0);
+	if (IS_ERR(new))
+		return -ENOMEM;
+
+	if (device->char_data.idal_buf != NULL)
+		idal_buffer_free(device->char_data.idal_buf);
+
+	device->char_data.idal_buf = new;
+
+	return 0;
+}
+
+/*
+ * Tape device read function
+ *
+ * Reads one block from the tape into user space.  If a fixed block
+ * size is configured the caller's buffer must hold at least one full
+ * block; otherwise the request size itself is used as the block size.
+ * Returns the number of bytes read or a negative errno.
+ */
+static ssize_t
+tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	size_t block_size;
+	int rc;
+
+	DBF_EVENT(6, "TCHAR:read\n");
+	device = (struct tape_device *) filp->private_data;
+
+	/*
+	 * If the tape isn't terminated yet, do it now. And since we then
+	 * are at the end of the tape there wouldn't be anything to read
+	 * anyways. So we return immediately.
+	 */
+	if(device->required_tapemarks) {
+		return tape_std_terminate_write(device);
+	}
+
+	/* Find out block size to use */
+	if (device->char_data.block_size != 0) {
+		if (count < device->char_data.block_size) {
+			DBF_EVENT(3, "TCHAR:read smaller than block "
+				  "size was requested\n");
+			return -EINVAL;
+		}
+		block_size = device->char_data.block_size;
+	} else {
+		block_size = count;
+	}
+
+	rc = tapechar_check_idalbuffer(device, block_size);
+	if (rc)
+		return rc;
+
+	DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
+	/* Let the discipline build the ccw chain. */
+	request = device->discipline->read_block(device, block_size);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	/* Execute it. */
+	rc = tape_do_io(device, request);
+	if (rc == 0) {
+		/* rescnt is the residual count: bytes NOT transferred. */
+		rc = block_size - request->rescnt;
+		DBF_EVENT(6, "TCHAR:rbytes:  %x\n", rc);
+		/* Copy data from idal buffer to user space. */
+		if (idal_buffer_to_user(device->char_data.idal_buf,
+					data, rc) != 0)
+			rc = -EFAULT;
+	}
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * Tape device write function
+ *
+ * Writes up to count bytes from user space as one or more fixed-size
+ * blocks (or a single variable block when no block size is set).  On
+ * -ENOSPC the end-of-volume handling runs and a partial write is
+ * reported as success.  Returns bytes written or a negative errno.
+ */
+static ssize_t
+tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	size_t block_size;
+	size_t written;
+	int nblocks;
+	int i, rc;
+
+	DBF_EVENT(6, "TCHAR:write\n");
+	device = (struct tape_device *) filp->private_data;
+	/* Find out block size and number of blocks */
+	if (device->char_data.block_size != 0) {
+		if (count < device->char_data.block_size) {
+			DBF_EVENT(3, "TCHAR:write smaller than block "
+				  "size was requested\n");
+			return -EINVAL;
+		}
+		block_size = device->char_data.block_size;
+		/* Trailing partial block (count % block_size) is dropped. */
+		nblocks = count / block_size;
+	} else {
+		block_size = count;
+		nblocks = 1;
+	}
+
+	rc = tapechar_check_idalbuffer(device, block_size);
+	if (rc)
+		return rc;
+
+	DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size);
+	DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
+	/* Let the discipline build the ccw chain. */
+	request = device->discipline->write_block(device, block_size);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	rc = 0;
+	written = 0;
+	/* The single request is reused for every block. */
+	for (i = 0; i < nblocks; i++) {
+		/* Copy data from user space to idal buffer. */
+		if (idal_buffer_from_user(device->char_data.idal_buf,
+					  data, block_size)) {
+			rc = -EFAULT;
+			break;
+		}
+		rc = tape_do_io(device, request);
+		if (rc)
+			break;
+		DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
+			  block_size - request->rescnt);
+		written += block_size - request->rescnt;
+		if (request->rescnt != 0)
+			break;
+		data += block_size;
+	}
+	tape_free_request(request);
+	if (rc == -ENOSPC) {
+		/*
+		 * Ok, the device has no more space. It has NOT written
+		 * the block.
+		 */
+		if (device->discipline->process_eov)
+			device->discipline->process_eov(device);
+		if (written > 0)
+			rc = 0;
+
+	}
+
+	/*
+	 * After doing a write we always need two tapemarks to correctly
+	 * terminate the tape (one to terminate the file, the second to
+	 * flag the end of recorded data.
+	 * Since process_eov positions the tape in front of the written
+	 * tapemark it doesn't hurt to write two marks again.
+	 */
+	if (!rc)
+		device->required_tapemarks = 2;
+
+	return rc ? rc : written;
+}
+
+/*
+ * Character frontend tape device open function.
+ *
+ * Maps the minor number to a tape_device (TAPE_MINORS_PER_DEV minors
+ * per drive), takes a device reference and opens the device.  The
+ * reference is dropped again if the open fails.
+ */
+static int
+tapechar_open (struct inode *inode, struct file *filp)
+{
+	struct tape_device *device;
+	int minor, rc;
+
+	DBF_EVENT(6, "TCHAR:open: %i:%i\n",
+		imajor(file_inode(filp)),
+		iminor(file_inode(filp)));
+
+	if (imajor(file_inode(filp)) != tapechar_major)
+		return -ENODEV;
+
+	minor = iminor(file_inode(filp));
+	device = tape_find_device(minor / TAPE_MINORS_PER_DEV);
+	if (IS_ERR(device)) {
+		DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n");
+		return PTR_ERR(device);
+	}
+
+	rc = tape_open(device);
+	if (rc == 0) {
+		filp->private_data = device;
+		nonseekable_open(inode, filp);
+	} else
+		tape_put_device(device);
+
+	return rc;
+}
+
+/*
+ * Character frontend tape device release function.
+ */
+
+static int
+tapechar_release(struct inode *inode, struct file *filp)
+{
+	struct tape_device *device;
+
+	DBF_EVENT(6, "TCHAR:release: %x\n", iminor(inode));
+	device = (struct tape_device *) filp->private_data;
+
+	/*
+	 * If this is the rewinding tape minor then rewind. In that case we
+	 * write all required tapemarks. Otherwise only one to terminate the
+	 * file.
+	 */
+	if ((iminor(inode) & 1) != 0) {
+		/* Odd minor: rewinding node. */
+		if (device->required_tapemarks)
+			tape_std_terminate_write(device);
+		tape_mtop(device, MTREW, 1);
+	} else {
+		if (device->required_tapemarks > 1) {
+			if (tape_mtop(device, MTWEOF, 1) == 0)
+				device->required_tapemarks--;
+		}
+	}
+
+	/* Release the block buffer; it is reallocated on next use. */
+	if (device->char_data.idal_buf != NULL) {
+		idal_buffer_free(device->char_data.idal_buf);
+		device->char_data.idal_buf = NULL;
+	}
+	tape_release(device);
+	filp->private_data = NULL;
+	tape_put_device(device);
+
+	return 0;
+}
+
+/*
+ * Tape device io controls.
+ *
+ * Handles MTIOCTOP (tape operations), MTIOCPOS (position query) and
+ * MTIOCGET (status query) directly; everything else is forwarded to
+ * the discipline's ioctl_fn.  Called with device->mutex held.
+ */
+static int
+__tapechar_ioctl(struct tape_device *device,
+		 unsigned int no, unsigned long data)
+{
+	int rc;
+
+	if (no == MTIOCTOP) {
+		struct mtop op;
+
+		if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0)
+			return -EFAULT;
+		if (op.mt_count < 0)
+			return -EINVAL;
+
+		/*
+		 * Operations that change tape position should write final
+		 * tapemarks.
+		 */
+		switch (op.mt_op) {
+			case MTFSF:
+			case MTBSF:
+			case MTFSR:
+			case MTBSR:
+			case MTREW:
+			case MTOFFL:
+			case MTEOM:
+			case MTRETEN:
+			case MTBSFM:
+			case MTFSFM:
+			case MTSEEK:
+				if (device->required_tapemarks)
+					tape_std_terminate_write(device);
+			default:
+				;
+		}
+		rc = tape_mtop(device, op.mt_op, op.mt_count);
+
+		/* Explicitly written tapemarks reduce the debt tracked in
+		 * required_tapemarks. */
+		if (op.mt_op == MTWEOF && rc == 0) {
+			if (op.mt_count > device->required_tapemarks)
+				device->required_tapemarks = 0;
+			else
+				device->required_tapemarks -= op.mt_count;
+		}
+		return rc;
+	}
+	if (no == MTIOCPOS) {
+		/* MTIOCPOS: query the tape position. */
+		struct mtpos pos;
+
+		rc = tape_mtop(device, MTTELL, 1);
+		if (rc < 0)
+			return rc;
+		pos.mt_blkno = rc;
+		if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0)
+			return -EFAULT;
+		return 0;
+	}
+	if (no == MTIOCGET) {
+		/* MTIOCGET: query the tape drive status. */
+		struct mtget get;
+
+		memset(&get, 0, sizeof(get));
+		get.mt_type = MT_ISUNKNOWN;
+		get.mt_resid = 0 /* device->devstat.rescnt */;
+		get.mt_dsreg =
+			((device->char_data.block_size << MT_ST_BLKSIZE_SHIFT)
+			 & MT_ST_BLKSIZE_MASK);
+		/* FIXME: mt_gstat, mt_erreg, mt_fileno */
+		get.mt_gstat = 0;
+		get.mt_erreg = 0;
+		get.mt_fileno = 0;
+		get.mt_gstat  = device->tape_generic_status;
+
+		if (device->medium_state == MS_LOADED) {
+			rc = tape_mtop(device, MTTELL, 1);
+
+			if (rc < 0)
+				return rc;
+
+			/* Block 0 means beginning of tape. */
+			if (rc == 0)
+				get.mt_gstat |= GMT_BOT(~0);
+
+			get.mt_blkno = rc;
+		}
+
+		if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0)
+			return -EFAULT;
+
+		return 0;
+	}
+	/* Try the discipline ioctl function. */
+	if (device->discipline->ioctl_fn == NULL)
+		return -EINVAL;
+	return device->discipline->ioctl_fn(device, no, data);
+}
+
+/* Serialized entry point for the unlocked_ioctl file operation. */
+static long
+tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
+{
+	struct tape_device *device;
+	long rc;
+
+	DBF_EVENT(6, "TCHAR:ioct\n");
+
+	device = (struct tape_device *) filp->private_data;
+	mutex_lock(&device->mutex);
+	rc = __tapechar_ioctl(device, no, data);
+	mutex_unlock(&device->mutex);
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * 32-bit compat ioctl: only discipline ioctls are forwarded, with the
+ * user pointer converted via compat_ptr().
+ */
+static long
+tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
+{
+	struct tape_device *device = filp->private_data;
+	int rval = -ENOIOCTLCMD;
+	unsigned long argp;
+
+	/* The 'arg' argument of any ioctl function may only be used for
+	 * pointers because of the compat pointer conversion.
+	 * Consider this when adding new ioctls.
+	 */
+	argp = (unsigned long) compat_ptr(data);
+	if (device->discipline->ioctl_fn) {
+		mutex_lock(&device->mutex);
+		rval = device->discipline->ioctl_fn(device, no, argp);
+		mutex_unlock(&device->mutex);
+		/* Let the VFS fall back to the native ioctl path. */
+		if (rval == -EINVAL)
+			rval = -ENOIOCTLCMD;
+	}
+
+	return rval;
+}
+#endif /* CONFIG_COMPAT */
+
+/*
+ * Initialize character device frontend.
+ *
+ * Reserves 256 minors under a dynamically allocated major named
+ * "tape".  Returns 0 on success, -1 on failure.
+ */
+int
+tapechar_init (void)
+{
+	dev_t	dev;
+
+	if (alloc_chrdev_region(&dev, 0, 256, "tape") != 0)
+		return -1;
+
+	tapechar_major = MAJOR(dev);
+
+	return 0;
+}
+
+/*
+ * cleanup: release the minor range reserved by tapechar_init().
+ */
+void
+tapechar_exit(void)
+{
+	unregister_chrdev_region(MKDEV(tapechar_major, 0), 256);
+}
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/tape_class.c b/src/kernel/linux/v4.14/drivers/s390/char/tape_class.c
new file mode 100644
index 0000000..91c3c64
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/tape_class.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright IBM Corp. 2004
+ *
+ * Tape class device support
+ *
+ * Author: Stefan Bader <shbader@de.ibm.com>
+ * Based on simple class device code by Greg K-H
+ */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/slab.h>
+
+#include "tape_class.h"
+
+MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
+MODULE_DESCRIPTION(
+	"Copyright IBM Corp. 2004   All Rights Reserved.\n"
+	"tape_class.c"
+);
+MODULE_LICENSE("GPL");
+
+static struct class *tape_class;
+
+/*
+ * Register a tape device and return a pointer to the cdev structure.
+ *
+ * device
+ *	The pointer to the struct device of the physical (base) device.
+ * drivername
+ *	The pointer to the drivers name for it's character devices.
+ * dev
+ *	The intended major/minor number. The major number may be 0 to
+ *	get a dynamic major number.
+ * fops
+ *	The pointer to the drivers file operations for the tape device.
+ * devname
+ *	The pointer to the name of the character device.
+ */
+struct tape_class_device *register_tape_dev(
+	struct device *		device,
+	dev_t			dev,
+	const struct file_operations *fops,
+	char *			device_name,
+	char *			mode_name)
+{
+	struct tape_class_device *	tcd;
+	int		rc;
+	char *		s;
+
+	tcd = kzalloc(sizeof(struct tape_class_device), GFP_KERNEL);
+	if (!tcd)
+		return ERR_PTR(-ENOMEM);
+
+	/* strncpy() does not NUL-terminate when the source is at least
+	 * TAPECLASS_NAME_LEN long; force termination so the names are
+	 * always valid C strings before strchr() walks them. */
+	strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
+	tcd->device_name[TAPECLASS_NAME_LEN - 1] = '\0';
+	/* Slashes are not allowed in kobject names; map them to '!'. */
+	for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
+		*s = '!';
+	strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
+	tcd->mode_name[TAPECLASS_NAME_LEN - 1] = '\0';
+	for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
+		*s = '!';
+
+	tcd->char_device = cdev_alloc();
+	if (!tcd->char_device) {
+		rc = -ENOMEM;
+		goto fail_with_tcd;
+	}
+
+	tcd->char_device->owner = fops->owner;
+	tcd->char_device->ops   = fops;
+	tcd->char_device->dev   = dev;
+
+	rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1);
+	if (rc)
+		goto fail_with_cdev;
+
+	/* Create the sysfs class device under the "tape390" class. */
+	tcd->class_device = device_create(tape_class, device,
+					  tcd->char_device->dev, NULL,
+					  "%s", tcd->device_name);
+	rc = PTR_RET(tcd->class_device);
+	if (rc)
+		goto fail_with_cdev;
+	/* Link the physical device to the class device under mode_name. */
+	rc = sysfs_create_link(
+		&device->kobj,
+		&tcd->class_device->kobj,
+		tcd->mode_name
+	);
+	if (rc)
+		goto fail_with_class_device;
+
+	return tcd;
+
+fail_with_class_device:
+	device_destroy(tape_class, tcd->char_device->dev);
+
+fail_with_cdev:
+	cdev_del(tcd->char_device);
+
+fail_with_tcd:
+	kfree(tcd);
+
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL(register_tape_dev);
+
+void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
+{
+	/* Tolerate NULL and ERR_PTR cookies so callers can pass whatever
+	 * register_tape_dev() returned without checking. */
+	if (tcd == NULL || IS_ERR(tcd))
+		return;
+
+	sysfs_remove_link(&device->kobj, tcd->mode_name);
+	device_destroy(tape_class, tcd->char_device->dev);
+	cdev_del(tcd->char_device);
+	kfree(tcd);
+}
+
+
+static int __init tape_init(void)
+{
+	/* Create the "tape390" sysfs class shared by all tape devices.
+	 * Propagate a failure instead of returning 0 with an ERR_PTR
+	 * class pointer, which would crash later in device_create(). */
+	tape_class = class_create(THIS_MODULE, "tape390");
+	if (IS_ERR(tape_class)) {
+		int rc = PTR_ERR(tape_class);
+
+		tape_class = NULL;
+		return rc;
+	}
+
+	return 0;
+}
+
+/* Module teardown: destroy the "tape390" class created in tape_init(). */
+static void __exit tape_exit(void)
+{
+	class_destroy(tape_class);
+	tape_class = NULL;
+}
+
+postcore_initcall(tape_init);
+module_exit(tape_exit);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/tape_class.h b/src/kernel/linux/v4.14/drivers/s390/char/tape_class.h
new file mode 100644
index 0000000..d25ac07
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/tape_class.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2004   All Rights Reserved.
+ *
+ * Tape class device support
+ *
+ * Author: Stefan Bader <shbader@de.ibm.com>
+ * Based on simple class device code by Greg K-H
+ */
+#ifndef __TAPE_CLASS_H__
+#define __TAPE_CLASS_H__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/major.h>
+#include <linux/cdev.h>
+
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+
+#define TAPECLASS_NAME_LEN	32
+
+struct tape_class_device {
+	struct cdev		*char_device;	/* character device node */
+	struct device		*class_device;	/* sysfs class device */
+	/* cdev / kobject name; '/' characters are replaced by '!' */
+	char			device_name[TAPECLASS_NAME_LEN];
+	/* name of the sysfs link from the base device to class_device */
+	char			mode_name[TAPECLASS_NAME_LEN];
+};
+
+/*
+ * Register a tape device and return a pointer to the tape class device
+ * created by the call.
+ *
+ * device
+ *	The pointer to the struct device of the physical (base) device.
+ * dev
+ *	The intended major/minor number. The major number may be 0 to
+ *	get a dynamic major number.
+ * fops
+ *	The pointer to the drivers file operations for the tape device.
+ * device_name
+ *	Pointer to the logical device name (will also be used as kobject name
+ *	of the cdev). This can also be called the name of the tape class
+ *	device.
+ * mode_name
+ *	Points to the name of the tape mode. This creates a link with that
+ *	name from the physical device to the logical device (class).
+ */
+struct tape_class_device *register_tape_dev(
+	struct device *		device,
+	dev_t			dev,
+	const struct file_operations *fops,
+	char *			device_name,
+	char *			mode_name	/* was node_name; renamed to match the definition */
+);
+void unregister_tape_dev(struct device *device, struct tape_class_device *tcd);
+
+#endif /* __TAPE_CLASS_H__ */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/tape_core.c b/src/kernel/linux/v4.14/drivers/s390/char/tape_core.c
new file mode 100644
index 0000000..9dd4534
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/tape_core.c
@@ -0,0 +1,1379 @@
+/*
+ *    basic function of the tape device driver
+ *
+ *  S390 and zSeries version
+ *    Copyright IBM Corp. 2001, 2009
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *		 Stefan Bader <shbader@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>	     // for kernel parameters
+#include <linux/kmod.h>	     // for requesting modules
+#include <linux/spinlock.h>  // for locks
+#include <linux/vmalloc.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/types.h>	     // for variable types
+
+#define TAPE_DBF_AREA	tape_core_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+
+#define LONG_BUSY_TIMEOUT 180 /* seconds */
+
+static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
+static void tape_delayed_next_request(struct work_struct *);
+static void tape_long_busy_timeout(unsigned long data);
+
+/*
+ * One list to contain all tape devices of all disciplines, so
+ * we can assign the devices to minor numbers of the same major
+ * The list is protected by the rwlock
+ */
+static LIST_HEAD(tape_device_list);
+static DEFINE_RWLOCK(tape_device_lock);
+
+/*
+ * Pointer to debug area.
+ */
+debug_info_t *TAPE_DBF_AREA = NULL;
+EXPORT_SYMBOL(TAPE_DBF_AREA);
+
+/*
+ * Printable strings for tape enumerations.
+ */
+/* Human-readable names for enum tape_state, indexed by state value. */
+const char *tape_state_verbose[TS_SIZE] =
+{
+	[TS_UNUSED]   = "UNUSED",
+	[TS_IN_USE]   = "IN_USE",
+	[TS_BLKUSE]   = "BLKUSE",
+	[TS_INIT]     = "INIT  ",
+	[TS_NOT_OPER] = "NOT_OP"
+};
+
+/* Three-letter mnemonics for enum tape_op, used for debug traces and
+ * the sysfs "operation" attribute. */
+const char *tape_op_verbose[TO_SIZE] =
+{
+	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
+	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
+	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
+	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
+	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
+	[TO_RFO] = "RFO",	[TO_REW] = "REW",
+	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
+	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
+	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
+	[TO_READ_ATTMSG] = "RAT",
+	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
+	[TO_UNASSIGN] = "UAS",  [TO_CRYPT_ON] = "CON",
+	[TO_CRYPT_OFF] = "COF",	[TO_KEKL_SET] = "KLS",
+	[TO_KEKL_QUERY] = "KLQ",[TO_RDC] = "RDC",
+};
+
+/* Pack subchannel set id and device number into one integer id. */
+static int devid_to_int(struct ccw_dev_id *dev_id)
+{
+	int id = dev_id->ssid << 16;
+
+	id += dev_id->devno;
+	return id;
+}
+
+/*
+ * Some channel attached tape specific attributes.
+ *
+ * FIXME: In the future the first_minor and blocksize attribute should be
+ *        replaced by a link to the cdev tree.
+ */
+static ssize_t
+tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	/* sysfs: report the current medium state as a decimal number. */
+	struct tape_device *tdev = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
+}
+
+static
+DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);
+
+static ssize_t
+tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	/* sysfs: report the first minor number assigned to this device. */
+	struct tape_device *tdev = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
+}
+
+static
+DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);
+
+static ssize_t
+tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	/* sysfs: report the device state name, or "OFFLINE" when no minor
+	 * has been assigned (first_minor < 0). */
+	struct tape_device *tdev = dev_get_drvdata(dev);
+	const char *state;
+
+	if (tdev->first_minor < 0)
+		state = "OFFLINE";
+	else
+		state = tape_state_verbose[tdev->tape_state];
+	return scnprintf(buf, PAGE_SIZE, "%s\n", state);
+}
+
+static
+DEVICE_ATTR(state, 0444, tape_state_show, NULL);
+
+static ssize_t
+tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	/* sysfs: show the mnemonic of the request at the head of the
+	 * queue, "---" when idle, or "N/A" when offline. */
+	struct tape_device *tdev = dev_get_drvdata(dev);
+	struct tape_request *req;
+	ssize_t len;
+
+	if (tdev->first_minor < 0)
+		return scnprintf(buf, PAGE_SIZE, "N/A\n");
+
+	/* The request queue is protected by the ccw device lock. */
+	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
+	if (list_empty(&tdev->req_queue)) {
+		len = scnprintf(buf, PAGE_SIZE, "---\n");
+	} else {
+		req = list_entry(tdev->req_queue.next, struct tape_request,
+			list);
+		len = scnprintf(buf, PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
+	}
+	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
+	return len;
+}
+
+static
+DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);
+
+static ssize_t
+tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	/* sysfs: report the currently configured block size. */
+	struct tape_device *tdev = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
+}
+
+static
+DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);
+
+/* Attribute group exported per physical tape device in sysfs. */
+static struct attribute *tape_attrs[] = {
+	&dev_attr_medium_state.attr,
+	&dev_attr_first_minor.attr,
+	&dev_attr_state.attr,
+	&dev_attr_operation.attr,
+	&dev_attr_blocksize.attr,
+	NULL
+};
+
+static const struct attribute_group tape_attr_group = {
+	.attrs = tape_attrs,
+};
+
+/*
+ * Tape state functions
+ */
+/*
+ * Set the device state to @newstate and wake everyone sleeping on
+ * state_change_wq.  A device in TS_NOT_OPER never leaves that state
+ * through this function; the request is logged and ignored.
+ */
+void
+tape_state_set(struct tape_device *device, enum tape_state newstate)
+{
+	const char *str;
+
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(3, "ts_set err: not oper\n");
+		return;
+	}
+	DBF_EVENT(4, "ts. dev:	%x\n", device->first_minor);
+	DBF_EVENT(4, "old ts:\t\n");
+	/* Range-check before indexing the name table; out-of-range states
+	 * are only logged as unknown. */
+	if (device->tape_state < TS_SIZE && device->tape_state >=0 )
+		str = tape_state_verbose[device->tape_state];
+	else
+		str = "UNKNOWN TS";
+	DBF_EVENT(4, "%s\n", str);
+	DBF_EVENT(4, "new ts:\t\n");
+	if (newstate < TS_SIZE && newstate >= 0)
+		str = tape_state_verbose[newstate];
+	else
+		str = "UNKNOWN TS";
+	DBF_EVENT(4, "%s\n", str);
+	device->tape_state = newstate;
+	wake_up(&device->state_change_wq);
+}
+
+/* Deferred medium-state notification, executed on the system workqueue. */
+struct tape_med_state_work_data {
+	struct tape_device *device;	/* counted reference, dropped in handler */
+	enum tape_medium_state state;	/* state to report to user space */
+	struct work_struct  work;
+};
+
+/*
+ * Work handler: print a message and send a MEDIUM_STATE uevent for
+ * load/unload transitions, then release the device reference and the
+ * work item itself.
+ */
+static void
+tape_med_state_work_handler(struct work_struct *work)
+{
+	static char env_state_loaded[] = "MEDIUM_STATE=LOADED";
+	static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED";
+	struct tape_med_state_work_data *p =
+		container_of(work, struct tape_med_state_work_data, work);
+	struct tape_device *device = p->device;
+	char *envp[] = { NULL, NULL };
+
+	switch (p->state) {
+	case MS_UNLOADED:
+		pr_info("%s: The tape cartridge has been successfully "
+			"unloaded\n", dev_name(&device->cdev->dev));
+		envp[0] = env_state_unloaded;
+		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
+		break;
+	case MS_LOADED:
+		pr_info("%s: A tape cartridge has been mounted\n",
+			dev_name(&device->cdev->dev));
+		envp[0] = env_state_loaded;
+		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
+		break;
+	default:
+		break;
+	}
+	tape_put_device(device);
+	kfree(p);
+}
+
+static void
+tape_med_state_work(struct tape_device *device, enum tape_medium_state state)
+{
+	/* Queue a deferred notification for @state.  Best-effort: if the
+	 * GFP_ATOMIC allocation fails the notification is silently
+	 * dropped. */
+	struct tape_med_state_work_data *p;
+
+	p = kzalloc(sizeof(*p), GFP_ATOMIC);
+	if (!p)
+		return;
+
+	INIT_WORK(&p->work, tape_med_state_work_handler);
+	p->device = tape_get_device(device);
+	p->state = state;
+	schedule_work(&p->work);
+}
+
+/*
+ * Record a medium state change: keep the GMT_DR_OPEN bit of the
+ * generic status in sync, schedule a user-space notification for real
+ * LOADED<->UNLOADED transitions, and wake state waiters.
+ */
+void
+tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
+{
+	enum tape_medium_state oldstate;
+
+	oldstate = device->medium_state;
+	if (oldstate == newstate)
+		return;
+	device->medium_state = newstate;
+	switch(newstate){
+	case MS_UNLOADED:
+		device->tape_generic_status |= GMT_DR_OPEN(~0);
+		/* Only notify on a genuine unload, not from MS_UNKNOWN. */
+		if (oldstate == MS_LOADED)
+			tape_med_state_work(device, MS_UNLOADED);
+		break;
+	case MS_LOADED:
+		device->tape_generic_status &= ~GMT_DR_OPEN(~0);
+		if (oldstate == MS_UNLOADED)
+			tape_med_state_work(device, MS_LOADED);
+		break;
+	default:
+		break;
+	}
+	wake_up(&device->state_change_wq);
+}
+
+/*
+ * Stop running ccw. Has to be called with the device lock held.
+ */
+static int
+__tape_cancel_io(struct tape_device *device, struct tape_request *request)
+{
+	int retries;
+	int rc;
+
+	/* Check if interrupt has already been processed */
+	if (request->callback == NULL)
+		return 0;
+
+	rc = 0;
+	/* Retry the clear a few times; -ENODEV and -EIO may be transient. */
+	for (retries = 0; retries < 5; retries++) {
+		rc = ccw_device_clear(device->cdev, (long) request);
+
+		switch (rc) {
+			case 0:
+				/* Clear accepted: the request is done. */
+				request->status	= TAPE_REQUEST_DONE;
+				return 0;
+			case -EBUSY:
+				/* CIO is busy: mark for cancellation and let
+				 * the delayed worker retry. */
+				request->status	= TAPE_REQUEST_CANCEL;
+				schedule_delayed_work(&device->tape_dnr, 0);
+				return 0;
+			case -ENODEV:
+				DBF_EXCEPTION(2, "device gone, retry\n");
+				break;
+			case -EIO:
+				DBF_EXCEPTION(2, "I/O error, retry\n");
+				break;
+			default:
+				BUG();
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Add device into the sorted list, giving it the first
+ * available minor number.
+ */
+static int
+tape_assign_minor(struct tape_device *device)
+{
+	struct tape_device *tmp;
+	int minor;
+
+	minor = 0;
+	write_lock(&tape_device_lock);
+	/* Walk the minor-sorted list looking for the first free block of
+	 * TAPE_MINORS_PER_DEV minors. */
+	list_for_each_entry(tmp, &tape_device_list, node) {
+		if (minor < tmp->first_minor)
+			break;
+		minor += TAPE_MINORS_PER_DEV;
+	}
+	if (minor >= 256) {
+		write_unlock(&tape_device_lock);
+		return -ENODEV;
+	}
+	device->first_minor = minor;
+	/* Insert before @tmp; if the loop ran to completion &tmp->node is
+	 * the list head, so this appends at the tail. */
+	list_add_tail(&device->node, &tmp->node);
+	write_unlock(&tape_device_lock);
+	return 0;
+}
+
+/*
+ * Remove @device from the global device list and reset first_minor to
+ * -1; the sysfs show functions report such a device as offline.
+ */
+static void
+tape_remove_minor(struct tape_device *device)
+{
+	write_lock(&tape_device_lock);
+	list_del_init(&device->node);
+	device->first_minor = -1;
+	write_unlock(&tape_device_lock);
+}
+
+/*
+ * Set a device online.
+ *
+ * This function is called by the common I/O layer to move a device from the
+ * detected but offline into the online state.
+ * If we return an error (RC < 0) the device remains in the offline state. This
+ * can happen if the device is assigned somewhere else, for example.
+ */
+int
+tape_generic_online(struct tape_device *device,
+		   struct tape_discipline *discipline)
+{
+	int rc;
+
+	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);
+
+	/* Only a freshly probed (TS_INIT) device may be set online. */
+	if (device->tape_state != TS_INIT) {
+		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
+		return -EINVAL;
+	}
+
+	/* Long-busy timer; only the callback is fixed here.  NOTE(review):
+	 * lb_timeout.data appears to be filled in when the timer is armed
+	 * (it is cleared in tape_long_busy_timeout) — confirm in the
+	 * interrupt handler. */
+	init_timer(&device->lb_timeout);
+	device->lb_timeout.function = tape_long_busy_timeout;
+
+	/* Let the discipline have a go at the device. */
+	device->discipline = discipline;
+	if (!try_module_get(discipline->owner)) {
+		return -EINVAL;
+	}
+
+	rc = discipline->setup_device(device);
+	if (rc)
+		goto out;
+	rc = tape_assign_minor(device);
+	if (rc)
+		goto out_discipline;
+
+	rc = tapechar_setup_device(device);
+	if (rc)
+		goto out_minor;
+
+	tape_state_set(device, TS_UNUSED);
+
+	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);
+
+	return 0;
+
+	/* Error paths unwind in reverse order of the setup steps above. */
+out_minor:
+	tape_remove_minor(device);
+out_discipline:
+	device->discipline->cleanup_device(device);
+	device->discipline = NULL;
+out:
+	module_put(discipline->owner);
+	return rc;
+}
+
+/*
+ * Undo tape_generic_online(): tear down the character device frontend
+ * and the discipline, drop the discipline module reference, release the
+ * minor numbers and forget the medium state.
+ */
+static void
+tape_cleanup_device(struct tape_device *device)
+{
+	tapechar_cleanup_device(device);
+	device->discipline->cleanup_device(device);
+	module_put(device->discipline->owner);
+	tape_remove_minor(device);
+	tape_med_state_set(device, MS_UNKNOWN);
+}
+
+/*
+ * Suspend device.
+ *
+ * Called by the common I/O layer if the drive should be suspended on user
+ * request. We refuse to suspend if the device is loaded or in use for the
+ * following reason:
+ * While the Linux guest is suspended, it might be logged off which causes
+ * devices to be detached. Tape devices are automatically rewound and unloaded
+ * during DETACH processing (unless the tape device was attached with the
+ * NOASSIGN or MULTIUSER option). After rewind/unload, there is no way to
+ * resume the original state of the tape device, since we would need to
+ * manually re-load the cartridge which was active at suspend time.
+ */
+int tape_generic_pm_suspend(struct ccw_device *cdev)
+{
+	struct tape_device *device;
+
+	device = dev_get_drvdata(&cdev->dev);
+	if (!device) {
+		return -ENODEV;
+	}
+
+	DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n",
+		device->cdev_id, device);
+
+	/* Refuse to suspend while a cartridge is loaded (see the comment
+	 * above this function for the rationale). */
+	if (device->medium_state != MS_UNLOADED) {
+		pr_err("A cartridge is loaded in tape device %s, "
+		       "refusing to suspend\n", dev_name(&cdev->dev));
+		return -EBUSY;
+	}
+
+	/* Also refuse while the device is in active use. */
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	switch (device->tape_state) {
+		case TS_INIT:
+		case TS_NOT_OPER:
+		case TS_UNUSED:
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			break;
+		default:
+			pr_err("Tape device %s is busy, refusing to "
+			       "suspend\n", dev_name(&cdev->dev));
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			return -EBUSY;
+	}
+
+	DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id);
+	return 0;
+}
+
+/*
+ * Set device offline.
+ *
+ * Called by the common I/O layer if the drive should set offline on user
+ * request. We may prevent this by returning an error.
+ * Manual offline is only allowed while the drive is not in use.
+ */
+int
+tape_generic_offline(struct ccw_device *cdev)
+{
+	struct tape_device *device;
+
+	device = dev_get_drvdata(&cdev->dev);
+	if (!device) {
+		return -ENODEV;
+	}
+
+	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
+		device->cdev_id, device);
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	switch (device->tape_state) {
+		case TS_INIT:
+		case TS_NOT_OPER:
+			/* Nothing was set up yet; nothing to tear down. */
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			break;
+		case TS_UNUSED:
+			/* Idle device: back to TS_INIT and full teardown.
+			 * tape_cleanup_device() may sleep, so drop the lock
+			 * first. */
+			tape_state_set(device, TS_INIT);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			tape_cleanup_device(device);
+			break;
+		default:
+			/* Device busy: refuse the manual offline request. */
+			DBF_EVENT(3, "(%08x): Set offline failed "
+				"- drive in use.\n",
+				device->cdev_id);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			return -EBUSY;
+	}
+
+	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
+	return 0;
+}
+
+/*
+ * Allocate memory for a new device structure.
+ */
+static struct tape_device *
+tape_alloc_device(void)
+{
+	struct tape_device *dev;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		DBF_EXCEPTION(2, "ti:no mem\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	/* The single mode-set byte is allocated separately so it can live
+	 * in DMA-capable memory. */
+	dev->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
+	if (!dev->modeset_byte) {
+		DBF_EXCEPTION(2, "ti:no mem\n");
+		kfree(dev);
+		return ERR_PTR(-ENOMEM);
+	}
+	mutex_init(&dev->mutex);
+	INIT_LIST_HEAD(&dev->req_queue);
+	INIT_LIST_HEAD(&dev->node);
+	init_waitqueue_head(&dev->state_change_wq);
+	init_waitqueue_head(&dev->wait_queue);
+	INIT_DELAYED_WORK(&dev->tape_dnr, tape_delayed_next_request);
+	dev->tape_state = TS_INIT;
+	dev->medium_state = MS_UNKNOWN;
+	*dev->modeset_byte = 0;
+	dev->first_minor = -1;
+	/* The caller owns the initial reference. */
+	atomic_set(&dev->ref_count, 1);
+
+	return dev;
+}
+
+/*
+ * Get a reference to an existing device structure. This will automatically
+ * increment the reference count.
+ */
+struct tape_device *
+tape_get_device(struct tape_device *device)
+{
+	/* Take another counted reference and return the same pointer for
+	 * convenient assignment at the call site. */
+	int refs = atomic_inc_return(&device->ref_count);
+
+	DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, refs);
+	return device;
+}
+
+/*
+ * Decrease the reference counter of a devices structure. If the
+ * reference counter reaches zero free the device structure.
+ * The function returns a NULL pointer to be used by the caller
+ * for clearing reference pointers.
+ */
+void
+tape_put_device(struct tape_device *device)
+{
+	int refs = atomic_dec_return(&device->ref_count);
+
+	DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, refs);
+	BUG_ON(refs < 0);
+	if (refs)
+		return;
+	/* Last reference gone: free the structure and its DMA buffer. */
+	kfree(device->modeset_byte);
+	kfree(device);
+}
+
+/*
+ * Find tape device by a device index.
+ */
+struct tape_device *
+tape_find_device(int devindex)
+{
+	/* Look up the device owning minor block @devindex; on success the
+	 * caller receives a counted reference. */
+	struct tape_device *pos, *found = ERR_PTR(-ENODEV);
+
+	read_lock(&tape_device_lock);
+	list_for_each_entry(pos, &tape_device_list, node) {
+		if (pos->first_minor / TAPE_MINORS_PER_DEV != devindex)
+			continue;
+		found = tape_get_device(pos);
+		break;
+	}
+	read_unlock(&tape_device_lock);
+	return found;
+}
+
+/*
+ * Driverfs tape probe function.
+ */
+int
+tape_generic_probe(struct ccw_device *cdev)
+{
+	struct tape_device *device;
+	int ret;
+	struct ccw_dev_id dev_id;
+
+	device = tape_alloc_device();
+	if (IS_ERR(device))
+		return -ENODEV;
+	/* Ask the CIO layer for path grouping and multipath support. */
+	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
+				     CCWDEV_DO_MULTIPATH);
+	ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
+	if (ret) {
+		/* Drop the initial reference from tape_alloc_device(). */
+		tape_put_device(device);
+		return ret;
+	}
+	dev_set_drvdata(&cdev->dev, device);
+	cdev->handler = __tape_do_irq;
+	device->cdev = cdev;
+	ccw_device_get_id(cdev, &dev_id);
+	device->cdev_id = devid_to_int(&dev_id);
+	return ret;
+}
+
+/*
+ * Complete every queued request with -EIO.  Used when the device
+ * vanishes: in-flight requests will never see an interrupt, so they are
+ * finished here.  The caller (tape_generic_remove) holds the ccw device
+ * lock.
+ */
+static void
+__tape_discard_requests(struct tape_device *device)
+{
+	struct tape_request *	request;
+	struct list_head *	l, *n;
+
+	list_for_each_safe(l, n, &device->req_queue) {
+		request = list_entry(l, struct tape_request, list);
+		if (request->status == TAPE_REQUEST_IN_IO)
+			request->status = TAPE_REQUEST_DONE;
+		list_del(&request->list);
+
+		/* Decrease ref_count for removed request. */
+		request->device = NULL;
+		tape_put_device(device);
+		request->rc = -EIO;
+		if (request->callback != NULL)
+			request->callback(request, request->callback_data);
+	}
+}
+
+/*
+ * Driverfs tape remove function.
+ *
+ * This function is called whenever the common I/O layer detects the device
+ * gone. This can happen at any time and we cannot refuse.
+ */
+void
+tape_generic_remove(struct ccw_device *cdev)
+{
+	struct tape_device *	device;
+
+	device = dev_get_drvdata(&cdev->dev);
+	if (!device) {
+		return;
+	}
+	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	switch (device->tape_state) {
+		case TS_INIT:
+			tape_state_set(device, TS_NOT_OPER);
+			/* fallthrough */
+		case TS_NOT_OPER:
+			/*
+			 * Nothing to do.
+			 */
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			break;
+		case TS_UNUSED:
+			/*
+			 * Need only to release the device.
+			 */
+			tape_state_set(device, TS_NOT_OPER);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			tape_cleanup_device(device);
+			break;
+		default:
+			/*
+			 * There may be requests on the queue. We will not get
+			 * an interrupt for a request that was running. So we
+			 * just post them all as I/O errors.
+			 */
+			DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
+				device->cdev_id);
+			pr_warn("%s: A tape unit was detached while in use\n",
+				dev_name(&device->cdev->dev));
+			tape_state_set(device, TS_NOT_OPER);
+			__tape_discard_requests(device);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			tape_cleanup_device(device);
+	}
+
+	/* Finally detach the driver data and drop the probe reference. */
+	device = dev_get_drvdata(&cdev->dev);
+	if (device) {
+		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
+		dev_set_drvdata(&cdev->dev, NULL);
+		tape_put_device(device);
+	}
+}
+
+/*
+ * Allocate a new tape ccw request
+ */
+struct tape_request *
+tape_alloc_request(int cplength, int datasize)
+{
+	struct tape_request *request;
+
+	/* Channel program and data buffer must each fit in one page. */
+	BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
+
+	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);
+
+	request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
+	if (request == NULL) {
+		DBF_EXCEPTION(1, "cqra nomem\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	/* allocate channel program */
+	if (cplength > 0) {
+		/* GFP_DMA: the channel subsystem must be able to address
+		 * the channel program. */
+		request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
+					  GFP_ATOMIC | GFP_DMA);
+		if (request->cpaddr == NULL) {
+			DBF_EXCEPTION(1, "cqra nomem\n");
+			kfree(request);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+	/* alloc small kernel buffer */
+	if (datasize > 0) {
+		request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
+		if (request->cpdata == NULL) {
+			DBF_EXCEPTION(1, "cqra nomem\n");
+			kfree(request->cpaddr);
+			kfree(request);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
+		request->cpdata);
+
+	return request;
+}
+
+/*
+ * Free tape ccw request
+ */
+void
+tape_free_request (struct tape_request * request)
+{
+	struct tape_device *owner = request->device;
+
+	DBF_LH(6, "Free request %p\n", request);
+
+	/* Drop the device reference held by a queued request, if any. */
+	if (owner)
+		tape_put_device(owner);
+	kfree(request->cpdata);
+	kfree(request->cpaddr);
+	kfree(request);
+}
+
+/*
+ * Hand @request's channel program to the CIO layer.  A transient
+ * -EBUSY is absorbed by requeueing via the delayed worker.
+ */
+static int
+__tape_start_io(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	rc = ccw_device_start(
+		device->cdev,
+		request->cpaddr,
+		(unsigned long) request,
+		0x00,
+		request->options
+	);
+	if (rc == 0) {
+		request->status = TAPE_REQUEST_IN_IO;
+	} else if (rc == -EBUSY) {
+		/* The common I/O subsystem is currently busy. Retry later. */
+		request->status = TAPE_REQUEST_QUEUED;
+		schedule_delayed_work(&device->tape_dnr, 0);
+		rc = 0;
+	} else {
+		/* Start failed. Remove request and indicate failure. */
+		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
+	}
+	return rc;
+}
+
+/*
+ * Walk the request queue and (re)start the first startable request;
+ * requests that fail to start are completed with their error code.
+ * Called with the ccw device lock held.
+ */
+static void
+__tape_start_next_request(struct tape_device *device)
+{
+	struct list_head *l, *n;
+	struct tape_request *request;
+	int rc;
+
+	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
+	/*
+	 * Try to start each request on request queue until one is
+	 * started successful.
+	 */
+	list_for_each_safe(l, n, &device->req_queue) {
+		request = list_entry(l, struct tape_request, list);
+
+		/*
+		 * Avoid race condition if bottom-half was triggered more than
+		 * once.
+		 */
+		if (request->status == TAPE_REQUEST_IN_IO)
+			return;
+		/*
+		 * Request has already been stopped. We have to wait until
+		 * the request is removed from the queue in the interrupt
+		 * handling.
+		 */
+		if (request->status == TAPE_REQUEST_DONE)
+			return;
+
+		/*
+		 * We wanted to cancel the request but the common I/O layer
+		 * was busy at that time. This can only happen if this
+		 * function is called by delayed_next_request.
+		 * Otherwise we start the next request on the queue.
+		 */
+		if (request->status == TAPE_REQUEST_CANCEL) {
+			rc = __tape_cancel_io(device, request);
+		} else {
+			rc = __tape_start_io(device, request);
+		}
+		if (rc == 0)
+			return;
+
+		/* Set ending status. */
+		request->rc = rc;
+		request->status = TAPE_REQUEST_DONE;
+
+		/* Remove from request queue. */
+		list_del(&request->list);
+
+		/* Do callback. */
+		if (request->callback != NULL)
+			request->callback(request, request->callback_data);
+	}
+}
+
+/*
+ * Delayed-work handler: retry starting/cancelling queued requests that
+ * hit a busy CIO layer earlier (scheduled via device->tape_dnr).
+ */
+static void
+tape_delayed_next_request(struct work_struct *work)
+{
+	struct tape_device *device =
+		container_of(work, struct tape_device, tape_dnr.work);
+
+	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	__tape_start_next_request(device);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+}
+
+/*
+ * Timer callback: the long-busy wait period expired; restart the
+ * request at the head of the queue and drop the device reference
+ * (presumably taken when the timer was armed — the arming site is the
+ * interrupt handler, not visible here).
+ */
+static void tape_long_busy_timeout(unsigned long data)
+{
+	struct tape_request *request;
+	struct tape_device *device;
+
+	device = (struct tape_device *) data;
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	request = list_entry(device->req_queue.next, struct tape_request, list);
+	BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
+	DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
+	__tape_start_next_request(device);
+	device->lb_timeout.data = 0UL;
+	tape_put_device(device);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+}
+
+/*
+ * Complete @request with result @rc (fire its callback and unlink it),
+ * then kick the next queued request.  @request may be NULL to only
+ * restart the queue.
+ */
+static void
+__tape_end_request(
+	struct tape_device *	device,
+	struct tape_request *	request,
+	int			rc)
+{
+	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
+	if (request) {
+		request->rc = rc;
+		request->status = TAPE_REQUEST_DONE;
+
+		/* Remove from request queue. */
+		list_del(&request->list);
+
+		/* Do callback. */
+		if (request->callback != NULL)
+			request->callback(request, request->callback_data);
+	}
+
+	/* Start next request. */
+	if (!list_empty(&device->req_queue))
+		__tape_start_next_request(device);
+}
+
+/*
+ * Write sense data to dbf
+ */
+void
+tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
+		    struct irb *irb)
+{
+	unsigned int *sptr;
+	const char* op;
+
+	if (request != NULL)
+		op = tape_op_verbose[request->op];
+	else
+		op = "---";
+	DBF_EVENT(3, "DSTAT : %02x   CSTAT: %02x\n",
+		  irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
+	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
+	/* Dump the first 32 bytes of the extended control word (sense). */
+	sptr = (unsigned int *) irb->ecw;
+	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
+	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
+	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
+	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
+}
+
+/*
+ * I/O helper function. Adds the request to the request queue
+ * and starts it if the tape is idle. Has to be called with
+ * the device lock held.
+ */
+static int
+__tape_start_request(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	switch (request->op) {
+		/* These maintenance/sense operations are also allowed while
+		 * the device is still TS_INIT or TS_UNUSED. */
+		case TO_MSEN:
+		case TO_ASSIGN:
+		case TO_UNASSIGN:
+		case TO_READ_ATTMSG:
+		case TO_RDC:
+			if (device->tape_state == TS_INIT)
+				break;
+			if (device->tape_state == TS_UNUSED)
+				break;
+			/* fallthrough */
+		default:
+			if (device->tape_state == TS_BLKUSE)
+				break;
+			if (device->tape_state != TS_IN_USE)
+				return -ENODEV;
+	}
+
+	/* Increase use count of device for the added request. */
+	request->device = tape_get_device(device);
+
+	if (list_empty(&device->req_queue)) {
+		/* No other requests are on the queue. Start this one. */
+		rc = __tape_start_io(device, request);
+		if (rc)
+			return rc;
+
+		DBF_LH(5, "Request %p added for execution.\n", request);
+		list_add(&request->list, &device->req_queue);
+	} else {
+		DBF_LH(5, "Request %p add to queue.\n", request);
+		request->status = TAPE_REQUEST_QUEUED;
+		list_add_tail(&request->list, &device->req_queue);
+	}
+	return 0;
+}
+
+/*
+ * Add the request to the request queue, try to start it if the
+ * tape is idle. Return without waiting for end of i/o.
+ */
+int
+tape_do_io_async(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Add request to request queue and try to start it.  Completion is
+	 * reported through the request's callback, not awaited here. */
+	rc = __tape_start_request(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
+/*
+ * tape_do_io/__tape_wake_up
+ * Add the request to the request queue, try to start it if the
+ * tape is idle and wait uninterruptible for its completion.
+ */
+/*
+ * Completion callback for tape_do_io(): clearing ->callback marks the
+ * request as finished, then the sleeper on the wait queue is woken.
+ */
+static void
+__tape_wake_up(struct tape_request *request, void *data)
+{
+	request->callback = NULL;
+	wake_up((wait_queue_head_t *) data);
+}
+
+/*
+ * Queue @request, start it if possible and wait (uninterruptibly) for
+ * its completion.  Returns the start error or the request's result.
+ */
+int
+tape_do_io(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Setup callback */
+	request->callback = __tape_wake_up;
+	request->callback_data = &device->wait_queue;
+	/* Add request to request queue and try to start it. */
+	rc = __tape_start_request(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	if (rc)
+		return rc;
+	/* Request added to the queue. Wait for its completion. */
+	wait_event(device->wait_queue, (request->callback == NULL));
+	/* Get rc from request */
+	return request->rc;
+}
+
+/*
+ * tape_do_io_interruptible/__tape_wake_up_interruptible
+ * Add the request to the request queue, try to start it if the
+ * tape is idle and wait uninterruptible for its completion.
+ */
+static void
+__tape_wake_up_interruptible(struct tape_request *request, void *data)
+{
+	/* Same contract as __tape_wake_up, but wakes an interruptible sleeper. */
+	request->callback = NULL;
+	wake_up_interruptible((wait_queue_head_t *) data);
+}
+
+int
+tape_do_io_interruptible(struct tape_device *device,
+			 struct tape_request *request)
+{
+	int rc;
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Setup callback */
+	request->callback = __tape_wake_up_interruptible;
+	request->callback_data = &device->wait_queue;
+	rc = __tape_start_request(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	if (rc)
+		return rc;
+	/* Request added to the queue. Wait for its completion. */
+	rc = wait_event_interruptible(device->wait_queue,
+				      (request->callback == NULL))
+	if (rc != -ERESTARTSYS)
+		/* Request finished normally. */
+		return request->rc;
+
+	/* Interrupted by a signal. We have to stop the current request. */
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = __tape_cancel_io(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	if (rc == 0) {
+		/*
+		 * Wait for the interrupt that acknowledges the halt.
+		 * This wait must not be abandoned on a further signal:
+		 * the request is still in flight until the interrupt
+		 * arrives, hence the retry loop on -ERESTARTSYS.
+		 */
+		do {
+			rc = wait_event_interruptible(
+				device->wait_queue,
+				(request->callback == NULL)
+			);
+		} while (rc == -ERESTARTSYS);
+
+		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
+		rc = -ERESTARTSYS;
+	}
+	return rc;
+}
+
+/*
+ * Stop running ccw.
+ */
+int
+tape_cancel_io(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	/* Locked wrapper around __tape_cancel_io for external callers. */
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = __tape_cancel_io(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
+/*
+ * Tape interrupt routine, called from the ccw_device layer
+ */
+static void
+__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	int rc;
+
+	device = dev_get_drvdata(&cdev->dev);
+	if (device == NULL) {
+		return;
+	}
+	/* intparm carries the request this interrupt belongs to (may be NULL
+	 * for unsolicited interrupts). */
+	request = (struct tape_request *) intparm;
+
+	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);
+
+	/* On special conditions irb is an error pointer */
+	if (IS_ERR(irb)) {
+		/* FIXME: What to do with the request? */
+		switch (PTR_ERR(irb)) {
+			case -ETIMEDOUT:
+				DBF_LH(1, "(%08x): Request timed out\n",
+				       device->cdev_id);
+				/* fallthrough: a timed-out request is ended with -EIO */
+			case -EIO:
+				__tape_end_request(device, request, -EIO);
+				break;
+			default:
+				DBF_LH(1, "(%08x): Unexpected i/o error %li\n",
+				       device->cdev_id,	PTR_ERR(irb));
+		}
+		return;
+	}
+
+	/*
+	 * If the condition code is not zero and the start function bit is
+	 * still set, this is an deferred error and the last start I/O did
+	 * not succeed. At this point the condition that caused the deferred
+	 * error might still apply. So we just schedule the request to be
+	 * started later.
+	 */
+	if (irb->scsw.cmd.cc != 0 &&
+	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
+	    (request->status == TAPE_REQUEST_IN_IO)) {
+		DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
+			device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
+		request->status = TAPE_REQUEST_QUEUED;
+		schedule_delayed_work(&device->tape_dnr, HZ);
+		return;
+	}
+
+	/* May be an unsolicited irq */
+	if(request != NULL)
+		request->rescnt = irb->scsw.cmd.count;
+	else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
+		 !list_empty(&device->req_queue)) {
+		/* Not Ready to Ready after long busy ? */
+		struct tape_request *req;
+		req = list_entry(device->req_queue.next,
+				 struct tape_request, list);
+		if (req->status == TAPE_REQUEST_LONG_BUSY) {
+			DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
+			if (del_timer(&device->lb_timeout)) {
+				/* Timer was pending: drop the reference it
+				 * held and restart queued work. */
+				device->lb_timeout.data = 0UL;
+				tape_put_device(device);
+				__tape_start_next_request(device);
+			}
+			return;
+		}
+	}
+	/* 0x0c == channel end + device end, i.e. normal completion. */
+	if (irb->scsw.cmd.dstat != 0x0c) {
+		/* Set the 'ONLINE' flag depending on sense byte 1 */
+		if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
+			device->tape_generic_status |= GMT_ONLINE(~0);
+		else
+			device->tape_generic_status &= ~GMT_ONLINE(~0);
+
+		/*
+		 * Any request that does not come back with channel end
+		 * and device end is unusual. Log the sense data.
+		 */
+		DBF_EVENT(3,"-- Tape Interrupthandler --\n");
+		tape_dump_sense_dbf(device, request, irb);
+	} else {
+		/* Upon normal completion the device _is_ online */
+		device->tape_generic_status |= GMT_ONLINE(~0);
+	}
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(6, "tape:device is not operational\n");
+		return;
+	}
+
+	/*
+	 * Request that were canceled still come back with an interrupt.
+	 * To detect these request the state will be set to TAPE_REQUEST_DONE.
+	 */
+	if(request != NULL && request->status == TAPE_REQUEST_DONE) {
+		__tape_end_request(device, request, -EIO);
+		return;
+	}
+
+	/* Hand the interrupt to the discipline (device-type specific) layer. */
+	rc = device->discipline->irq(device, request, irb);
+	/*
+	 * rc < 0 : request finished unsuccessfully.
+	 * rc == TAPE_IO_SUCCESS: request finished successfully.
+	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
+	 * rc == TAPE_IO_RETRY: request finished but needs another go.
+	 * rc == TAPE_IO_STOP: request needs to get terminated.
+	 */
+	switch (rc) {
+		case TAPE_IO_SUCCESS:
+			/* Upon normal completion the device _is_ online */
+			device->tape_generic_status |= GMT_ONLINE(~0);
+			__tape_end_request(device, request, rc);
+			break;
+		case TAPE_IO_PENDING:
+			break;
+		case TAPE_IO_LONG_BUSY:
+			/* The timer takes a device reference, released when
+			 * the timer is deleted or fires. */
+			device->lb_timeout.data =
+				(unsigned long) tape_get_device(device);
+			device->lb_timeout.expires = jiffies +
+				LONG_BUSY_TIMEOUT * HZ;
+			DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
+			add_timer(&device->lb_timeout);
+			request->status = TAPE_REQUEST_LONG_BUSY;
+			break;
+		case TAPE_IO_RETRY:
+			rc = __tape_start_io(device, request);
+			if (rc)
+				__tape_end_request(device, request, rc);
+			break;
+		case TAPE_IO_STOP:
+			rc = __tape_cancel_io(device, request);
+			if (rc)
+				__tape_end_request(device, request, rc);
+			break;
+		default:
+			if (rc > 0) {
+				DBF_EVENT(6, "xunknownrc\n");
+				__tape_end_request(device, request, -EIO);
+			} else {
+				__tape_end_request(device, request, rc);
+			}
+			break;
+	}
+}
+
+/*
+ * Tape device open function used by tape_char frontend.
+ */
+int
+tape_open(struct tape_device *device)
+{
+	int rc;
+
+	/* State checks and the TS_IN_USE transition must be atomic. */
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(6, "TAPE:nodev\n");
+		rc = -ENODEV;
+	} else if (device->tape_state == TS_IN_USE) {
+		DBF_EVENT(6, "TAPE:dbusy\n");
+		rc = -EBUSY;
+	} else if (device->tape_state == TS_BLKUSE) {
+		DBF_EVENT(6, "TAPE:dbusy\n");
+		rc = -EBUSY;
+	} else if (device->discipline != NULL &&
+		   !try_module_get(device->discipline->owner)) {
+		/* Pin the discipline module while the device is open. */
+		DBF_EVENT(6, "TAPE:nodisc\n");
+		rc = -ENODEV;
+	} else {
+		tape_state_set(device, TS_IN_USE);
+		rc = 0;
+	}
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
+/*
+ * Tape device release function used by tape_char frontend.
+ */
+int
+tape_release(struct tape_device *device)
+{
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	if (device->tape_state == TS_IN_USE)
+		tape_state_set(device, TS_UNUSED);
+	/* Drop the discipline module reference taken in tape_open. */
+	module_put(device->discipline->owner);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return 0;
+}
+
+/*
+ * Execute a magnetic tape command a number of times.
+ */
+int
+tape_mtop(struct tape_device *device, int mt_op, int mt_count)
+{
+	tape_mtop_fn fn;
+	int rc;
+
+	DBF_EVENT(6, "TAPE:mtio\n");
+	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
+	DBF_EVENT(6, "TAPE:arg:	 %x\n", mt_count);
+
+	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
+		return -EINVAL;
+	/* Dispatch through the discipline's per-operation table. */
+	fn = device->discipline->mtop_array[mt_op];
+	if (fn == NULL)
+		return -EINVAL;
+
+	/* We assume that the backends can handle count up to 500. */
+	if (mt_op == MTBSR  || mt_op == MTFSR  || mt_op == MTFSF  ||
+	    mt_op == MTBSF  || mt_op == MTFSFM || mt_op == MTBSFM) {
+		/* Space operations are chunked into batches of 500. */
+		rc = 0;
+		for (; mt_count > 500; mt_count -= 500)
+			if ((rc = fn(device, 500)) != 0)
+				break;
+		if (rc == 0)
+			rc = fn(device, mt_count);
+	} else
+		rc = fn(device, mt_count);
+	return rc;
+
+}
+
+/*
+ * Tape init function.
+ */
+static int
+tape_init (void)
+{
+	/* NOTE(review): debug_register may fail and return NULL/ERR_PTR;
+	 * the result is not checked here — confirm against baseline. */
+	TAPE_DBF_AREA = debug_register ( "tape", 2, 2, 4*sizeof(long));
+	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
+#ifdef DBF_LIKE_HELL
+	debug_set_level(TAPE_DBF_AREA, 6);
+#endif
+	DBF_EVENT(3, "tape init\n");
+	tape_proc_init();
+	tapechar_init ();
+	return 0;
+}
+
+/*
+ * Tape exit function.
+ */
+static void
+tape_exit(void)
+{
+	DBF_EVENT(6, "tape exit\n");
+
+	/* Get rid of the frontends, then release the debug area last so
+	 * the frontends can still log during their teardown. */
+	tapechar_exit();
+	tape_proc_cleanup();
+	debug_unregister (TAPE_DBF_AREA);
+}
+
+MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
+	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
+MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
+MODULE_LICENSE("GPL");
+
+module_init(tape_init);
+module_exit(tape_exit);
+
+/* Core symbols used by the tape discipline and frontend modules. */
+EXPORT_SYMBOL(tape_generic_remove);
+EXPORT_SYMBOL(tape_generic_probe);
+EXPORT_SYMBOL(tape_generic_online);
+EXPORT_SYMBOL(tape_generic_offline);
+EXPORT_SYMBOL(tape_generic_pm_suspend);
+EXPORT_SYMBOL(tape_put_device);
+EXPORT_SYMBOL(tape_get_device);
+EXPORT_SYMBOL(tape_state_verbose);
+EXPORT_SYMBOL(tape_op_verbose);
+EXPORT_SYMBOL(tape_state_set);
+EXPORT_SYMBOL(tape_med_state_set);
+EXPORT_SYMBOL(tape_alloc_request);
+EXPORT_SYMBOL(tape_free_request);
+EXPORT_SYMBOL(tape_dump_sense_dbf);
+EXPORT_SYMBOL(tape_do_io);
+EXPORT_SYMBOL(tape_do_io_async);
+EXPORT_SYMBOL(tape_do_io_interruptible);
+EXPORT_SYMBOL(tape_cancel_io);
+EXPORT_SYMBOL(tape_mtop);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/tape_proc.c b/src/kernel/linux/v4.14/drivers/s390/char/tape_proc.c
new file mode 100644
index 0000000..faae304
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/tape_proc.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    tape device driver for S/390 and zSeries tapes.
+ *
+ *  S390 and zSeries version
+ *    Copyright IBM Corp. 2001
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *
+ * PROCFS Functions
+ */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+
+#define TAPE_DBF_AREA	tape_core_dbf
+
+#include "tape.h"
+
+/* Fixed-width (8 char) medium state names, indexed by medium state value. */
+static const char *tape_med_st_verbose[MS_SIZE] =
+{
+	[MS_UNKNOWN] = "UNKNOWN ",
+	[MS_LOADED] = "LOADED  ",
+	[MS_UNLOADED] = "UNLOADED"
+};
+
+/* our proc tapedevices entry */
+static struct proc_dir_entry *tape_proc_devices;
+
+/*
+ * Show function for /proc/tapedevices
+ */
+/*
+ * Show function for /proc/tapedevices: emits the header on the first
+ * iteration and one line per existing tape device.  v is the 1-based
+ * iterator cookie produced by tape_proc_start.
+ */
+static int tape_proc_show(struct seq_file *m, void *v)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	const char *str;
+	unsigned long n;
+
+	n = (unsigned long) v - 1;
+	if (!n) {
+		seq_printf(m, "TapeNo\tBusID      CuType/Model\t"
+			"DevType/Model\tBlkSize\tState\tOp\tMedState\n");
+	}
+	/* Minor numbers without a device are simply skipped. */
+	device = tape_find_device(n);
+	if (IS_ERR(device))
+		return 0;
+	/* Hold the ccw device lock so the snapshot of one line is consistent. */
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	seq_printf(m, "%d\t", (int) n);
+	seq_printf(m, "%-10.10s ", dev_name(&device->cdev->dev));
+	seq_printf(m, "%04X/", device->cdev->id.cu_type);
+	seq_printf(m, "%02X\t", device->cdev->id.cu_model);
+	seq_printf(m, "%04X/", device->cdev->id.dev_type);
+	seq_printf(m, "%02X\t\t", device->cdev->id.dev_model);
+	if (device->char_data.block_size == 0)
+		seq_printf(m, "auto\t");
+	else
+		seq_printf(m, "%i\t", device->char_data.block_size);
+	if (device->tape_state >= 0 &&
+	    device->tape_state < TS_SIZE)
+		str = tape_state_verbose[device->tape_state];
+	else
+		str = "UNKNOWN";
+	seq_printf(m, "%s\t", str);
+	if (!list_empty(&device->req_queue)) {
+		/* Report the operation of the request at the queue head. */
+		request = list_entry(device->req_queue.next,
+				     struct tape_request, list);
+		str = tape_op_verbose[request->op];
+	} else
+		str = "---";
+	seq_printf(m, "%s\t", str);
+	seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	/* Drop the reference taken by tape_find_device. */
+	tape_put_device(device);
+	return 0;
+}
+
+static void *tape_proc_start(struct seq_file *m, loff_t *pos)
+{
+	/* Iterate over all possible device indices; stop past the last one. */
+	if (*pos >= 256 / TAPE_MINORS_PER_DEV)
+		return NULL;
+	/* +1 keeps the cookie non-NULL for index 0 (NULL would mean "stop"). */
+	return (void *)((unsigned long) *pos + 1);
+}
+
+static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	/* Advance to the next device index and revalidate via ->start. */
+	++*pos;
+	return tape_proc_start(m, pos);
+}
+
+/* Nothing to release: the iterator holds no state between calls. */
+static void tape_proc_stop(struct seq_file *m, void *v)
+{
+}
+
+/* seq_file iterator for /proc/tapedevices. */
+static const struct seq_operations tape_proc_seq = {
+	.start		= tape_proc_start,
+	.next		= tape_proc_next,
+	.stop		= tape_proc_stop,
+	.show		= tape_proc_show,
+};
+
+/* open() handler: bind the seq_file iterator to the opened file. */
+static int tape_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &tape_proc_seq);
+}
+
+/* File operations for /proc/tapedevices; read side is seq_file generic. */
+static const struct file_operations tape_proc_ops =
+{
+	.owner		= THIS_MODULE,
+	.open		= tape_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+/*
+ * Initialize procfs stuff on startup
+ */
+/*
+ * Create /proc/tapedevices.  A NULL result from proc_create simply
+ * leaves the entry absent; tape_proc_cleanup already handles that case,
+ * so no further action is needed here.
+ */
+void
+tape_proc_init(void)
+{
+	tape_proc_devices =
+		proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL,
+			    &tape_proc_ops);
+}
+
+/*
+ * Cleanup all stuff registered to the procfs
+ */
+void
+tape_proc_cleanup(void)
+{
+	/* Only remove the entry if tape_proc_init actually created it. */
+	if (tape_proc_devices != NULL)
+		remove_proc_entry ("tapedevices", NULL);
+}
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/tape_std.c b/src/kernel/linux/v4.14/drivers/s390/char/tape_std.c
new file mode 100644
index 0000000..7caba0c
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/tape_std.c
@@ -0,0 +1,751 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    standard tape device functions for ibm tapes.
+ *
+ *  S390 and zSeries version
+ *    Copyright IBM Corp. 2001, 2002
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *		 Stefan Bader <shbader@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/timer.h>
+
+#include <asm/types.h>
+#include <asm/idals.h>
+#include <asm/ebcdic.h>
+#include <asm/tape390.h>
+
+#define TAPE_DBF_AREA	tape_core_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+
+/*
+ * tape_std_assign
+ */
+static void
+tape_std_assign_timeout(unsigned long data)
+{
+	struct tape_request *	request;
+	struct tape_device *	device;
+	int rc;
+
+	/* Timer callback: data is the stalled ASSIGN request (see
+	 * tape_std_assign, which arms this timer). */
+	request = (struct tape_request *) data;
+	device = request->device;
+	BUG_ON(!device);
+
+	DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
+			device->cdev_id);
+	/* Cancel the blocked ASSIGN so tape_do_io_interruptible can return. */
+	rc = tape_cancel_io(device, request);
+	if(rc)
+		DBF_EVENT(3, "(%08x): Assign timeout: Cancel failed with rc = "
+			  "%i\n", device->cdev_id, rc);
+}
+
+int
+tape_std_assign(struct tape_device *device)
+{
+	int                  rc;
+	struct timer_list    timeout;
+	struct tape_request *request;
+
+	/* 2 ccws, 11 bytes of channel program data. */
+	request = tape_alloc_request(2, 11);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+
+	request->op = TO_ASSIGN;
+	tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+
+	/*
+	 * The assign command sometimes blocks if the device is assigned
+	 * to another host (actually this shouldn't happen but it does).
+	 * So we set up a timeout for this call.
+	 */
+	init_timer_on_stack(&timeout);
+	timeout.function = tape_std_assign_timeout;
+	timeout.data     = (unsigned long) request;
+	timeout.expires  = jiffies + 2 * HZ;
+	add_timer(&timeout);
+
+	rc = tape_do_io_interruptible(device, request);
+
+	/* The request is done (or cancelled); the timer must not fire now. */
+	del_timer_sync(&timeout);
+	destroy_timer_on_stack(&timeout);
+
+	if (rc != 0) {
+		DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
+			device->cdev_id);
+	} else {
+		DBF_EVENT(3, "%08x: Tape assigned\n", device->cdev_id);
+	}
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * tape_std_unassign
+ */
+int
+tape_std_unassign (struct tape_device *device)
+{
+	int                  rc;
+	struct tape_request *request;
+
+	/* A non-operational device can no longer execute channel programs. */
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(3, "(%08x): Can't unassign device\n",
+			device->cdev_id);
+		return -EIO;
+	}
+
+	request = tape_alloc_request(2, 11);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+
+	request->op = TO_UNASSIGN;
+	tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+
+	if ((rc = tape_do_io(device, request)) != 0) {
+		DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id);
+	} else {
+		DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id);
+	}
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * TAPE390_DISPLAY: Show a string on the tape display.
+ */
+int
+tape_std_display(struct tape_device *device, struct display_struct *disp)
+{
+	struct tape_request *request;
+	int rc;
+
+	/* 17 bytes of cpdata: 1 control byte + 2 x 8-byte messages. */
+	request = tape_alloc_request(2, 17);
+	if (IS_ERR(request)) {
+		DBF_EVENT(3, "TAPE: load display failed\n");
+		return PTR_ERR(request);
+	}
+	request->op = TO_DIS;
+
+	*(unsigned char *) request->cpdata = disp->cntrl;
+	DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl);
+	memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8);
+	memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8);
+	/* The control unit expects the message text in EBCDIC. */
+	ASCEBC(((unsigned char*) request->cpdata) + 1, 16);
+
+	tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+
+	rc = tape_do_io_interruptible(device, request);
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * Read block id.
+ */
+int
+tape_std_read_block_id(struct tape_device *device, __u64 *id)
+{
+	struct tape_request *request;
+	int rc;
+
+	request = tape_alloc_request(3, 8);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_RBI;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+	/* execute it */
+	rc = tape_do_io(device, request);
+	if (rc == 0)
+		/* Get result from read buffer; *id is only valid on success. */
+		*id = *(__u64 *) request->cpdata;
+	tape_free_request(request);
+	return rc;
+}
+
+int
+tape_std_terminate_write(struct tape_device *device)
+{
+	int rc;
+
+	/* Nothing pending to terminate. */
+	if(device->required_tapemarks == 0)
+		return 0;
+
+	DBF_LH(5, "tape%d: terminate write %dxEOF\n", device->first_minor,
+		device->required_tapemarks);
+
+	/* Write the outstanding tapemarks ... */
+	rc = tape_mtop(device, MTWEOF, device->required_tapemarks);
+	if (rc)
+		return rc;
+
+	/* ... then back up over the last one written. */
+	device->required_tapemarks = 0;
+	return tape_mtop(device, MTBSR, 1);
+}
+
+/*
+ * MTLOAD: Loads the tape.
+ * The default implementation just wait until the tape medium state changes
+ * to MS_LOADED.
+ */
+int
+tape_std_mtload(struct tape_device *device, int count)
+{
+	/* Sleep until the medium state machine reports a loaded tape;
+	 * interruptible, so a signal aborts the wait. */
+	return wait_event_interruptible(device->state_change_wq,
+		(device->medium_state == MS_LOADED));
+}
+
+/*
+ * MTSETBLK: Set block size.
+ */
+int
+tape_std_mtsetblk(struct tape_device *device, int count)
+{
+	struct idal_buffer *new;
+
+	DBF_LH(6, "tape_std_mtsetblk(%d)\n", count);
+	if (count <= 0) {
+		/*
+		 * Just set block_size to 0. tapechar_read/tapechar_write
+		 * will realloc the idal buffer if a bigger one than the
+		 * current is needed.
+		 */
+		device->char_data.block_size = 0;
+		return 0;
+	}
+	if (device->char_data.idal_buf != NULL &&
+	    device->char_data.idal_buf->size == count)
+		/* We already have a idal buffer of that size. */
+		return 0;
+
+	if (count > MAX_BLOCKSIZE) {
+		DBF_EVENT(3, "Invalid block size (%d > %d) given.\n",
+			count, MAX_BLOCKSIZE);
+		return -EINVAL;
+	}
+
+	/* Allocate a new idal buffer before freeing the old one, so the
+	 * old buffer survives an allocation failure. */
+	new = idal_buffer_alloc(count, 0);
+	if (IS_ERR(new))
+		return -ENOMEM;
+	if (device->char_data.idal_buf != NULL)
+		idal_buffer_free(device->char_data.idal_buf);
+	device->char_data.idal_buf = new;
+	device->char_data.block_size = count;
+
+	DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size);
+
+	return 0;
+}
+
+/*
+ * MTRESET: Set block size to 0.
+ */
+int
+tape_std_mtreset(struct tape_device *device, int count)
+{
+	DBF_EVENT(6, "TCHAR:devreset:\n");
+	/* Block size 0 means "auto" (see tape_std_mtsetblk). */
+	device->char_data.block_size = 0;
+	return 0;
+}
+
+/*
+ * MTFSF: Forward space over 'count' file marks. The tape is positioned
+ * at the EOT (End of Tape) side of the file mark.
+ */
+int
+tape_std_mtfsf(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+
+	/* One ccw per file mark, plus mode set and trailing NOP. */
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSF;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it (request is released by tape_do_io_free) */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTFSR: Forward space over 'count' tape blocks (blocksize is set
+ * via MTSETBLK.
+ */
+int
+tape_std_mtfsr(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSB;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	rc = tape_do_io(device, request);
+	/* A non-zero residual count means a tapemark stopped the spacing;
+	 * report that as rc == 1 (see tape_std_mteom's use of this). */
+	if (rc == 0 && request->rescnt > 0) {
+		DBF_LH(3, "FSR over tapemark\n");
+		rc = 1;
+	}
+	tape_free_request(request);
+
+	return rc;
+}
+
+/*
+ * MTBSR: Backward space over 'count' tape blocks.
+ * (blocksize is set via MTSETBLK.
+ */
+int
+tape_std_mtbsr(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_BSB;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	rc = tape_do_io(device, request);
+	/* Residual count > 0: a tapemark ended the spacing early (rc = 1). */
+	if (rc == 0 && request->rescnt > 0) {
+		DBF_LH(3, "BSR over tapemark\n");
+		rc = 1;
+	}
+	tape_free_request(request);
+
+	return rc;
+}
+
+/*
+ * MTWEOF: Write 'count' file marks at the current position.
+ */
+int
+tape_std_mtweof(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_WTM;
+	/* setup ccws: mode set, then one WRITETAPEMARK per requested mark */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it (request is released by tape_do_io_free) */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTBSFM: Backward space over 'count' file marks.
+ * The tape is positioned at the BOT (Begin Of Tape) side of the
+ * last skipped file mark.
+ */
+int
+tape_std_mtbsfm(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_BSF;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it (request is released by tape_do_io_free) */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTBSF: Backward space over 'count' file marks. The tape is positioned at
+ * the EOT (End of Tape) side of the last skipped file mark.
+ */
+int
+tape_std_mtbsf(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_BSF;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+	/* execute it */
+	rc = tape_do_io_free(device, request);
+	if (rc == 0) {
+		/* Backspacing stops on the BOT side of the mark; space one
+		 * block forward to end up on the EOT side.  MTFSR's rc == 1
+		 * (tapemark crossed) is the expected outcome here. */
+		rc = tape_mtop(device, MTFSR, 1);
+		if (rc > 0)
+			rc = 0;
+	}
+	return rc;
+}
+
+/*
+ * MTFSFM: Forward space over 'count' file marks.
+ * The tape is positioned at the BOT (Begin Of Tape) side
+ * of the last skipped file mark.
+ */
+int
+tape_std_mtfsfm(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSF;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+	/* execute it */
+	rc = tape_do_io_free(device, request);
+	if (rc == 0) {
+		/* Forward spacing stops on the EOT side of the mark; back up
+		 * one block to land on the BOT side.  MTBSR's rc == 1
+		 * (tapemark crossed) is the expected outcome here. */
+		rc = tape_mtop(device, MTBSR, 1);
+		if (rc > 0)
+			rc = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * MTREW: Rewind the tape.
+ */
+int
+tape_std_mtrew(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(3, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_REW;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+		    device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+
+	/* execute it (request is released by tape_do_io_free) */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTOFFL: Rewind the tape and put the drive off-line.
+ * Implement 'rewind unload'
+ */
+int
+tape_std_mtoffl(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(3, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_RUN;
+	/* setup ccws: REWIND_UNLOAD implements 'rewind then unload' */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+
+	/* execute it (request is released by tape_do_io_free) */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTNOP: 'No operation'.
+ */
+int
+tape_std_mtnop(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_NOP;
+	/* setup ccws: still issues a channel program, so errors are detected */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+	/* execute it (request is released by tape_do_io_free) */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTEOM: positions at the end of the portion of the tape already used
+ * for recordind data. MTEOM positions after the last file mark, ready for
+ * appending another file.
+ */
+int
+tape_std_mteom(struct tape_device *device, int mt_count)
+{
+	int rc;
+
+	/*
+	 * Seek from the beginning of tape (rewind).
+	 */
+	if ((rc = tape_mtop(device, MTREW, 1)) < 0)
+		return rc;
+
+	/*
+	 * The logical end of volume is given by two sequential tapemarks.
+	 * Look for this by skipping to the next file (over one tapemark)
+	 * and then test for another one (fsr returns 1 if a tapemark was
+	 * encountered).
+	 */
+	do {
+		if ((rc = tape_mtop(device, MTFSF, 1)) < 0)
+			return rc;
+		if ((rc = tape_mtop(device, MTFSR, 1)) < 0)
+			return rc;
+	} while (rc == 0);
+
+	/* Back over the second tapemark so new data appends after the first. */
+	return tape_mtop(device, MTBSR, 1);
+}
+
+/*
+ * MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind.
+ */
+int
+tape_std_mtreten(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(4, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSF;
+	/* setup ccws: forward space files in a loop (the TIC branches back
+	 * to the start of the channel program) until end of tape. */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1,FORSPACEFILE, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
+	tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
+	/* execute it, MTRETEN rc gets ignored */
+	tape_do_io_interruptible(device, request);
+	tape_free_request(request);
+	/* Retension completes with a rewind back to the load point. */
+	return tape_mtop(device, MTREW, 1);
+}
+
+/*
+ * MTERASE: erases the tape.
+ */
+int
+tape_std_mterase(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(6, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_DSE;
+	/* setup ccws: rewind, erase gap, data security erase, rewind again */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 4, REWIND, 0, NULL);
+	tape_ccw_end(request->cpaddr + 5, NOP, 0, NULL);
+
+	/* execute it (request is released by tape_do_io_free) */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTUNLOAD: Rewind the tape and unload it.
+ */
+int
+tape_std_mtunload(struct tape_device *device, int mt_count)
+{
+	/* Unload is implemented as MTOFFL (rewind unload). */
+	return tape_mtop(device, MTOFFL, mt_count);
+}
+
+/*
+ * MTCOMPRESSION: used to enable compression.
+ * Sets the IDRC on/off.
+ */
+int
+tape_std_mtcompression(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	/* mt_count acts as a boolean: 0 = compression off, 1 = on. */
+	if (mt_count < 0 || mt_count > 1) {
+		DBF_EXCEPTION(6, "xcom parm\n");
+		return -EINVAL;
+	}
+	request = tape_alloc_request(2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_NOP;
+	/* setup ccws: flip the IDRC bit (0x08) in the mode set byte, then
+	 * issue a mode set so the control unit picks up the new value. */
+	if (mt_count == 0)
+		*device->modeset_byte &= ~0x08;
+	else
+		*device->modeset_byte |= 0x08;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+	/* execute it (request is released by tape_do_io_free) */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * Read Block
+ */
+struct tape_request *
+tape_std_read_block(struct tape_device *device, size_t count)
+{
+	struct tape_request *request;
+
+	/*
+	 * We have to alloc 4 ccws in order to be able to transform request
+	 * into a read backward request in error case
+	 * (see tape_std_read_backward).
+	 */
+	request = tape_alloc_request(4, 0);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "xrbl fail");
+		return request;
+	}
+	request->op = TO_RFO;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD,
+			  device->char_data.idal_buf);
+	DBF_EVENT(6, "xrbl ccwg\n");
+	/* Caller executes and frees the returned request. */
+	return request;
+}
+
+/*
+ * Read Block backward transformation function.
+ */
+/*
+ * Rewrite an already-built read-forward request (tape_std_read_block)
+ * into a read backward followed by a forward space block, reusing the
+ * 4 ccws allocated there.  Modifies the request in place.
+ */
+void
+tape_std_read_backward(struct tape_device *device, struct tape_request *request)
+{
+	/*
+	 * We have allocated 4 ccws in tape_std_read, so we can now
+	 * transform the request to a read backward, followed by a
+	 * forward space block.
+	 */
+	request->op = TO_RBA;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD,
+			 device->char_data.idal_buf);
+	tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
+	tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
+	DBF_EVENT(6, "xrop ccwg");
+}
+
+/*
+ * Write Block
+ */
+struct tape_request *
+tape_std_write_block(struct tape_device *device, size_t count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(2, 0);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "xwbl fail\n");
+		return request;
+	}
+	request->op = TO_WRI;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD,
+			  device->char_data.idal_buf);
+	DBF_EVENT(6, "xwbl ccwg\n");
+	/* Caller executes and frees the returned request. */
+	return request;
+}
+
+/*
+ * This routine is called by frontend after an ENOSP on write
+ */
+void
+tape_std_process_eov(struct tape_device *device)
+{
+	/*
+	 * End of volume: We have to backspace the last written record, then
+	 * we TRY to write a tapemark and then backspace over the written TM.
+	 * Errors are deliberately ignored: this is best-effort cleanup.
+	 */
+	if (tape_mtop(device, MTBSR, 1) == 0 &&
+	    tape_mtop(device, MTWEOF, 1) == 0) {
+		tape_mtop(device, MTBSR, 1);
+	}
+}
+
+/* Standard tape operations used by the device-specific discipline modules. */
+EXPORT_SYMBOL(tape_std_assign);
+EXPORT_SYMBOL(tape_std_unassign);
+EXPORT_SYMBOL(tape_std_display);
+EXPORT_SYMBOL(tape_std_read_block_id);
+EXPORT_SYMBOL(tape_std_mtload);
+EXPORT_SYMBOL(tape_std_mtsetblk);
+EXPORT_SYMBOL(tape_std_mtreset);
+EXPORT_SYMBOL(tape_std_mtfsf);
+EXPORT_SYMBOL(tape_std_mtfsr);
+EXPORT_SYMBOL(tape_std_mtbsr);
+EXPORT_SYMBOL(tape_std_mtweof);
+EXPORT_SYMBOL(tape_std_mtbsfm);
+EXPORT_SYMBOL(tape_std_mtbsf);
+EXPORT_SYMBOL(tape_std_mtfsfm);
+EXPORT_SYMBOL(tape_std_mtrew);
+EXPORT_SYMBOL(tape_std_mtoffl);
+EXPORT_SYMBOL(tape_std_mtnop);
+EXPORT_SYMBOL(tape_std_mteom);
+EXPORT_SYMBOL(tape_std_mtreten);
+EXPORT_SYMBOL(tape_std_mterase);
+EXPORT_SYMBOL(tape_std_mtunload);
+EXPORT_SYMBOL(tape_std_mtcompression);
+EXPORT_SYMBOL(tape_std_read_block);
+EXPORT_SYMBOL(tape_std_read_backward);
+EXPORT_SYMBOL(tape_std_write_block);
+EXPORT_SYMBOL(tape_std_process_eov);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/tape_std.h b/src/kernel/linux/v4.14/drivers/s390/char/tape_std.h
new file mode 100644
index 0000000..53ec8e2
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/tape_std.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    standard tape device functions for ibm tapes.
+ *
+ *    Copyright IBM Corp. 2001, 2006
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _TAPE_STD_H
+#define _TAPE_STD_H
+
+#include <asm/tape390.h>
+
+/*
+ * Biggest block size to handle. Currently 64K because we only build
+ * channel programs without data chaining.
+ */
+#define MAX_BLOCKSIZE   65535
+
+/*
+ * The CCW commands for the Tape type of command.
+ */
+#define INVALID_00		0x00	/* Invalid cmd */
+#define BACKSPACEBLOCK		0x27	/* Back Space block */
+#define BACKSPACEFILE		0x2f	/* Back Space file */
+#define DATA_SEC_ERASE		0x97	/* Data security erase */
+#define ERASE_GAP		0x17	/* Erase Gap */
+#define FORSPACEBLOCK		0x37	/* Forward space block */
+#define FORSPACEFILE		0x3F	/* Forward Space file */
+#define FORCE_STREAM_CNT	0xEB	/* Forced streaming count # */
+#define NOP			0x03	/* No operation	*/
+#define READ_FORWARD		0x02	/* Read forward */
+#define REWIND			0x07	/* Rewind */
+#define REWIND_UNLOAD		0x0F	/* Rewind and Unload */
+#define SENSE			0x04	/* Sense */
+/* NOTE(review): NEW_MODE_SET shares opcode 0xEB with FORCE_STREAM_CNT
+ * above -- confirm this duplication is intended. */
+#define NEW_MODE_SET		0xEB	/* Guess it is Mode set */
+#define WRITE_CMD		0x01	/* Write */
+#define WRITETAPEMARK		0x1F	/* Write Tape Mark */
+
+#define ASSIGN			0xB7	/* 3420 REJECT,3480 OK	*/
+#define CONTROL_ACCESS		0xE3	/* Set high speed */
+#define DIAG_MODE_SET		0x0B	/* 3420 NOP, 3480 REJECT */
+#define LOAD_DISPLAY		0x9F	/* 3420 REJECT,3480 OK */
+#define LOCATE			0x4F	/* 3420 REJ, 3480 NOP */
+#define LOOP_WRITE_TO_READ	0x8B	/* 3480 REJECT */
+#define MODE_SET_DB		0xDB	/* 3420 REJECT,3480 OK */
+#define MODE_SET_C3		0xC3	/* for 3420 */
+#define MODE_SET_CB		0xCB	/* for 3420 */
+#define MODE_SET_D3		0xD3	/* for 3420 */
+#define READ_BACKWARD		0x0C	/* */
+#define READ_BLOCK_ID		0x22	/* 3420 REJECT,3480 OK */
+#define READ_BUFFER		0x12	/* 3420 REJECT,3480 OK */
+#define READ_BUFF_LOG		0x24	/* 3420 REJECT,3480 OK */
+#define RELEASE			0xD4	/* 3420 NOP, 3480 REJECT */
+#define REQ_TRK_IN_ERROR	0x1B	/* 3420 NOP, 3480 REJECT */
+#define RESERVE			0xF4	/* 3420 NOP, 3480 REJECT */
+#define SENSE_GROUP_ID		0x34	/* 3420 REJECT,3480 OK */
+#define SENSE_ID		0xE4	/* 3420 REJECT,3480 OK */
+#define READ_DEV_CHAR		0x64	/* Read device characteristics */
+#define SET_DIAGNOSE		0x4B	/* 3420 NOP, 3480 REJECT */
+#define SET_GROUP_ID		0xAF	/* 3420 REJECT,3480 OK */
+#define SET_TAPE_WRITE_IMMED	0xC3	/* for 3480 */
+#define SUSPEND			0x5B	/* 3420 REJ, 3480 NOP */
+#define SYNC			0x43	/* Synchronize (flush buffer) */
+#define UNASSIGN		0xC7	/* 3420 REJECT,3480 OK */
+#define PERF_SUBSYS_FUNC	0x77	/* 3490 CMD */
+#define READ_CONFIG_DATA	0xFA	/* 3490 CMD */
+#define READ_MESSAGE_ID		0x4E	/* 3490 CMD */
+#define READ_SUBSYS_DATA	0x3E	/* 3490 CMD */
+#define SET_INTERFACE_ID	0x73	/* 3490 CMD */
+
+/*
+ * Sense data flag bits, one group per sense byte.
+ * NOTE(review): confirm the byte offsets each group refers to against
+ * the discipline code that interprets the sense data.
+ */
+#define SENSE_COMMAND_REJECT		0x80
+#define SENSE_INTERVENTION_REQUIRED	0x40
+#define SENSE_BUS_OUT_CHECK		0x20
+#define SENSE_EQUIPMENT_CHECK		0x10
+#define SENSE_DATA_CHECK		0x08
+#define SENSE_OVERRUN			0x04
+#define SENSE_DEFERRED_UNIT_CHECK	0x02
+#define SENSE_ASSIGNED_ELSEWHERE	0x01
+
+#define SENSE_LOCATE_FAILURE		0x80
+#define SENSE_DRIVE_ONLINE		0x40
+#define SENSE_RESERVED			0x20
+#define SENSE_RECORD_SEQUENCE_ERR	0x10
+#define SENSE_BEGINNING_OF_TAPE		0x08
+#define SENSE_WRITE_MODE		0x04
+#define SENSE_WRITE_PROTECT		0x02
+#define SENSE_NOT_CAPABLE		0x01
+
+#define SENSE_CHANNEL_ADAPTER_CODE	0xE0
+#define SENSE_CHANNEL_ADAPTER_LOC	0x10
+#define SENSE_REPORTING_CU		0x08
+#define SENSE_AUTOMATIC_LOADER		0x04
+#define SENSE_TAPE_SYNC_MODE		0x02
+#define SENSE_TAPE_POSITIONING		0x01
+
+/* discipline functions */
+struct tape_request *tape_std_read_block(struct tape_device *, size_t);
+void tape_std_read_backward(struct tape_device *device,
+			    struct tape_request *request);
+struct tape_request *tape_std_write_block(struct tape_device *, size_t);
+void tape_std_check_locate(struct tape_device *, struct tape_request *);
+
+/* Some non-mtop commands. */
+int tape_std_assign(struct tape_device *);
+int tape_std_unassign(struct tape_device *);
+int tape_std_read_block_id(struct tape_device *device, __u64 *id);
+int tape_std_display(struct tape_device *, struct display_struct *disp);
+int tape_std_terminate_write(struct tape_device *);
+
+/* Standard magnetic tape commands. */
+int tape_std_mtbsf(struct tape_device *, int);
+int tape_std_mtbsfm(struct tape_device *, int);
+int tape_std_mtbsr(struct tape_device *, int);
+int tape_std_mtcompression(struct tape_device *, int);
+int tape_std_mteom(struct tape_device *, int);
+int tape_std_mterase(struct tape_device *, int);
+int tape_std_mtfsf(struct tape_device *, int);
+int tape_std_mtfsfm(struct tape_device *, int);
+int tape_std_mtfsr(struct tape_device *, int);
+int tape_std_mtload(struct tape_device *, int);
+int tape_std_mtnop(struct tape_device *, int);
+int tape_std_mtoffl(struct tape_device *, int);
+int tape_std_mtreset(struct tape_device *, int);
+int tape_std_mtreten(struct tape_device *, int);
+int tape_std_mtrew(struct tape_device *, int);
+int tape_std_mtsetblk(struct tape_device *, int);
+int tape_std_mtunload(struct tape_device *, int);
+int tape_std_mtweof(struct tape_device *, int);
+
+/* Event handlers */
+void tape_std_default_handler(struct tape_device *);
+void tape_std_unexpect_uchk_handler(struct tape_device *);
+void tape_std_irq(struct tape_device *);
+void tape_std_process_eov(struct tape_device *);
+
+/* The error recovery stuff.
+ * NOTE(review): 'succeded' below is a typo in the identifier itself;
+ * kept as-is since renaming would break external references. */
+void tape_std_error_recovery(struct tape_device *);
+void tape_std_error_recovery_has_failed(struct tape_device *,int error_id);
+void tape_std_error_recovery_succeded(struct tape_device *);
+void tape_std_error_recovery_do_retry(struct tape_device *);
+void tape_std_error_recovery_read_opposite(struct tape_device *);
+void tape_std_error_recovery_HWBUG(struct tape_device *, int condno);
+
+/* S390 tape types */
+enum s390_tape_type {
+        tape_3480,
+        tape_3490,
+        tape_3590,
+        tape_3592,
+};
+
+#endif // _TAPE_STD_H
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/tty3270.c b/src/kernel/linux/v4.14/drivers/s390/char/tty3270.c
new file mode 100644
index 0000000..401688b
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/tty3270.c
@@ -0,0 +1,1979 @@
+/*
+ *    IBM/3270 Driver - tty functions.
+ *
+ *  Author(s):
+ *    Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *    Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *	-- Copyright IBM Corp. 2003
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/compat.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/ebcdic.h>
+#include <linux/uaccess.h>
+
+#include "raw3270.h"
+#include "tty3270.h"
+#include "keyboard.h"
+
+/* Size of the per-tty put_char/flush_chars staging buffer. */
+#define TTY3270_CHAR_BUF_SIZE 256
+/* Data buffer size of the single pre-allocated write request. */
+#define TTY3270_OUTPUT_BUFFER_SIZE 1024
+/* Number of pages backing the free-string memory pool. */
+#define TTY3270_STRING_PAGES 5
+
+struct tty_driver *tty3270_driver;	/* The 3270 tty driver instance. */
+static int tty3270_max_index;		/* Highest tty index opened so far + 1. */
+
+static struct raw3270_fn tty3270_fn;	/* Forward declaration; defined below. */
+
+/* One character cell of the tty screen: glyph plus attributes. */
+struct tty3270_cell {
+	unsigned char character;
+	unsigned char highlight;	/* Blink/reverse/underscore. */
+	unsigned char f_color;		/* Foreground color. */
+};
+
+/* One screen line: an array of cells and the number currently in use. */
+struct tty3270_line {
+	struct tty3270_cell *cells;
+	int len;
+};
+
+/* Maximum number of numeric parameters in one escape sequence. */
+#define ESCAPE_NPAR 8
+
+/*
+ * The main tty view data structure.
+ * FIXME:
+ * 1) describe line orientation & lines list concept against screen
+ * 2) describe conversion of screen to lines
+ * 3) describe line format.
+ */
+struct tty3270 {
+	struct raw3270_view view;
+	struct tty_port port;		/* tty core port state. */
+	void **freemem_pages;		/* Array of pages used for freemem. */
+	struct list_head freemem;	/* List of free memory for strings. */
+
+	/* Output stuff. */
+	struct list_head lines;		/* List of lines. */
+	struct list_head update;	/* List of lines to update. */
+	unsigned char wcc;		/* Write control character. */
+	int nr_lines;			/* # lines in list. */
+	int nr_up;			/* # lines up in history. */
+	unsigned long update_flags;	/* Update indication bits. */
+	struct string *status;		/* Lower right of display. */
+	struct raw3270_request *write;	/* Single write request. */
+	struct timer_list timer;	/* Output delay timer. */
+
+	/* Current tty screen. */
+	unsigned int cx, cy;		/* Current output position. */
+	unsigned int highlight;		/* Blink/reverse/underscore */
+	unsigned int f_color;		/* Foreground color */
+	struct tty3270_line *screen;
+	unsigned int n_model, n_cols, n_rows;	/* New model & size */
+	struct work_struct resize_work;	/* Deferred screen resize. */
+
+	/* Input stuff. */
+	struct string *prompt;		/* Output string for input area. */
+	struct string *input;		/* Input string for read request. */
+	struct raw3270_request *read;	/* Single read request. */
+	struct raw3270_request *kreset;	/* Single keyboard reset request. */
+	unsigned char inattr;		/* Visible/invisible input. */
+	int throttle, attn;		/* tty throttle/unthrottle. */
+	struct tasklet_struct readlet;	/* Tasklet to issue read request. */
+	struct tasklet_struct hanglet;	/* Tasklet to hang up the tty. */
+	struct kbd_data *kbd;		/* key_maps stuff. */
+
+	/* Escape sequence parsing. */
+	int esc_state, esc_ques, esc_npar;
+	int esc_par[ESCAPE_NPAR];
+	unsigned int saved_cx, saved_cy;
+	unsigned int saved_highlight, saved_f_color;
+
+	/* Command recalling. */
+	struct list_head rcl_lines;	/* List of recallable lines. */
+	struct list_head *rcl_walk;	/* Point in rcl_lines list. */
+	int rcl_nr, rcl_max;		/* Number/max number of rcl_lines. */
+
+	/* Character array for put_char/flush_chars. */
+	unsigned int char_count;
+	char char_buf[TTY3270_CHAR_BUF_SIZE];
+};
+
+/* tty3270->update_flags. See tty3270_update for details. */
+#define TTY_UPDATE_ERASE	1	/* Use EWRITEA instead of WRITE. */
+#define TTY_UPDATE_LIST		2	/* Update lines in tty3270->update. */
+#define TTY_UPDATE_INPUT	4	/* Update input line. */
+#define TTY_UPDATE_STATUS	8	/* Update status line. */
+#define TTY_UPDATE_ALL		16	/* Recreate screen. */
+
+/* Forward declarations for functions used before their definitions. */
+static void tty3270_update(struct tty3270 *);
+static void tty3270_resize_work(struct work_struct *work);
+
+/*
+ * (Re)arm the per-view flush timer so that the screen update runs
+ * 'expires' jiffies from now.
+ */
+static void tty3270_set_timer(struct tty3270 *tp, int expires)
+{
+	unsigned long when;
+
+	when = jiffies + expires;
+	mod_timer(&tp->timer, when);
+}
+
+/*
+ * The input line are the two last lines of the screen.
+ * Rebuild the prompt's 3270 datastream for the given input text and
+ * mark the input area for the next screen update.  The input field
+ * holds at most two screen lines minus 11 bytes of 3270 orders.
+ */
+static void
+tty3270_update_prompt(struct tty3270 *tp, char *input, int count)
+{
+	struct string *line;
+	unsigned int off;
+
+	line = tp->prompt;
+	/* Byte 5 is the input field attribute (see the blueprint in
+	 * tty3270_create_prompt): modified-data tag while input is
+	 * pending, otherwise the current visibility attribute. */
+	if (count != 0)
+		line->string[5] = TF_INMDT;
+	else
+		line->string[5] = tp->inattr;
+	if (count > tp->view.cols * 2 - 11)
+		count = tp->view.cols * 2 - 11;
+	memcpy(line->string + 6, input, count);
+	line->string[6 + count] = TO_IC;
+	/* Clear to end of input line. */
+	if (count < tp->view.cols * 2 - 11) {
+		line->string[7 + count] = TO_RA;
+		line->string[10 + count] = 0;
+		off = tp->view.cols * tp->view.rows - 9;
+		raw3270_buffer_address(tp->view.dev, line->string+count+8, off);
+		line->len = 11 + count;
+	} else
+		line->len = 7 + count;
+	tp->update_flags |= TTY_UPDATE_INPUT;
+}
+
+/*
+ * Build the input prompt string (3270 orders plus room for the typed
+ * text) and the buffer for the read request.
+ * NOTE(review): the alloc_string() results are used unchecked --
+ * presumably guaranteed by the freemem pool sizing; confirm.
+ */
+static void
+tty3270_create_prompt(struct tty3270 *tp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, 0x6e, TO_SF, TF_INPUT,
+		  /* empty input string */
+		  TO_IC, TO_RA, 0, 0, 0 };
+	struct string *line;
+	unsigned int offset;
+
+	line = alloc_string(&tp->freemem,
+			    sizeof(blueprint) + tp->view.cols * 2 - 9);
+	tp->prompt = line;
+	tp->inattr = TF_INPUT;
+	/* Copy blueprint to status line */
+	memcpy(line->string, blueprint, sizeof(blueprint));
+	line->len = sizeof(blueprint);
+	/* Set output offsets. */
+	offset = tp->view.cols * (tp->view.rows - 2);
+	raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
+	offset = tp->view.cols * tp->view.rows - 9;
+	raw3270_buffer_address(tp->view.dev, line->string + 8, offset);
+
+	/* Allocate input string for reading. */
+	tp->input = alloc_string(&tp->freemem, tp->view.cols * 2 - 9 + 6);
+}
+
+/*
+ * The status line is the last line of the screen.  It shows the string
+ * "Running" (or "History" while scrolled back) in the lower right
+ * corner of the screen.
+ */
+static void
+tty3270_update_status(struct tty3270 * tp)
+{
+	char *str;
+
+	if (tp->nr_up != 0)
+		str = "History";
+	else
+		str = "Running";
+	memcpy(tp->status->string + 8, str, 7);
+	codepage_convert(tp->view.ascebc, tp->status->string + 8, 7);
+	tp->update_flags |= TTY_UPDATE_STATUS;
+}
+
+/*
+ * Build the 3270 datastream fragment for the status line.
+ * NOTE(review): alloc_string() result is used unchecked -- presumably
+ * guaranteed by the freemem pool sizing at init time; confirm.
+ */
+static void
+tty3270_create_status(struct tty3270 * tp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR, TAC_GREEN,
+		  0, 0, 0, 0, 0, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR,
+		  TAC_RESET };
+	struct string *line;
+	unsigned int offset;
+
+	line = alloc_string(&tp->freemem,sizeof(blueprint));
+	tp->status = line;
+	/* Copy blueprint to status line */
+	memcpy(line->string, blueprint, sizeof(blueprint));
+	/* Set address to start of status string (= last 9 characters). */
+	offset = tp->view.cols * tp->view.rows - 9;
+	raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
+}
+
+/*
+ * Set output offsets to 3270 datastream fragment of a tty string.
+ * (TO_SBA offset at the start and TO_RA offset at the end of the string)
+ * 'nr' is the screen row the string is placed on.
+ */
+static void
+tty3270_update_string(struct tty3270 *tp, struct string *line, int nr)
+{
+	unsigned char *cp;
+
+	raw3270_buffer_address(tp->view.dev, line->string + 1,
+			       tp->view.cols * nr);
+	/* Only lines that end with a repeat-to-address order carry a
+	 * trailing buffer address (4 bytes from the end). */
+	cp = line->string + line->len - 4;
+	if (*cp == TO_RA)
+		raw3270_buffer_address(tp->view.dev, cp + 1,
+				       tp->view.cols * (nr + 1));
+}
+
+/*
+ * Rebuild update list to print all lines.
+ */
+static void
+tty3270_rebuild_update(struct tty3270 *tp)
+{
+	struct string *s, *n;
+	int line, nr_up;
+
+	/*
+	 * Throw away update list and create a new one,
+	 * containing all lines that will fit on the screen.
+	 */
+	list_for_each_entry_safe(s, n, &tp->update, update)
+		list_del_init(&s->update);
+	/* Fill rows bottom-up, skipping tp->nr_up history lines; the
+	 * output area is rows 0 .. rows-3 (last two rows are prompt
+	 * and status). */
+	line = tp->view.rows - 3;
+	nr_up = tp->nr_up;
+	list_for_each_entry_reverse(s, &tp->lines, list) {
+		if (nr_up > 0) {
+			nr_up--;
+			continue;
+		}
+		tty3270_update_string(tp, s, line);
+		list_add(&s->update, &tp->update);
+		if (--line < 0)
+			break;
+	}
+	tp->update_flags |= TTY_UPDATE_LIST;
+}
+
+/*
+ * Alloc string for size bytes. If there is not enough room in
+ * freemem, free strings until there is room.  Never returns NULL:
+ * BUG()s if eviction cannot make enough room.
+ */
+static struct string *
+tty3270_alloc_string(struct tty3270 *tp, size_t size)
+{
+	struct string *s, *n;
+
+	s = alloc_string(&tp->freemem, size);
+	if (s)
+		return s;
+	/* Evict the oldest screen lines until the freed space fits the
+	 * request; at least rows-2 lines must always remain. */
+	list_for_each_entry_safe(s, n, &tp->lines, list) {
+		BUG_ON(tp->nr_lines <= tp->view.rows - 2);
+		list_del(&s->list);
+		if (!list_empty(&s->update))
+			list_del(&s->update);
+		tp->nr_lines--;
+		if (free_string(&tp->freemem, s) >= size)
+			break;
+	}
+	s = alloc_string(&tp->freemem, size);
+	BUG_ON(!s);
+	/* If eviction shrank the history below the scrollback offset,
+	 * clamp nr_up and refresh the display. */
+	if (tp->nr_up != 0 &&
+	    tp->nr_up + tp->view.rows - 2 >= tp->nr_lines) {
+		tp->nr_up = tp->nr_lines - tp->view.rows + 2;
+		tty3270_rebuild_update(tp);
+		tty3270_update_status(tp);
+	}
+	return s;
+}
+
+/*
+ * Add an empty line to the list.  tty3270_alloc_string() never returns
+ * NULL (it BUG()s instead), so 's' is safe to use unchecked.
+ */
+static void
+tty3270_blank_line(struct tty3270 *tp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, TO_SA, TAT_EXTHI, TAX_RESET,
+		  TO_SA, TAT_COLOR, TAC_RESET, TO_RA, 0, 0, 0 };
+	struct string *s;
+
+	s = tty3270_alloc_string(tp, sizeof(blueprint));
+	memcpy(s->string, blueprint, sizeof(blueprint));
+	s->len = sizeof(blueprint);
+	list_add_tail(&s->list, &tp->lines);
+	tp->nr_lines++;
+	/* Keep the scrollback anchored on the same history line. */
+	if (tp->nr_up != 0)
+		tp->nr_up++;
+}
+
+/*
+ * Create a blank screen and remove all lines from the history.
+ */
+static void
+tty3270_blank_screen(struct tty3270 *tp)
+{
+	struct string *s, *n;
+	int i;
+
+	/* Clear the cell-based screen image (output area only). */
+	for (i = 0; i < tp->view.rows - 2; i++)
+		tp->screen[i].len = 0;
+	tp->nr_up = 0;
+	/* Release every line string back to the freemem pool. */
+	list_for_each_entry_safe(s, n, &tp->lines, list) {
+		list_del(&s->list);
+		if (!list_empty(&s->update))
+			list_del(&s->update);
+		tp->nr_lines--;
+		free_string(&tp->freemem, s);
+	}
+}
+
+/*
+ * Write request completion callback.  Runs when the single write
+ * request finishes; returns the request to tp->write for reuse.
+ */
+static void
+tty3270_write_callback(struct raw3270_request *rq, void *data)
+{
+	struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
+
+	if (rq->rc != 0) {
+		/* Write wasn't successful. Refresh all. */
+		tp->update_flags = TTY_UPDATE_ALL;
+		tty3270_set_timer(tp, 1);
+	}
+	raw3270_request_reset(rq);
+	xchg(&tp->write, rq);
+}
+
+/*
+ * Update 3270 display.  Builds one write request from the pending
+ * update_flags (erase, status, input, line list) and starts it; any
+ * part that did not fit or could not be sent is retried via the timer.
+ */
+static void
+tty3270_update(struct tty3270 *tp)
+{
+	static char invalid_sba[2] = { 0xff, 0xff };
+	struct raw3270_request *wrq;
+	unsigned long updated;
+	struct string *s, *n;
+	char *sba, *str;
+	int rc, len;
+
+	/* Take ownership of the single write request; if it is still in
+	 * flight, retry shortly via the timer. */
+	wrq = xchg(&tp->write, 0);
+	if (!wrq) {
+		tty3270_set_timer(tp, 1);
+		return;
+	}
+
+	spin_lock(&tp->view.lock);
+	updated = 0;
+	if (tp->update_flags & TTY_UPDATE_ALL) {
+		tty3270_rebuild_update(tp);
+		tty3270_update_status(tp);
+		tp->update_flags = TTY_UPDATE_ERASE | TTY_UPDATE_LIST |
+			TTY_UPDATE_INPUT | TTY_UPDATE_STATUS;
+	}
+	if (tp->update_flags & TTY_UPDATE_ERASE) {
+		/* Use erase write alternate to erase display. */
+		raw3270_request_set_cmd(wrq, TC_EWRITEA);
+		updated |= TTY_UPDATE_ERASE;
+	} else
+		raw3270_request_set_cmd(wrq, TC_WRITE);
+
+	raw3270_request_add_data(wrq, &tp->wcc, 1);
+	tp->wcc = TW_NONE;
+
+	/*
+	 * Update status line.
+	 */
+	if (tp->update_flags & TTY_UPDATE_STATUS)
+		if (raw3270_request_add_data(wrq, tp->status->string,
+					     tp->status->len) == 0)
+			updated |= TTY_UPDATE_STATUS;
+
+	/*
+	 * Write input line.
+	 */
+	if (tp->update_flags & TTY_UPDATE_INPUT)
+		if (raw3270_request_add_data(wrq, tp->prompt->string,
+					     tp->prompt->len) == 0)
+			updated |= TTY_UPDATE_INPUT;
+
+	sba = invalid_sba;
+
+	if (tp->update_flags & TTY_UPDATE_LIST) {
+		/* Write strings in the update list to the screen. */
+		list_for_each_entry_safe(s, n, &tp->update, update) {
+			str = s->string;
+			len = s->len;
+			/*
+			 * Skip TO_SBA at the start of the string if the
+			 * last output position matches the start address
+			 * of this line.
+			 */
+			if (s->string[1] == sba[0] && s->string[2] == sba[1])
+				str += 3, len -= 3;
+			if (raw3270_request_add_data(wrq, str, len) != 0)
+				break;	/* Request buffer full. */
+			list_del_init(&s->update);
+			if (s->string[s->len - 4] == TO_RA)
+				sba = s->string + s->len - 3;
+			else
+				sba = invalid_sba;
+		}
+		if (list_empty(&tp->update))
+			updated |= TTY_UPDATE_LIST;
+	}
+	wrq->callback = tty3270_write_callback;
+	rc = raw3270_start(&tp->view, wrq);
+	if (rc == 0) {
+		/* Clear only what actually made it into this request;
+		 * anything still pending triggers another timer pass. */
+		tp->update_flags &= ~updated;
+		if (tp->update_flags)
+			tty3270_set_timer(tp, 1);
+	} else {
+		/* Start failed: hand the request back for a later retry. */
+		raw3270_request_reset(wrq);
+		xchg(&tp->write, wrq);
+	}
+	spin_unlock(&tp->view.lock);
+}
+
+/*
+ * Command recalling.
+ * Append an input line to the recall list, evicting the oldest entry
+ * when rcl_max is reached.  tty3270_alloc_string() never returns NULL.
+ */
+static void
+tty3270_rcl_add(struct tty3270 *tp, char *input, int len)
+{
+	struct string *s;
+
+	/* Any new input invalidates the current recall position. */
+	tp->rcl_walk = NULL;
+	if (len <= 0)
+		return;
+	if (tp->rcl_nr >= tp->rcl_max) {
+		s = list_entry(tp->rcl_lines.next, struct string, list);
+		list_del(&s->list);
+		free_string(&tp->freemem, s);
+		tp->rcl_nr--;
+	}
+	s = tty3270_alloc_string(tp, len);
+	memcpy(s->string, input, len);
+	list_add_tail(&s->list, &tp->rcl_lines);
+	tp->rcl_nr++;
+}
+
+/*
+ * Walk backward through the list of recalled input lines and redisplay
+ * the previous one in the input prompt (command recall).  When the walk
+ * pointer is unset, start from the newest recalled line; with an empty
+ * recall list the prompt is simply cleared.
+ */
+static void
+tty3270_rcl_backward(struct kbd_data *kbd)
+{
+	struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
+	struct string *s;
+
+	spin_lock_bh(&tp->view.lock);
+	if (tp->inattr == TF_INPUT) {
+		if (tp->rcl_walk && tp->rcl_walk->prev != &tp->rcl_lines)
+			tp->rcl_walk = tp->rcl_walk->prev;
+		else if (!list_empty(&tp->rcl_lines))
+			tp->rcl_walk = tp->rcl_lines.prev;
+		/* The redundant pre-computation of 's' that used to sit
+		 * here was a dead store; the if/else below covers both
+		 * cases. */
+		if (tp->rcl_walk) {
+			s = list_entry(tp->rcl_walk, struct string, list);
+			tty3270_update_prompt(tp, s->string, s->len);
+		} else
+			tty3270_update_prompt(tp, NULL, 0);
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Deactivate tty view.  Bound to a keyboard function key in
+ * tty3270_install; hands the display back to raw3270.
+ */
+static void
+tty3270_exit_tty(struct kbd_data *kbd)
+{
+	struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
+
+	raw3270_deactivate_view(&tp->view);
+}
+
+/*
+ * Scroll forward in history: move one screenful (rows - 2 lines)
+ * toward the present, clamped at the live screen.
+ */
+static void
+tty3270_scroll_forward(struct kbd_data *kbd)
+{
+	struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
+	int nr_up;
+
+	spin_lock_bh(&tp->view.lock);
+	nr_up = tp->nr_up - tp->view.rows + 2;
+	if (nr_up < 0)
+		nr_up = 0;
+	if (nr_up != tp->nr_up) {
+		tp->nr_up = nr_up;
+		tty3270_rebuild_update(tp);
+		tty3270_update_status(tp);
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Scroll backward in history: move one screenful (rows - 2 lines)
+ * into the past, clamped at the oldest stored line.
+ */
+static void
+tty3270_scroll_backward(struct kbd_data *kbd)
+{
+	struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
+	int nr_up;
+
+	spin_lock_bh(&tp->view.lock);
+	nr_up = tp->nr_up + tp->view.rows - 2;
+	if (nr_up + tp->view.rows - 2 > tp->nr_lines)
+		nr_up = tp->nr_lines - tp->view.rows + 2;
+	if (nr_up != tp->nr_up) {
+		tp->nr_up = nr_up;
+		tty3270_rebuild_update(tp);
+		tty3270_update_status(tp);
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Pass input line to tty.  Runs as a tasklet after the read request
+ * completed (see tty3270_read_callback); finishes by re-arming the
+ * read request and dropping the view reference the callback took.
+ */
+static void
+tty3270_read_tasklet(struct raw3270_request *rrq)
+{
+	static char kreset_data = TW_KR;
+	struct tty3270 *tp = container_of(rrq->view, struct tty3270, view);
+	char *input;
+	int len;
+
+	spin_lock_bh(&tp->view.lock);
+	/*
+	 * Two AID keys are special: For 0x7d (enter) the input line
+	 * has to be emitted to the tty and for 0x6d the screen
+	 * needs to be redrawn.
+	 */
+	input = NULL;
+	len = 0;
+	if (tp->input->string[0] == 0x7d) {
+		/* Enter: write input to tty. */
+		input = tp->input->string + 6;
+		/* Input length = buffer length minus the 6-byte header
+		 * minus the residual count left by the channel. */
+		len = tp->input->len - 6 - rrq->rescnt;
+		if (tp->inattr != TF_INPUTN)
+			tty3270_rcl_add(tp, input, len);
+		if (tp->nr_up > 0) {
+			/* Leave history mode before emitting input. */
+			tp->nr_up = 0;
+			tty3270_rebuild_update(tp);
+			tty3270_update_status(tp);
+		}
+		/* Clear input area. */
+		tty3270_update_prompt(tp, NULL, 0);
+		tty3270_set_timer(tp, 1);
+	} else if (tp->input->string[0] == 0x6d) {
+		/* Display has been cleared. Redraw. */
+		tp->update_flags = TTY_UPDATE_ALL;
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+
+	/* Start keyboard reset command. */
+	raw3270_request_reset(tp->kreset);
+	raw3270_request_set_cmd(tp->kreset, TC_WRITE);
+	raw3270_request_add_data(tp->kreset, &kreset_data, 1);
+	raw3270_start(&tp->view, tp->kreset);
+
+	/* Feed the collected characters to the keyboard translation. */
+	while (len-- > 0)
+		kbd_keycode(tp->kbd, *input++);
+	/* Emit keycode for AID byte. */
+	kbd_keycode(tp->kbd, 256 + tp->input->string[0]);
+
+	raw3270_request_reset(rrq);
+	xchg(&tp->read, rrq);
+	raw3270_put_view(&tp->view);
+}
+
+/*
+ * Read request completion callback.  Takes a view reference that is
+ * released at the end of tty3270_read_tasklet.
+ */
+static void
+tty3270_read_callback(struct raw3270_request *rq, void *data)
+{
+	struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
+	raw3270_get_view(rq->view);
+	/* Schedule tasklet to pass input to tty. */
+	tasklet_schedule(&tp->readlet);
+}
+
+/*
+ * Issue a read request. Call with device lock.  'lock' selects the
+ * locked start path vs. the interrupt-context start path.
+ */
+static void
+tty3270_issue_read(struct tty3270 *tp, int lock)
+{
+	struct raw3270_request *req;
+	int rc;
+
+	req = xchg(&tp->read, 0);
+	if (!req) {
+		/* Read already scheduled. */
+		return;
+	}
+	req->callback = tty3270_read_callback;
+	req->callback_data = tp;
+	raw3270_request_set_cmd(req, TC_READMOD);
+	raw3270_request_set_data(req, tp->input->string, tp->input->len);
+	/* Issue the read modified request. */
+	rc = lock ? raw3270_start(&tp->view, req)
+		  : raw3270_start_irq(&tp->view, req);
+	if (rc) {
+		raw3270_request_reset(req);
+		xchg(&tp->read, req);
+	}
+}
+
+/*
+ * Hang up the tty.  Runs as a tasklet scheduled from tty3270_irq on a
+ * unit check; drops the view reference taken there.
+ */
+static void
+tty3270_hangup_tasklet(struct tty3270 *tp)
+{
+	tty_port_tty_hangup(&tp->port, true);
+	raw3270_put_view(&tp->view);
+}
+
+/*
+ * Switch to the tty view: force a full redraw on next timer tick.
+ */
+static int
+tty3270_activate(struct raw3270_view *view)
+{
+	struct tty3270 *tp = container_of(view, struct tty3270, view);
+
+	tp->update_flags = TTY_UPDATE_ALL;
+	tty3270_set_timer(tp, 1);
+	return 0;
+}
+
+/* Leave the tty view: stop the pending screen-update timer. */
+static void
+tty3270_deactivate(struct raw3270_view *view)
+{
+	struct tty3270 *tp = container_of(view, struct tty3270, view);
+
+	del_timer(&tp->timer);
+}
+
+/*
+ * Interrupt handler for the tty view.  Called by the raw3270 layer
+ * (via tty3270_fn.intv) with the interrupt response block.
+ */
+static void
+tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
+{
+	/* Handle ATTN. Schedule tasklet to read aid. */
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		if (!tp->throttle)
+			tty3270_issue_read(tp, 0);
+		else
+			tp->attn = 1;	/* Remember for unthrottle. */
+	}
+
+	if (rq) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+			rq->rc = -EIO;
+			/* Reference is dropped in tty3270_hangup_tasklet. */
+			raw3270_get_view(&tp->view);
+			tasklet_schedule(&tp->hanglet);
+		} else {
+			/* Normal end. Copy residual count. */
+			rq->rescnt = irb->scsw.cmd.count;
+		}
+	} else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+		/* Interrupt without an outstanding request -> update all */
+		tp->update_flags = TTY_UPDATE_ALL;
+		tty3270_set_timer(tp, 1);
+	}
+}
+
+/*
+ * Allocate tty3270 structure.  Returns ERR_PTR(-ENOMEM) on any
+ * allocation failure; partially acquired resources are unwound via
+ * the goto chain below.
+ */
+static struct tty3270 *
+tty3270_alloc_view(void)
+{
+	struct tty3270 *tp;
+	int pages;
+
+	tp = kzalloc(sizeof(struct tty3270), GFP_KERNEL);
+	if (!tp)
+		goto out_err;
+	tp->freemem_pages =
+		kmalloc(sizeof(void *) * TTY3270_STRING_PAGES, GFP_KERNEL);
+	if (!tp->freemem_pages)
+		goto out_tp;
+	INIT_LIST_HEAD(&tp->freemem);
+	INIT_LIST_HEAD(&tp->lines);
+	INIT_LIST_HEAD(&tp->update);
+	INIT_LIST_HEAD(&tp->rcl_lines);
+	tp->rcl_max = 20;		/* Keep at most 20 recallable lines. */
+
+	/* Fill the string pool with GFP_DMA pages -- presumably needed
+	 * for channel-addressable memory; confirm against raw3270. */
+	for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) {
+		tp->freemem_pages[pages] = (void *)
+			__get_free_pages(GFP_KERNEL|GFP_DMA, 0);
+		if (!tp->freemem_pages[pages])
+			goto out_pages;
+		add_string_memory(&tp->freemem,
+				  tp->freemem_pages[pages], PAGE_SIZE);
+	}
+	tp->write = raw3270_request_alloc(TTY3270_OUTPUT_BUFFER_SIZE);
+	if (IS_ERR(tp->write))
+		goto out_pages;
+	/* Read request data buffer is attached later (tty3270_issue_read). */
+	tp->read = raw3270_request_alloc(0);
+	if (IS_ERR(tp->read))
+		goto out_write;
+	tp->kreset = raw3270_request_alloc(1);
+	if (IS_ERR(tp->kreset))
+		goto out_read;
+	tp->kbd = kbd_alloc();
+	if (!tp->kbd)
+		goto out_reset;
+
+	tty_port_init(&tp->port);
+	setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
+		    (unsigned long) tp);
+	tasklet_init(&tp->readlet,
+		     (void (*)(unsigned long)) tty3270_read_tasklet,
+		     (unsigned long) tp->read);
+	tasklet_init(&tp->hanglet,
+		     (void (*)(unsigned long)) tty3270_hangup_tasklet,
+		     (unsigned long) tp);
+	INIT_WORK(&tp->resize_work, tty3270_resize_work);
+
+	return tp;
+
+	/* Error unwinding, in reverse order of acquisition. */
+out_reset:
+	raw3270_request_free(tp->kreset);
+out_read:
+	raw3270_request_free(tp->read);
+out_write:
+	raw3270_request_free(tp->write);
+out_pages:
+	while (pages--)
+		free_pages((unsigned long) tp->freemem_pages[pages], 0);
+	kfree(tp->freemem_pages);
+	tty_port_destroy(&tp->port);
+out_tp:
+	kfree(tp);
+out_err:
+	return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Free tty3270 structure.  Releases everything tty3270_alloc_view
+ * acquired, in reverse order.
+ */
+static void
+tty3270_free_view(struct tty3270 *tp)
+{
+	int pages;
+
+	kbd_free(tp->kbd);
+	raw3270_request_free(tp->kreset);
+	raw3270_request_free(tp->read);
+	raw3270_request_free(tp->write);
+	for (pages = 0; pages < TTY3270_STRING_PAGES; pages++)
+		free_pages((unsigned long) tp->freemem_pages[pages], 0);
+	kfree(tp->freemem_pages);
+	tty_port_destroy(&tp->port);
+	kfree(tp);
+}
+
+/*
+ * Allocate tty3270 screen: rows - 2 lines (prompt and status rows are
+ * not cell-backed) of 'cols' cells each.  Returns ERR_PTR(-ENOMEM) on
+ * failure, freeing any partially allocated lines.
+ */
+static struct tty3270_line *
+tty3270_alloc_screen(unsigned int rows, unsigned int cols)
+{
+	struct tty3270_line *screen;
+	unsigned long size;
+	int lines;
+
+	size = sizeof(struct tty3270_line) * (rows - 2);
+	screen = kzalloc(size, GFP_KERNEL);
+	if (!screen)
+		goto out_err;
+	for (lines = 0; lines < rows - 2; lines++) {
+		size = sizeof(struct tty3270_cell) * cols;
+		screen[lines].cells = kzalloc(size, GFP_KERNEL);
+		if (!screen[lines].cells)
+			goto out_screen;
+	}
+	return screen;
+out_screen:
+	while (lines--)
+		kfree(screen[lines].cells);
+	kfree(screen);
+out_err:
+	return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Free tty3270 screen.  'rows' must match the row count the screen was
+ * allocated with (rows - 2 cell lines).
+ */
+static void
+tty3270_free_screen(struct tty3270_line *screen, unsigned int rows)
+{
+	int lines;
+
+	for (lines = 0; lines < rows - 2; lines++)
+		kfree(screen[lines].cells);
+	kfree(screen);
+}
+
+/*
+ * Resize tty3270 screen.  Deferred from tty3270_resize; allocates the
+ * new cell screen, swaps it in under the view lock, rebuilds prompt
+ * and status for the new geometry and notifies the tty layer.
+ */
+static void tty3270_resize_work(struct work_struct *work)
+{
+	struct tty3270 *tp = container_of(work, struct tty3270, resize_work);
+	struct tty3270_line *screen, *oscreen;
+	struct tty_struct *tty;
+	unsigned int orows;
+	struct winsize ws;
+
+	/* Allocate outside the lock; on failure keep the old geometry. */
+	screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols);
+	if (IS_ERR(screen))
+		return;
+	/* Switch to new output size */
+	spin_lock_bh(&tp->view.lock);
+	tty3270_blank_screen(tp);
+	oscreen = tp->screen;
+	orows = tp->view.rows;
+	tp->view.model = tp->n_model;
+	tp->view.rows = tp->n_rows;
+	tp->view.cols = tp->n_cols;
+	tp->screen = screen;
+	free_string(&tp->freemem, tp->prompt);
+	free_string(&tp->freemem, tp->status);
+	tty3270_create_prompt(tp);
+	tty3270_create_status(tp);
+	while (tp->nr_lines < tp->view.rows - 2)
+		tty3270_blank_line(tp);
+	tp->update_flags = TTY_UPDATE_ALL;
+	spin_unlock_bh(&tp->view.lock);
+	tty3270_free_screen(oscreen, orows);
+	tty3270_set_timer(tp, 1);
+	/* Inform the tty layer about the new size */
+	tty = tty_port_tty_get(&tp->port);
+	if (!tty)
+		return;
+	ws.ws_row = tp->view.rows - 2;
+	ws.ws_col = tp->view.cols;
+	tty_do_resize(tty, &ws);
+	tty_kref_put(tty);
+}
+
+/*
+ * Geometry-change notification from the raw3270 layer.  Records the
+ * new model/size and defers the actual rework to tty3270_resize_work.
+ */
+static void
+tty3270_resize(struct raw3270_view *view, int model, int rows, int cols)
+{
+	struct tty3270 *tp = container_of(view, struct tty3270, view);
+
+	if (tp->n_model == model && tp->n_rows == rows && tp->n_cols == cols)
+		return;	/* No change. */
+	tp->n_model = model;
+	tp->n_rows = rows;
+	tp->n_cols = cols;
+	schedule_work(&tp->resize_work);
+}
+
+/*
+ * Unlink tty3270 data structure from tty and hang the tty up.  Drops
+ * the view reference the tty held via driver_data.
+ */
+static void
+tty3270_release(struct raw3270_view *view)
+{
+	struct tty3270 *tp = container_of(view, struct tty3270, view);
+	struct tty_struct *tty = tty_port_tty_get(&tp->port);
+
+	if (tty) {
+		tty->driver_data = NULL;
+		tty_port_tty_set(&tp->port, NULL);
+		tty_hangup(tty);
+		raw3270_put_view(&tp->view);
+		tty_kref_put(tty);
+	}
+}
+
+/*
+ * Free tty3270 data structure.  Final destructor called by the
+ * raw3270 layer; the timer must be gone before the view is freed.
+ */
+static void
+tty3270_free(struct raw3270_view *view)
+{
+	struct tty3270 *tp = container_of(view, struct tty3270, view);
+
+	del_timer_sync(&tp->timer);
+	tty3270_free_screen(tp->screen, tp->view.rows);
+	tty3270_free_view(tp);
+}
+
+/*
+ * Delayed freeing of tty3270 views: remove every view this driver
+ * registered, up to the highest minor ever opened.
+ */
+static void
+tty3270_del_views(void)
+{
+	int i;
+
+	for (i = RAW3270_FIRSTMINOR; i <= tty3270_max_index; i++) {
+		struct raw3270_view *view = raw3270_find_view(&tty3270_fn, i);
+		if (!IS_ERR(view))
+			raw3270_del_view(view);
+	}
+}
+
+/* View operations registered with the raw3270 layer. */
+static struct raw3270_fn tty3270_fn = {
+	.activate = tty3270_activate,
+	.deactivate = tty3270_deactivate,
+	.intv = (void *) tty3270_irq,
+	.release = tty3270_release,
+	.free = tty3270_free,
+	.resize = tty3270_resize
+};
+
+/*
+ * This routine is called whenever a 3270 tty is opened first time.
+ * Either reuses an already registered view for this index or creates
+ * a new one (view, screen, prompt/status strings, key bindings), then
+ * hooks it up to the tty port.
+ */
+static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	struct raw3270_view *view;
+	struct tty3270 *tp;
+	int i, rc;
+
+	/* Check if the tty3270 is already there. */
+	view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR);
+	if (!IS_ERR(view)) {
+		/* Reuse the existing view; find_view took a reference. */
+		tp = container_of(view, struct tty3270, view);
+		tty->driver_data = tp;
+		tty->winsize.ws_row = tp->view.rows - 2;
+		tty->winsize.ws_col = tp->view.cols;
+		tp->port.low_latency = 0;
+		tp->inattr = TF_INPUT;
+		goto port_install;
+	}
+	if (tty3270_max_index < tty->index + 1)
+		tty3270_max_index = tty->index + 1;
+
+	/* Allocate tty3270 structure on first open. */
+	tp = tty3270_alloc_view();
+	if (IS_ERR(tp))
+		return PTR_ERR(tp);
+
+	rc = raw3270_add_view(&tp->view, &tty3270_fn,
+			      tty->index + RAW3270_FIRSTMINOR,
+			      RAW3270_VIEW_LOCK_BH);
+	if (rc) {
+		tty3270_free_view(tp);
+		return rc;
+	}
+
+	/* view.rows/cols are valid only after raw3270_add_view. */
+	tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
+	if (IS_ERR(tp->screen)) {
+		rc = PTR_ERR(tp->screen);
+		raw3270_put_view(&tp->view);
+		raw3270_del_view(&tp->view);
+		tty3270_free_view(tp);
+		return rc;
+	}
+
+	tp->port.low_latency = 0;
+	tty->winsize.ws_row = tp->view.rows - 2;
+	tty->winsize.ws_col = tp->view.cols;
+
+	tty3270_create_prompt(tp);
+	tty3270_create_status(tp);
+	tty3270_update_status(tp);
+
+	/* Create blank line for every line in the tty output area. */
+	for (i = 0; i < tp->view.rows - 2; i++)
+		tty3270_blank_line(tp);
+
+	/* Bind function keys: exit, scrolling and command recall. */
+	tp->kbd->port = &tp->port;
+	tp->kbd->fn_handler[KVAL(K_INCRCONSOLE)] = tty3270_exit_tty;
+	tp->kbd->fn_handler[KVAL(K_SCROLLBACK)] = tty3270_scroll_backward;
+	tp->kbd->fn_handler[KVAL(K_SCROLLFORW)] = tty3270_scroll_forward;
+	tp->kbd->fn_handler[KVAL(K_CONS)] = tty3270_rcl_backward;
+	kbd_ascebc(tp->kbd, tp->view.ascebc);
+
+	raw3270_activate_view(&tp->view);
+
+port_install:
+	rc = tty_port_install(&tp->port, driver, tty);
+	if (rc) {
+		raw3270_put_view(&tp->view);
+		return rc;
+	}
+
+	tty->driver_data = tp;
+
+	return 0;
+}
+
+/*
+ * This routine is called whenever a 3270 tty is opened.
+ */
+static int
+tty3270_open(struct tty_struct *tty, struct file *filp)
+{
+	struct tty3270 *tp = tty->driver_data;
+	struct tty_port *port = &tp->port;
+
+	/* Count the open and publish the tty on the port. */
+	port->count++;
+	tty_port_tty_set(port, tty);
+	return 0;
+}
+
+/*
+ * This routine is called when the 3270 tty is closed. We wait
+ * for the remaining request to be completed. Then we clean up.
+ */
+static void
+tty3270_close(struct tty_struct *tty, struct file * filp)
+{
+	struct tty3270 *tp = tty->driver_data;
+
+	/* Only the final close detaches the tty from the port. */
+	if (tty->count > 1)
+		return;
+	if (tp)
+		tty_port_tty_set(&tp->port, NULL);
+}
+
+/* Drop the view reference taken in tty3270_install(). */
+static void tty3270_cleanup(struct tty_struct *tty)
+{
+	struct tty3270 *tp = tty->driver_data;
+
+	if (!tp)
+		return;
+	tty->driver_data = NULL;
+	raw3270_put_view(&tp->view);
+}
+
+/*
+ * We always have room.
+ */
+static int
+tty3270_write_room(struct tty_struct *tty)
+{
+	/* Output is rendered straight into the screen buffer, never queued. */
+	return INT_MAX;
+}
+
+/*
+ * Insert character into the screen at the current position with the
+ * current color and highlight. This function does NOT do cursor movement.
+ */
+static void tty3270_put_character(struct tty3270 *tp, char ch)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+
+	line = tp->screen + tp->cy;
+	if (line->len <= tp->cx) {
+		/* Cursor is past the end of the line: pad with blanks. */
+		while (line->len < tp->cx) {
+			cell = line->cells + line->len;
+			cell->character = tp->view.ascebc[' '];
+			cell->highlight = tp->highlight;
+			cell->f_color = tp->f_color;
+			line->len++;
+		}
+		/* Account for the cell written below. */
+		line->len++;
+	}
+	cell = line->cells + tp->cx;
+	/* Store the character in EBCDIC via the view's translate table. */
+	cell->character = tp->view.ascebc[(unsigned int) ch];
+	cell->highlight = tp->highlight;
+	cell->f_color = tp->f_color;
+}
+
+/*
+ * Convert a tty3270_line to a 3270 data fragment usable for output.
+ * First pass computes the fragment length, second pass emits it;
+ * both passes must apply identical attribute-change rules.
+ */
+static void
+tty3270_convert_line(struct tty3270 *tp, int line_nr)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+	struct string *s, *n;
+	unsigned char highlight;
+	unsigned char f_color;
+	char *cp;
+	int flen, i;
+
+	/* Determine how long the fragment will be. */
+	flen = 3;		/* Prefix (TO_SBA). */
+	line = tp->screen + line_nr;
+	flen += line->len;
+	highlight = TAX_RESET;
+	f_color = TAC_RESET;
+	for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
+		if (cell->highlight != highlight) {
+			flen += 3;	/* TO_SA to switch highlight. */
+			highlight = cell->highlight;
+		}
+		if (cell->f_color != f_color) {
+			flen += 3;	/* TO_SA to switch color. */
+			f_color = cell->f_color;
+		}
+	}
+	if (highlight != TAX_RESET)
+		flen += 3;	/* TO_SA to reset highlight. */
+	if (f_color != TAC_RESET)
+		flen += 3;	/* TO_SA to reset color. */
+	if (line->len < tp->view.cols)
+		flen += 4;	/* Postfix (TO_RA). */
+
+	/* Find the line in the list. */
+	i = tp->view.rows - 2 - line_nr;
+	list_for_each_entry_reverse(s, &tp->lines, list)
+		if (--i <= 0)
+			break;
+	/*
+	 * Check if the line needs to get reallocated.
+	 */
+	if (s->len != flen) {
+		/* Reallocate string. */
+		n = tty3270_alloc_string(tp, flen);
+		list_add(&n->list, &s->list);
+		list_del_init(&s->list);
+		if (!list_empty(&s->update))
+			list_del_init(&s->update);
+		free_string(&tp->freemem, s);
+		s = n;
+	}
+
+	/* Write 3270 data fragment. */
+	cp = s->string;
+	*cp++ = TO_SBA;
+	*cp++ = 0;
+	*cp++ = 0;
+
+	/* Second pass: emit TO_SA orders whenever an attribute changes. */
+	highlight = TAX_RESET;
+	f_color = TAC_RESET;
+	for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
+		if (cell->highlight != highlight) {
+			*cp++ = TO_SA;
+			*cp++ = TAT_EXTHI;
+			*cp++ = cell->highlight;
+			highlight = cell->highlight;
+		}
+		if (cell->f_color != f_color) {
+			*cp++ = TO_SA;
+			*cp++ = TAT_COLOR;
+			*cp++ = cell->f_color;
+			f_color = cell->f_color;
+		}
+		*cp++ = cell->character;
+	}
+	/* Reset attributes at end of line so the next line starts clean. */
+	if (highlight != TAX_RESET) {
+		*cp++ = TO_SA;
+		*cp++ = TAT_EXTHI;
+		*cp++ = TAX_RESET;
+	}
+	if (f_color != TAC_RESET) {
+		*cp++ = TO_SA;
+		*cp++ = TAT_COLOR;
+		*cp++ = TAC_RESET;
+	}
+	/* Blank out the remainder of the screen row. */
+	if (line->len < tp->view.cols) {
+		*cp++ = TO_RA;
+		*cp++ = 0;
+		*cp++ = 0;
+		*cp++ = 0;
+	}
+
+	if (tp->nr_up + line_nr < tp->view.rows - 2) {
+		/* Line is currently visible on screen. */
+		tty3270_update_string(tp, s, line_nr);
+		/* Add line to update list. */
+		if (list_empty(&s->update)) {
+			list_add_tail(&s->update, &tp->update);
+			tp->update_flags |= TTY_UPDATE_LIST;
+		}
+	}
+}
+
+/*
+ * Do carriage return.
+ */
+static void
+tty3270_cr(struct tty3270 *tp)
+{
+	tp->cx = 0;
+}
+
+/*
+ * Do line feed. Scrolls the screen buffer when the cursor is already
+ * on the last output line.
+ */
+static void
+tty3270_lf(struct tty3270 *tp)
+{
+	struct tty3270_line temp;
+	int i;
+
+	tty3270_convert_line(tp, tp->cy);
+	if (tp->cy < tp->view.rows - 3) {
+		tp->cy++;
+		return;
+	}
+	/* Last line just filled up. Add new, blank line. */
+	tty3270_blank_line(tp);
+	/* Rotate rows: recycle row 0's cell storage as the new last row. */
+	temp = tp->screen[0];
+	temp.len = 0;
+	for (i = 0; i < tp->view.rows - 3; i++)
+		tp->screen[i] = tp->screen[i+1];
+	tp->screen[tp->view.rows - 3] = temp;
+	tty3270_rebuild_update(tp);
+}
+
+/* Reverse line feed: move the cursor up one line if possible. */
+static void
+tty3270_ri(struct tty3270 *tp)
+{
+	if (tp->cy > 0) {
+	    tty3270_convert_line(tp, tp->cy);
+	    tp->cy--;
+	}
+}
+
+/*
+ * Insert characters at current position.
+ */
+static void
+tty3270_insert_characters(struct tty3270 *tp, int n)
+{
+	struct tty3270_line *line;
+	int k;
+
+	line = tp->screen + tp->cy;
+	/* Pad with blanks up to the cursor column if the line is shorter. */
+	while (line->len < tp->cx) {
+		line->cells[line->len].character = tp->view.ascebc[' '];
+		line->cells[line->len].highlight = TAX_RESET;
+		line->cells[line->len].f_color = TAC_RESET;
+		line->len++;
+	}
+	/* Clamp the insert count to what still fits on the row. */
+	if (n > tp->view.cols - tp->cx)
+		n = tp->view.cols - tp->cx;
+	/* Shift the tail right by n cells; copy backwards, regions overlap. */
+	k = min_t(int, line->len - tp->cx, tp->view.cols - tp->cx - n);
+	while (k--)
+		line->cells[tp->cx + n + k] = line->cells[tp->cx + k];
+	line->len += n;
+	if (line->len > tp->view.cols)
+		line->len = tp->view.cols;
+	/* Fill the opened gap with blanks carrying the current attributes. */
+	while (n-- > 0) {
+		line->cells[tp->cx + n].character = tp->view.ascebc[' '];
+		line->cells[tp->cx + n].highlight = tp->highlight;
+		line->cells[tp->cx + n].f_color = tp->f_color;
+	}
+}
+
+/*
+ * Delete characters at current position.
+ */
+static void
+tty3270_delete_characters(struct tty3270 *tp, int n)
+{
+	struct tty3270_line *line = tp->screen + tp->cy;
+	int pos;
+
+	/* Nothing to the right of the cursor? */
+	if (line->len <= tp->cx)
+		return;
+	/* Deleting everything from the cursor to the end of the line. */
+	if (line->len - tp->cx <= n) {
+		line->len = tp->cx;
+		return;
+	}
+	/* Shift the tail left by n cells and shrink the line. */
+	pos = tp->cx;
+	while (pos + n < line->len) {
+		line->cells[pos] = line->cells[pos + n];
+		pos++;
+	}
+	line->len -= n;
+}
+
+/*
+ * Erase characters at current position.
+ */
+static void
+tty3270_erase_characters(struct tty3270 *tp, int n)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+
+	line = tp->screen + tp->cy;
+	/*
+	 * NOTE(review): stores an ASCII blank while tty3270_put_character()
+	 * stores tp->view.ascebc[' '] — confirm the unconverted blank is
+	 * intended here.
+	 */
+	while (line->len > tp->cx && n-- > 0) {
+		cell = line->cells + tp->cx++;
+		cell->character = ' ';
+		cell->highlight = TAX_RESET;
+		cell->f_color = TAC_RESET;
+	}
+	/* n is -1 here if the loop consumed the full count (post-decrement). */
+	tp->cx += n;
+	tp->cx = min_t(int, tp->cx, tp->view.cols - 1);
+}
+
+/*
+ * Erase line, 3 different cases:
+ *  Esc [ 0 K	Erase from current position to end of line inclusive
+ *  Esc [ 1 K	Erase from beginning of line to current position inclusive
+ *  Esc [ 2 K	Erase entire line (without moving cursor)
+ */
+static void
+tty3270_erase_line(struct tty3270 *tp, int mode)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+	int i;
+
+	line = tp->screen + tp->cy;
+	if (mode == 0)
+		line->len = tp->cx;
+	else if (mode == 1) {
+		/* Blank everything left of the cursor, keep the rest. */
+		/* NOTE(review): ASCII blank vs ascebc[' '] elsewhere — confirm. */
+		for (i = 0; i < tp->cx; i++) {
+			cell = line->cells + i;
+			cell->character = ' ';
+			cell->highlight = TAX_RESET;
+			cell->f_color = TAC_RESET;
+		}
+		/* Make sure the cursor cell itself is part of the line. */
+		if (line->len <= tp->cx)
+			line->len = tp->cx + 1;
+	} else if (mode == 2)
+		line->len = 0;
+	tty3270_convert_line(tp, tp->cy);
+}
+
+/*
+ * Erase display, 3 different cases:
+ *  Esc [ 0 J	Erase from current position to bottom of screen inclusive
+ *  Esc [ 1 J	Erase from top of screen to current position inclusive
+ *  Esc [ 2 J	Erase entire screen (without moving the cursor)
+ */
+static void
+tty3270_erase_display(struct tty3270 *tp, int mode)
+{
+	int i;
+
+	if (mode == 0) {
+		/* Cursor line is partially erased, lines below are emptied. */
+		tty3270_erase_line(tp, 0);
+		for (i = tp->cy + 1; i < tp->view.rows - 2; i++) {
+			tp->screen[i].len = 0;
+			tty3270_convert_line(tp, i);
+		}
+	} else if (mode == 1) {
+		/* Lines above are emptied, cursor line is partially erased. */
+		for (i = 0; i < tp->cy; i++) {
+			tp->screen[i].len = 0;
+			tty3270_convert_line(tp, i);
+		}
+		tty3270_erase_line(tp, 1);
+	} else if (mode == 2) {
+		for (i = 0; i < tp->view.rows - 2; i++) {
+			tp->screen[i].len = 0;
+			tty3270_convert_line(tp, i);
+		}
+	}
+	tty3270_rebuild_update(tp);
+}
+
+/*
+ * Set attributes found in an escape sequence.
+ *  Esc [ <attr> ; <attr> ; ... m
+ */
+static void
+tty3270_set_attributes(struct tty3270 *tp)
+{
+	/* Maps SGR codes 30..39 to 3270 colors; index 8 (SGR 38) is unused. */
+	static unsigned char f_colors[] = {
+		TAC_DEFAULT, TAC_RED, TAC_GREEN, TAC_YELLOW, TAC_BLUE,
+		TAC_PINK, TAC_TURQ, TAC_WHITE, 0, TAC_DEFAULT
+	};
+	int i, attr;
+
+	for (i = 0; i <= tp->esc_npar; i++) {
+		attr = tp->esc_par[i];
+		switch (attr) {
+		case 0:		/* Reset */
+			tp->highlight = TAX_RESET;
+			tp->f_color = TAC_RESET;
+			break;
+		/* Highlight. */
+		case 4:		/* Start underlining. */
+			tp->highlight = TAX_UNDER;
+			break;
+		case 5:		/* Start blink. */
+			tp->highlight = TAX_BLINK;
+			break;
+		case 7:		/* Start reverse. */
+			tp->highlight = TAX_REVER;
+			break;
+		case 24:	/* End underlining */
+			if (tp->highlight == TAX_UNDER)
+				tp->highlight = TAX_RESET;
+			break;
+		case 25:	/* End blink. */
+			if (tp->highlight == TAX_BLINK)
+				tp->highlight = TAX_RESET;
+			break;
+		case 27:	/* End reverse. */
+			if (tp->highlight == TAX_REVER)
+				tp->highlight = TAX_RESET;
+			break;
+		/* Foreground color. */
+		case 30:	/* Black */
+		case 31:	/* Red */
+		case 32:	/* Green */
+		case 33:	/* Yellow */
+		case 34:	/* Blue */
+		case 35:	/* Magenta */
+		case 36:	/* Cyan */
+		case 37:	/* White */
+		case 39:	/* Default (maps to TAC_DEFAULT) */
+			tp->f_color = f_colors[attr - 30];
+			break;
+		}
+	}
+}
+
+/* Return escape parameter ix, treating 0 or absent as 1 (ANSI default). */
+static inline int
+tty3270_getpar(struct tty3270 *tp, int ix)
+{
+	return (tp->esc_par[ix] > 0) ? tp->esc_par[ix] : 1;
+}
+
+/* Move the cursor, clamping to the output area (rows - 2 lines). */
+static void
+tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
+{
+	int max_cx = max(0, cx);
+	int max_cy = max(0, cy);
+
+	tp->cx = min_t(int, tp->view.cols - 1, max_cx);
+	cy = min_t(int, tp->view.rows - 3, max_cy);
+	if (cy != tp->cy) {
+		/* Leaving the row: convert it to 3270 output format. */
+		tty3270_convert_line(tp, tp->cy);
+		tp->cy = cy;
+	}
+}
+
+/*
+ * Process escape sequences. Known sequences:
+ *  Esc 7			Save Cursor Position
+ *  Esc 8			Restore Cursor Position
+ *  Esc [ Pn ; Pn ; .. m	Set attributes
+ *  Esc [ Pn ; Pn H		Cursor Position
+ *  Esc [ Pn ; Pn f		Cursor Position
+ *  Esc [ Pn A			Cursor Up
+ *  Esc [ Pn B			Cursor Down
+ *  Esc [ Pn C			Cursor Forward
+ *  Esc [ Pn D			Cursor Backward
+ *  Esc [ Pn G			Cursor Horizontal Absolute
+ *  Esc [ Pn X			Erase Characters
+ *  Esc [ Ps J			Erase in Display
+ *  Esc [ Ps K			Erase in Line
+ * // FIXME: add all the new ones.
+ *
+ *  Pn is a numeric parameter, a string of zero or more decimal digits.
+ *  Ps is a selective parameter.
+ *
+ * Implemented as a small state machine; tp->esc_state carries the
+ * state between calls (one call per input character).
+ */
+static void
+tty3270_escape_sequence(struct tty3270 *tp, char ch)
+{
+	enum { ESnormal, ESesc, ESsquare, ESgetpars };
+
+	if (tp->esc_state == ESnormal) {
+		if (ch == 0x1b)
+			/* Starting new escape sequence. */
+			tp->esc_state = ESesc;
+		return;
+	}
+	if (tp->esc_state == ESesc) {
+		tp->esc_state = ESnormal;
+		switch (ch) {
+		case '[':
+			tp->esc_state = ESsquare;
+			break;
+		case 'E':
+			tty3270_cr(tp);
+			tty3270_lf(tp);
+			break;
+		case 'M':
+			tty3270_ri(tp);
+			break;
+		case 'D':
+			tty3270_lf(tp);
+			break;
+		case 'Z':		/* Respond ID. */
+			kbd_puts_queue(&tp->port, "\033[?6c");
+			break;
+		case '7':		/* Save cursor position. */
+			tp->saved_cx = tp->cx;
+			tp->saved_cy = tp->cy;
+			tp->saved_highlight = tp->highlight;
+			tp->saved_f_color = tp->f_color;
+			break;
+		case '8':		/* Restore cursor position. */
+			tty3270_convert_line(tp, tp->cy);
+			tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
+			tp->highlight = tp->saved_highlight;
+			tp->f_color = tp->saved_f_color;
+			break;
+		case 'c':		/* Reset terminal. */
+			tp->cx = tp->saved_cx = 0;
+			tp->cy = tp->saved_cy = 0;
+			tp->highlight = tp->saved_highlight = TAX_RESET;
+			tp->f_color = tp->saved_f_color = TAC_RESET;
+			tty3270_erase_display(tp, 2);
+			break;
+		}
+		return;
+	}
+	if (tp->esc_state == ESsquare) {
+		/* Start of a CSI sequence: reset the parameter array. */
+		tp->esc_state = ESgetpars;
+		memset(tp->esc_par, 0, sizeof(tp->esc_par));
+		tp->esc_npar = 0;
+		tp->esc_ques = (ch == '?');
+		if (tp->esc_ques)
+			return;
+	}
+	if (tp->esc_state == ESgetpars) {
+		/* Accumulate decimal parameters separated by ';'. */
+		if (ch == ';' && tp->esc_npar < ESCAPE_NPAR - 1) {
+			tp->esc_npar++;
+			return;
+		}
+		if (ch >= '0' && ch <= '9') {
+			tp->esc_par[tp->esc_npar] *= 10;
+			tp->esc_par[tp->esc_npar] += ch - '0';
+			return;
+		}
+	}
+	/* Non-digit, non-';': this is the final character — dispatch. */
+	tp->esc_state = ESnormal;
+	if (ch == 'n' && !tp->esc_ques) {
+		if (tp->esc_par[0] == 5)		/* Status report. */
+			kbd_puts_queue(&tp->port, "\033[0n");
+		else if (tp->esc_par[0] == 6) {	/* Cursor report. */
+			char buf[40];
+			sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1);
+			kbd_puts_queue(&tp->port, buf);
+		}
+		return;
+	}
+	if (tp->esc_ques)
+		return;
+	switch (ch) {
+	case 'm':
+		tty3270_set_attributes(tp);
+		break;
+	case 'H':	/* Set cursor position. */
+	case 'f':
+		tty3270_goto_xy(tp, tty3270_getpar(tp, 1) - 1,
+				tty3270_getpar(tp, 0) - 1);
+		break;
+	case 'd':	/* Set y position. */
+		tty3270_goto_xy(tp, tp->cx, tty3270_getpar(tp, 0) - 1);
+		break;
+	case 'A':	/* Cursor up. */
+	case 'F':
+		tty3270_goto_xy(tp, tp->cx, tp->cy - tty3270_getpar(tp, 0));
+		break;
+	case 'B':	/* Cursor down. */
+	case 'e':
+	case 'E':
+		tty3270_goto_xy(tp, tp->cx, tp->cy + tty3270_getpar(tp, 0));
+		break;
+	case 'C':	/* Cursor forward. */
+	case 'a':
+		tty3270_goto_xy(tp, tp->cx + tty3270_getpar(tp, 0), tp->cy);
+		break;
+	case 'D':	/* Cursor backward. */
+		tty3270_goto_xy(tp, tp->cx - tty3270_getpar(tp, 0), tp->cy);
+		break;
+	case 'G':	/* Set x position. */
+	case '`':
+		tty3270_goto_xy(tp, tty3270_getpar(tp, 0), tp->cy);
+		break;
+	case 'X':	/* Erase Characters. */
+		tty3270_erase_characters(tp, tty3270_getpar(tp, 0));
+		break;
+	case 'J':	/* Erase display. */
+		tty3270_erase_display(tp, tp->esc_par[0]);
+		break;
+	case 'K':	/* Erase line. */
+		tty3270_erase_line(tp, tp->esc_par[0]);
+		break;
+	case 'P':	/* Delete characters. */
+		tty3270_delete_characters(tp, tty3270_getpar(tp, 0));
+		break;
+	case '@':	/* Insert characters. */
+		tty3270_insert_characters(tp, tty3270_getpar(tp, 0));
+		break;
+	case 's':	/* Save cursor position. */
+		tp->saved_cx = tp->cx;
+		tp->saved_cy = tp->cy;
+		tp->saved_highlight = tp->highlight;
+		tp->saved_f_color = tp->f_color;
+		break;
+	case 'u':	/* Restore cursor position. */
+		tty3270_convert_line(tp, tp->cy);
+		tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
+		tp->highlight = tp->saved_highlight;
+		tp->f_color = tp->saved_f_color;
+		break;
+	}
+}
+
+/*
+ * String write routine for 3270 ttys. Interprets control characters
+ * and escape sequences while rendering into the screen buffer; runs
+ * under the view lock (BH disabled).
+ */
+static void
+tty3270_do_write(struct tty3270 *tp, struct tty_struct *tty,
+		const unsigned char *buf, int count)
+{
+	int i_msg, i;
+
+	spin_lock_bh(&tp->view.lock);
+	for (i_msg = 0; !tty->stopped && i_msg < count; i_msg++) {
+		if (tp->esc_state != 0) {
+			/* Continue escape sequence. */
+			tty3270_escape_sequence(tp, buf[i_msg]);
+			continue;
+		}
+
+		switch (buf[i_msg]) {
+		case 0x07:		/* '\a' -- Alarm */
+			tp->wcc |= TW_PLUSALARM;
+			break;
+		case 0x08:		/* Backspace. */
+			if (tp->cx > 0) {
+				tp->cx--;
+				tty3270_put_character(tp, ' ');
+			}
+			break;
+		case 0x09:		/* '\t' -- Tabulate */
+			/* Blank-fill up to the next multiple of 8. */
+			for (i = tp->cx % 8; i < 8; i++) {
+				if (tp->cx >= tp->view.cols) {
+					tty3270_cr(tp);
+					tty3270_lf(tp);
+					break;
+				}
+				tty3270_put_character(tp, ' ');
+				tp->cx++;
+			}
+			break;
+		case 0x0a:		/* '\n' -- New Line */
+			tty3270_cr(tp);
+			tty3270_lf(tp);
+			break;
+		case 0x0c:		/* '\f' -- Form Feed */
+			tty3270_erase_display(tp, 2);
+			tp->cx = tp->cy = 0;
+			break;
+		case 0x0d:		/* '\r' -- Carriage Return */
+			tp->cx = 0;
+			break;
+		case 0x0f:		/* SuSE "exit alternate mode" */
+			break;
+		case 0x1b:		/* Start escape sequence. */
+			tty3270_escape_sequence(tp, buf[i_msg]);
+			break;
+		default:		/* Insert normal character. */
+			if (tp->cx >= tp->view.cols) {
+				/* Wrap to the next line first. */
+				tty3270_cr(tp);
+				tty3270_lf(tp);
+			}
+			tty3270_put_character(tp, buf[i_msg]);
+			tp->cx++;
+			break;
+		}
+	}
+	/* Convert current line to 3270 data fragment. */
+	tty3270_convert_line(tp, tp->cy);
+
+	/* Setup timer to update display after 1/10 second */
+	if (!timer_pending(&tp->timer))
+		tty3270_set_timer(tp, HZ/10);
+
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * String write routine for 3270 ttys
+ */
+static int
+tty3270_write(struct tty_struct * tty,
+	      const unsigned char *buf, int count)
+{
+	struct tty3270 *tp = tty->driver_data;
+
+	if (!tp)
+		return 0;
+	/* Flush characters buffered by tty3270_put_char() first so that
+	 * output ordering is preserved. */
+	if (tp->char_count > 0) {
+		tty3270_do_write(tp, tty, tp->char_buf, tp->char_count);
+		tp->char_count = 0;
+	}
+	tty3270_do_write(tp, tty, buf, count);
+	return count;
+}
+
+/*
+ * Put single characters to the ttys character buffer
+ */
+static int tty3270_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	/* Drop the character when the buffer is full; caller sees 0. */
+	if (!tp || tp->char_count >= TTY3270_CHAR_BUF_SIZE)
+		return 0;
+	tp->char_buf[tp->char_count++] = ch;
+	return 1;
+}
+
+/*
+ * Flush all characters from the ttys character buffer put there
+ * by tty3270_put_char.
+ */
+static void
+tty3270_flush_chars(struct tty_struct *tty)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	if (tp->char_count > 0) {
+		tty3270_do_write(tp, tty, tp->char_buf, tp->char_count);
+		tp->char_count = 0;
+	}
+}
+
+/*
+ * Returns the number of characters in the output buffer. This is
+ * used in tty_wait_until_sent to wait until all characters have
+ * appeared on the screen.
+ */
+static int
+tty3270_chars_in_buffer(struct tty_struct *tty)
+{
+	/* Writes are rendered synchronously, so nothing is ever pending. */
+	return 0;
+}
+
+/* No driver-side output buffer, so there is nothing to discard. */
+static void
+tty3270_flush_buffer(struct tty_struct *tty)
+{
+}
+
+/*
+ * Check for visible/invisible input switches
+ */
+static void
+tty3270_set_termios(struct tty_struct *tty, struct ktermios *old)
+{
+	struct tty3270 *tp;
+	int new;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	spin_lock_bh(&tp->view.lock);
+	if (L_ICANON(tty)) {
+		/* Echo off in canonical mode selects the invisible field. */
+		new = L_ECHO(tty) ? TF_INPUT: TF_INPUTN;
+		if (new != tp->inattr) {
+			tp->inattr = new;
+			/* Redraw the prompt and schedule a screen update. */
+			tty3270_update_prompt(tp, NULL, 0);
+			tty3270_set_timer(tp, 1);
+		}
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Disable reading from a 3270 tty
+ */
+static void
+tty3270_throttle(struct tty_struct * tty)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	tp->throttle = 1;
+}
+
+/*
+ * Enable reading from a 3270 tty
+ */
+static void
+tty3270_unthrottle(struct tty_struct * tty)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	tp->throttle = 0;
+	/* Issue the read that was deferred while we were throttled. */
+	if (tp->attn)
+		tty3270_issue_read(tp, 1);
+}
+
+/*
+ * Hang up the tty device.
+ */
+static void
+tty3270_hangup(struct tty_struct *tty)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	spin_lock_bh(&tp->view.lock);
+	/* Reset cursor, attributes and screen content. */
+	tp->cx = tp->saved_cx = 0;
+	tp->cy = tp->saved_cy = 0;
+	tp->highlight = tp->saved_highlight = TAX_RESET;
+	tp->f_color = tp->saved_f_color = TAC_RESET;
+	tty3270_blank_screen(tp);
+	while (tp->nr_lines < tp->view.rows - 2)
+		tty3270_blank_line(tp);
+	tp->update_flags = TTY_UPDATE_ALL;
+	spin_unlock_bh(&tp->view.lock);
+	/* Schedule a full screen refresh. */
+	tty3270_set_timer(tp, 1);
+}
+
+/* No-op: tty3270_chars_in_buffer() always reports an empty buffer. */
+static void
+tty3270_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+}
+
+/* Forward keyboard-related ioctls to the kbd helper. */
+static int tty3270_ioctl(struct tty_struct *tty, unsigned int cmd,
+			 unsigned long arg)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return -ENODEV;
+	if (tty_io_error(tty))
+		return -EIO;
+	return kbd_ioctl(tp->kbd, cmd, arg);
+}
+
+#ifdef CONFIG_COMPAT
+/* 32-bit compat variant: convert the user pointer before forwarding. */
+static long tty3270_compat_ioctl(struct tty_struct *tty,
+				 unsigned int cmd, unsigned long arg)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return -ENODEV;
+	if (tty_io_error(tty))
+		return -EIO;
+	return kbd_ioctl(tp->kbd, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+/* tty operations for the 3270 tty driver. */
+static const struct tty_operations tty3270_ops = {
+	.install = tty3270_install,
+	.cleanup = tty3270_cleanup,
+	.open = tty3270_open,
+	.close = tty3270_close,
+	.write = tty3270_write,
+	.put_char = tty3270_put_char,
+	.flush_chars = tty3270_flush_chars,
+	.write_room = tty3270_write_room,
+	.chars_in_buffer = tty3270_chars_in_buffer,
+	.flush_buffer = tty3270_flush_buffer,
+	.throttle = tty3270_throttle,
+	.unthrottle = tty3270_unthrottle,
+	.hangup = tty3270_hangup,
+	.wait_until_sent = tty3270_wait_until_sent,
+	.ioctl = tty3270_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = tty3270_compat_ioctl,
+#endif
+	.set_termios = tty3270_set_termios
+};
+
+/* Create the tty device node when the raw3270 core detects a device. */
+static void tty3270_create_cb(int minor)
+{
+	tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);
+}
+
+/* Remove the tty device node when the underlying device goes away. */
+static void tty3270_destroy_cb(int minor)
+{
+	tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);
+}
+
+static struct raw3270_notifier tty3270_notifier =
+{
+	.create = tty3270_create_cb,
+	.destroy = tty3270_destroy_cb,
+};
+
+/*
+ * 3270 tty registration code called from tty_init().
+ * Most kernel services (incl. kmalloc) are available at this point.
+ */
+static int __init tty3270_init(void)
+{
+	struct tty_driver *driver;
+	int ret;
+
+	driver = tty_alloc_driver(RAW3270_MAXDEVS,
+				  TTY_DRIVER_REAL_RAW |
+				  TTY_DRIVER_DYNAMIC_DEV |
+				  TTY_DRIVER_RESET_TERMIOS);
+	if (IS_ERR(driver))
+		return PTR_ERR(driver);
+
+	/*
+	 * Initialize the tty_driver structure
+	 * Entries in tty3270_driver that are NOT initialized:
+	 * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
+	 */
+	driver->driver_name = "tty3270";
+	driver->name = "3270/tty";
+	driver->major = IBM_TTY3270_MAJOR;
+	driver->minor_start = RAW3270_FIRSTMINOR;
+	driver->name_base = RAW3270_FIRSTMINOR;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	tty_set_operations(driver, &tty3270_ops);
+	ret = tty_register_driver(driver);
+	if (ret) {
+		put_tty_driver(driver);
+		return ret;
+	}
+	tty3270_driver = driver;
+	/* Device nodes are created on demand via the notifier callbacks. */
+	raw3270_register_notifier(&tty3270_notifier);
+	return 0;
+}
+
+/* Module exit: undo tty3270_init() and free all remaining views. */
+static void __exit
+tty3270_exit(void)
+{
+	struct tty_driver *driver;
+
+	raw3270_unregister_notifier(&tty3270_notifier);
+	driver = tty3270_driver;
+	tty3270_driver = NULL;	/* clear the global before destroying it */
+	tty_unregister_driver(driver);
+	put_tty_driver(driver);
+	tty3270_del_views();
+}
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(IBM_TTY3270_MAJOR);
+
+module_init(tty3270_init);
+module_exit(tty3270_exit);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/tty3270.h b/src/kernel/linux/v4.14/drivers/s390/char/tty3270.h
new file mode 100644
index 0000000..52ceed6
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/tty3270.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    Copyright IBM Corp. 2007
+ *
+ */
+
+#ifndef __DRIVERS_S390_CHAR_TTY3270_H
+#define __DRIVERS_S390_CHAR_TTY3270_H
+
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+
+extern struct tty_driver *tty3270_driver;
+
+#endif /* __DRIVERS_S390_CHAR_TTY3270_H */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/vmcp.c b/src/kernel/linux/v4.14/drivers/s390/char/vmcp.c
new file mode 100644
index 0000000..17e411c
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/vmcp.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2004, 2010
+ * Interface implementation for communication with the z/VM control program
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ *
+ * z/VMs CP offers the possibility to issue commands via the diagnose code 8
+ * this driver implements a character device that issues these commands and
+ * returns the answer of CP.
+ *
+ * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/cma.h>
+#include <linux/mm.h>
+#include <asm/compat.h>
+#include <asm/cpcmd.h>
+#include <asm/debug.h>
+#include <asm/vmcp.h>
+
+/* Per-open state of /dev/vmcp. */
+struct vmcp_session {
+	char *response;		/* response buffer, NULL until first command */
+	unsigned int bufsize;	/* size of the response buffer */
+	unsigned int cma_alloc : 1;	/* buffer from CMA, free via cma_release() */
+	int resp_size;		/* length of CP's last response */
+	int resp_code;		/* CP return code of the last command */
+	struct mutex mutex;	/* serializes read/write/ioctl on this session */
+};
+
+static debug_info_t *vmcp_debug;
+
+/* CMA area size; default from Kconfig, overridable via "vmcp_cma=". */
+static unsigned long vmcp_cma_size __initdata = CONFIG_VMCP_CMA_SIZE * 1024 * 1024;
+static struct cma *vmcp_cma;
+
+/* Early parameter: page-aligned size of the contiguous area for vmcp. */
+static int __init early_parse_vmcp_cma(char *p)
+{
+	vmcp_cma_size = ALIGN(memparse(p, NULL), PAGE_SIZE);
+	return 0;
+}
+early_param("vmcp_cma", early_parse_vmcp_cma);
+
+/* Reserve the CMA area at boot; only useful when running under z/VM. */
+void __init vmcp_cma_reserve(void)
+{
+	if (!MACHINE_IS_VM)
+		return;
+	cma_declare_contiguous(0, vmcp_cma_size, 0, 0, 0, false, "vmcp", &vmcp_cma);
+}
+
+/*
+ * Allocate the response buffer for a session. Large buffers come from
+ * CMA (diagnose 8 needs physically contiguous storage), small ones
+ * from the buddy allocator. session->cma_alloc records which allocator
+ * provided the buffer so vmcp_response_free() can release it correctly.
+ */
+static void vmcp_response_alloc(struct vmcp_session *session)
+{
+	struct page *page = NULL;
+	int nr_pages, order;
+
+	order = get_order(session->bufsize);
+	nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
+	/*
+	 * For anything below order 3 allocations rely on the buddy
+	 * allocator. If such low-order allocations can't be handled
+	 * anymore the system won't work anyway.
+	 */
+	if (order > 2)
+		page = cma_alloc(vmcp_cma, nr_pages, 0, GFP_KERNEL);
+	if (page) {
+		session->response = (char *)page_to_phys(page);
+		session->cma_alloc = 1;
+		return;
+	}
+	/*
+	 * Buddy path: clear cma_alloc explicitly. The session struct is
+	 * not guaranteed to be zeroed, so a stale bit here would make
+	 * vmcp_response_free() hand a buddy page to cma_release().
+	 */
+	session->cma_alloc = 0;
+	session->response = (char *)__get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, order);
+}
+
+/* Free the response buffer via whichever allocator provided it. */
+static void vmcp_response_free(struct vmcp_session *session)
+{
+	int nr_pages, order;
+	struct page *page;
+
+	if (!session->response)
+		return;
+	order = get_order(session->bufsize);
+	nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
+	if (session->cma_alloc) {
+		page = phys_to_page((unsigned long)session->response);
+		cma_release(vmcp_cma, page, nr_pages);
+		session->cma_alloc = 0;
+	} else {
+		free_pages((unsigned long)session->response, order);
+	}
+	session->response = NULL;
+}
+
+/*
+ * Open a vmcp session; requires CAP_SYS_ADMIN since arbitrary CP
+ * commands can be issued through it.
+ */
+static int vmcp_open(struct inode *inode, struct file *file)
+{
+	struct vmcp_session *session;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	/*
+	 * kzalloc: resp_code and cma_alloc may be read before the first
+	 * command is issued (VMCP_GETCODE ioctl, vmcp_response_free()),
+	 * so the whole session must start out zeroed; plain kmalloc
+	 * would leak uninitialized heap data through VMCP_GETCODE.
+	 */
+	session = kzalloc(sizeof(*session), GFP_KERNEL);
+	if (!session)
+		return -ENOMEM;
+
+	session->bufsize = PAGE_SIZE;
+	mutex_init(&session->mutex);
+	file->private_data = session;
+	return nonseekable_open(inode, file);
+}
+
+/* Tear down a session: free the response buffer and the session itself. */
+static int vmcp_release(struct inode *inode, struct file *file)
+{
+	struct vmcp_session *session = file->private_data;
+
+	file->private_data = NULL;
+	vmcp_response_free(session);
+	kfree(session);
+	return 0;
+}
+
+/* Read (part of) the response of the last CP command. */
+static ssize_t
+vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos)
+{
+	ssize_t ret;
+	size_t size;
+	struct vmcp_session *session;
+
+	session = file->private_data;
+	if (mutex_lock_interruptible(&session->mutex))
+		return -ERESTARTSYS;
+	if (!session->response) {
+		/* No command has been issued yet: report EOF. */
+		mutex_unlock(&session->mutex);
+		return 0;
+	}
+	/* CP's response may exceed the buffer; only the stored part is readable. */
+	size = min_t(size_t, session->resp_size, session->bufsize);
+	ret = simple_read_from_buffer(buff, count, ppos,
+					session->response, size);
+
+	mutex_unlock(&session->mutex);
+
+	return ret;
+}
+
+/* Issue a CP command; its response is stored for subsequent reads. */
+static ssize_t
+vmcp_write(struct file *file, const char __user *buff, size_t count,
+	   loff_t *ppos)
+{
+	char *cmd;
+	struct vmcp_session *session;
+
+	/* Reject overlong commands. */
+	if (count > 240)
+		return -EINVAL;
+	cmd = memdup_user_nul(buff, count);
+	if (IS_ERR(cmd))
+		return PTR_ERR(cmd);
+	session = file->private_data;
+	if (mutex_lock_interruptible(&session->mutex)) {
+		kfree(cmd);
+		return -ERESTARTSYS;
+	}
+	/* Allocate the response buffer lazily on the first command. */
+	if (!session->response)
+		vmcp_response_alloc(session);
+	if (!session->response) {
+		mutex_unlock(&session->mutex);
+		kfree(cmd);
+		return -ENOMEM;
+	}
+	debug_text_event(vmcp_debug, 1, cmd);
+	session->resp_size = cpcmd(cmd, session->response, session->bufsize,
+				   &session->resp_code);
+	mutex_unlock(&session->mutex);
+	kfree(cmd);
+	*ppos = 0;		/* reset the file pointer after a command */
+	return count;
+}
+
+
+/*
+ * These ioctls are available, as the semantics of the diagnose 8 call
+ * does not fit very well into a Linux call. Diagnose X'08' is described in
+ * CP Programming Services SC24-6084-00
+ *
+ * VMCP_GETCODE: gives the CP return code back to user space
+ * VMCP_SETBUF: sets the response buffer for the next write call. diagnose 8
+ * expects adjacent pages in real storage and to make matters worse, we
+ * dont know the size of the response. Therefore we default to PAGESIZE and
+ * let userspace to change the response size, if userspace expects a bigger
+ * response
+ */
+static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct vmcp_session *session;
+	int ret = -ENOTTY;
+	int __user *argp;
+
+	session = file->private_data;
+	if (is_compat_task())
+		argp = compat_ptr(arg);
+	else
+		argp = (int __user *)arg;
+	if (mutex_lock_interruptible(&session->mutex))
+		return -ERESTARTSYS;
+	switch (cmd) {
+	case VMCP_GETCODE:
+		ret = put_user(session->resp_code, argp);
+		break;
+	case VMCP_SETBUF:
+		/* Discard the old buffer; it is reallocated on the next write. */
+		vmcp_response_free(session);
+		ret = get_user(session->bufsize, argp);
+		if (ret)
+			session->bufsize = PAGE_SIZE;
+		/* Reject zero-sized or huge (order > 8) buffer requests. */
+		if (!session->bufsize || get_order(session->bufsize) > 8) {
+			session->bufsize = PAGE_SIZE;
+			ret = -EINVAL;
+		}
+		break;
+	case VMCP_GETSIZE:
+		ret = put_user(session->resp_size, argp);
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&session->mutex);
+	return ret;
+}
+
+static const struct file_operations vmcp_fops = {
+	.owner		= THIS_MODULE,
+	.open		= vmcp_open,
+	.release	= vmcp_release,
+	.read		= vmcp_read,
+	.write		= vmcp_write,
+	.unlocked_ioctl	= vmcp_ioctl,
+	/* vmcp_ioctl handles compat pointer conversion itself. */
+	.compat_ioctl	= vmcp_ioctl,
+	.llseek		= no_llseek,
+};
+
+/* Dynamically numbered misc device: /dev/vmcp. */
+static struct miscdevice vmcp_dev = {
+	.name	= "vmcp",
+	.minor	= MISC_DYNAMIC_MINOR,
+	.fops	= &vmcp_fops,
+};
+
+/* Register the debug area and the misc device. */
+static int __init vmcp_init(void)
+{
+	int ret;
+
+	/* The driver is only useful when running as a z/VM guest. */
+	if (!MACHINE_IS_VM)
+		return 0;
+
+	vmcp_debug = debug_register("vmcp", 1, 1, 240);
+	if (!vmcp_debug)
+		return -ENOMEM;
+
+	ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view);
+	if (ret) {
+		debug_unregister(vmcp_debug);
+		return ret;
+	}
+
+	ret = misc_register(&vmcp_dev);
+	if (ret)
+		debug_unregister(vmcp_debug);
+	return ret;
+}
+device_initcall(vmcp_init);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/vmlogrdr.c b/src/kernel/linux/v4.14/drivers/s390/char/vmlogrdr.c
new file mode 100644
index 0000000..b19020b
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/vmlogrdr.c
@@ -0,0 +1,902 @@
+/*
+ *	character device driver for reading z/VM system service records
+ *
+ *
+ *	Copyright IBM Corp. 2004, 2009
+ *	character device driver for reading z/VM system service records,
+ *	Version 1.0
+ *	Author(s): Xenia Tkatschow <xenia@us.ibm.com>
+ *		   Stefan Weinhuber <wein@de.ibm.com>
+ *
+ */
+
+#define KMSG_COMPONENT "vmlogrdr"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <asm/cpcmd.h>
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include <net/iucv/iucv.h>
+#include <linux/kmod.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/string.h>
+
+MODULE_AUTHOR
+	("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
+	 "                            Stefan Weinhuber (wein@de.ibm.com)");
+MODULE_DESCRIPTION ("Character device driver for reading z/VM "
+		    "system service records.");
+MODULE_LICENSE("GPL");
+
+
+/*
+ * The size of the buffer for iucv data transfer is one page,
+ * but in addition to the data we read from iucv we also
+ * place an integer and some characters into that buffer,
+ * so the maximum size for record data is a little less then
+ * one page.
+ */
+#define NET_BUFFER_SIZE	(PAGE_SIZE - sizeof(int) - sizeof(FENCE))
+
+/*
+ * The elements that are concurrently accessed by bottom halves are
+ * connection_established, iucv_path_severed, local_interrupt_buffer
+ * and receive_ready. The first three can be protected by
+ * priv_lock.  receive_ready is atomic, so it can be incremented and
+ * decremented without holding a lock.
+ * The variable dev_in_use needs to be protected by the lock, since
+ * it's a flag used by open to make sure that the device is opened only
+ * by one user at the same time.
+ */
+struct vmlogrdr_priv_t {
+	char system_service[8]; /* IUCV system service name, e.g. "*LOGREC " */
+	char internal_name[8];  /* device node name, e.g. "logrec" */
+	char recording_name[8]; /* name used in CP RECORDING commands */
+	struct iucv_path *path;
+	int connection_established;
+	int iucv_path_severed;
+	struct iucv_message local_interrupt_buffer;
+	atomic_t receive_ready;
+	int minor_num;
+	char * buffer;           /* one page for record data (see NET_BUFFER_SIZE) */
+	char * current_position; /* read cursor within buffer */
+	int remaining;           /* bytes left in buffer for the reader */
+	ulong residual_length;   /* bytes of the current record not yet received */
+	int buffer_free;         /* 1: buffer may be refilled from IUCV */
+	int dev_in_use; /* 1: already opened, 0: not opened*/
+	spinlock_t priv_lock;
+	struct device  *device;
+	struct device  *class_device;
+	int autorecording;       /* start/stop recording on open/release */
+	int autopurge;           /* purge queued records when toggling recording */
+};
+
+
+/*
+ * File operation structure for vmlogrdr devices
+ */
+static int vmlogrdr_open(struct inode *, struct file *);
+static int vmlogrdr_release(struct inode *, struct file *);
+static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
+			      size_t count, loff_t * ppos);
+
+static const struct file_operations vmlogrdr_fops = {
+	.owner   = THIS_MODULE,
+	.open    = vmlogrdr_open,
+	.release = vmlogrdr_release,
+	.read    = vmlogrdr_read,
+	.llseek  = no_llseek,
+};
+
+
+static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 *ipuser);
+static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 *ipuser);
+static void vmlogrdr_iucv_message_pending(struct iucv_path *,
+					  struct iucv_message *);
+
+
+/* callbacks invoked by the IUCV layer for path and message events */
+static struct iucv_handler vmlogrdr_iucv_handler = {
+	.path_complete	 = vmlogrdr_iucv_path_complete,
+	.path_severed	 = vmlogrdr_iucv_path_severed,
+	.message_pending = vmlogrdr_iucv_message_pending,
+};
+
+
+/* conn_wait_queue: open() sleeps here until the IUCV path is up or severed;
+ * read_wait_queue: readers sleep here until a record is pending */
+static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
+static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
+
+/*
+ * pointer to system service private structure
+ * minor number 0 --> logrec
+ * minor number 1 --> account
+ * minor number 2 --> symptom
+ */
+
+static struct vmlogrdr_priv_t sys_ser[] = {
+	{ .system_service = "*LOGREC ",
+	  .internal_name  = "logrec",
+	  .recording_name = "EREP",
+	  .minor_num      = 0,
+	  .buffer_free    = 1,
+	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
+	  .autorecording  = 1,
+	  .autopurge      = 1,
+	},
+	{ .system_service = "*ACCOUNT",
+	  .internal_name  = "account",
+	  .recording_name = "ACCOUNT",
+	  .minor_num      = 1,
+	  .buffer_free    = 1,
+	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
+	  .autorecording  = 1,
+	  .autopurge      = 1,
+	},
+	{ .system_service = "*SYMPTOM",
+	  .internal_name  = "symptom",
+	  .recording_name = "SYMPTOM",
+	  .minor_num      = 2,
+	  .buffer_free    = 1,
+	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
+	  .autorecording  = 1,
+	  .autopurge      = 1,
+	}
+};
+
+#define MAXMINOR  (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))
+
+/* "EOR" marker appended after each complete record handed to user space */
+static char FENCE[] = {"EOR"};
+static int vmlogrdr_major = 0;
+static struct cdev  *vmlogrdr_cdev = NULL;
+static int recording_class_AB;
+
+
+/*
+ * IUCV callback: the connection to the system service is established.
+ * Flag the connection as up under the private lock and wake the opener
+ * sleeping on conn_wait_queue.
+ */
+static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
+{
+	struct vmlogrdr_priv_t *priv = path->private;
+
+	spin_lock(&priv->priv_lock);
+	priv->connection_established = 1;
+	spin_unlock(&priv->priv_lock);
+	wake_up(&conn_wait_queue);
+}
+
+
+/*
+ * IUCV callback: the path was severed by the peer.  Tear the path down,
+ * record the severed state and wake both the opener and any sleeping
+ * reader so they can observe the failure.
+ */
+static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
+{
+	struct vmlogrdr_priv_t * logptr = path->private;
+	u8 reason = (u8) ipuser[8];
+
+	pr_err("vmlogrdr: connection severed with reason %i\n", reason);
+
+	iucv_path_sever(path, NULL);
+	kfree(path);
+	logptr->path = NULL;
+
+	spin_lock(&logptr->priv_lock);
+	logptr->connection_established = 0;
+	logptr->iucv_path_severed = 1;
+	spin_unlock(&logptr->priv_lock);
+
+	wake_up(&conn_wait_queue);
+	/* just in case we're sleeping waiting for a record */
+	wake_up_interruptible(&read_wait_queue);
+}
+
+
+/*
+ * IUCV callback: a record is pending on the path.  Stash the message
+ * descriptor and wake any sleeping reader.
+ */
+static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
+					  struct iucv_message *msg)
+{
+	struct vmlogrdr_priv_t * logptr = path->private;
+
+	/*
+	 * This function is the bottom half so it should be quick.
+	 * Copy the external interrupt data into our local eib and increment
+	 * the usage count
+	 */
+	spin_lock(&logptr->priv_lock);
+	memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
+	atomic_inc(&logptr->receive_ready);
+	spin_unlock(&logptr->priv_lock);
+	wake_up_interruptible(&read_wait_queue);
+}
+
+
+/*
+ * Query CP to find out whether this guest may issue the RECORDING command
+ * with privilege class A or B semantics.  Returns 1 if the response after
+ * '=' is "ANY" or a class list containing 'A' or 'B', 0 otherwise.
+ */
+static int vmlogrdr_get_recording_class_AB(void)
+{
+	static const char cp_command[] = "QUERY COMMAND RECORDING ";
+	char cp_response[80];
+	char *tail;
+	int len,i;
+
+	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+	len = strnlen(cp_response,sizeof(cp_response));
+	// now the parsing
+	tail=strnchr(cp_response,len,'=');
+	if (!tail)
+		return 0;
+	tail++;
+	if (!strncmp("ANY",tail,3))
+		return 1;
+	if (!strncmp("NONE",tail,4))
+		return 0;
+	/*
+	 * expect comma separated list of classes here, if one of them
+	 * is A or B return 1 otherwise 0
+	 */
+        for (i=tail-cp_response; i<len; i++)
+		if ( cp_response[i]=='A' || cp_response[i]=='B' )
+			return 1;
+	return 0;
+}
+
+
+/*
+ * Switch CP recording for this service on (action == 1) or off (action == 0),
+ * optionally purging queued records.  Returns 0 when CP acknowledges with
+ * 'Command complete', -EIO otherwise.
+ */
+static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
+			      int action, int purge)
+{
+
+	char cp_command[80];
+	char cp_response[160];
+	char *onoff, *qid_string;
+	int rc;
+
+	onoff = ((action == 1) ? "ON" : "OFF");
+	qid_string = ((recording_class_AB == 1) ? " QID * " : "");
+
+	/*
+	 * The recording commands needs to be called with option QID
+	 * for guests that have privilege classes A or B.
+	 * Purging has to be done as separate step, because recording
+	 * can't be switched on as long as records are on the queue.
+	 * Doing both at the same time doesn't work.
+	 */
+	if (purge && (action == 1)) {
+		memset(cp_command, 0x00, sizeof(cp_command));
+		memset(cp_response, 0x00, sizeof(cp_response));
+		snprintf(cp_command, sizeof(cp_command),
+			 "RECORDING %s PURGE %s",
+			 logptr->recording_name,
+			 qid_string);
+		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+	}
+
+	memset(cp_command, 0x00, sizeof(cp_command));
+	memset(cp_response, 0x00, sizeof(cp_response));
+	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
+		logptr->recording_name,
+		onoff,
+		qid_string);
+	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+	/* The recording command will usually answer with 'Command complete'
+	 * on success, but when the specific service was never connected
+	 * before then there might be an additional informational message
+	 * 'HCPCRC8072I Recording entry not found' before the
+	 * 'Command complete'. So I use strstr rather than the strncmp.
+	 */
+	if (strstr(cp_response,"Command complete"))
+		rc = 0;
+	else
+		rc = -EIO;
+	/*
+	 * If we turn recording off, we have to purge any remaining records
+	 * afterwards, as a large number of queued records may impact z/VM
+	 * performance.
+	 */
+	if (purge && (action == 0)) {
+		memset(cp_command, 0x00, sizeof(cp_command));
+		memset(cp_response, 0x00, sizeof(cp_response));
+		snprintf(cp_command, sizeof(cp_command),
+			 "RECORDING %s PURGE %s",
+			 logptr->recording_name,
+			 qid_string);
+		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+	}
+
+	return rc;
+}
+
+
+/*
+ * open() for a vmlogrdr minor: enforce single-open, optionally start CP
+ * recording, connect an IUCV path to the system service and wait for the
+ * connection to come up.  Only blocking readers are supported.
+ */
+static int vmlogrdr_open (struct inode *inode, struct file *filp)
+{
+	int dev_num = 0;
+	struct vmlogrdr_priv_t * logptr = NULL;
+	int connect_rc = 0;
+	int ret;
+
+	dev_num = iminor(inode);
+	if (dev_num >= MAXMINOR)
+		return -ENODEV;
+	logptr = &sys_ser[dev_num];
+
+	/*
+	 * only allow for blocking reads to be open
+	 */
+	if (filp->f_flags & O_NONBLOCK)
+		return -EOPNOTSUPP;
+
+	/* Be sure this device hasn't already been opened */
+	spin_lock_bh(&logptr->priv_lock);
+	if (logptr->dev_in_use)	{
+		spin_unlock_bh(&logptr->priv_lock);
+		return -EBUSY;
+	}
+	logptr->dev_in_use = 1;
+	logptr->connection_established = 0;
+	logptr->iucv_path_severed = 0;
+	atomic_set(&logptr->receive_ready, 0);
+	logptr->buffer_free = 1;
+	spin_unlock_bh(&logptr->priv_lock);
+
+	/* set the file options */
+	filp->private_data = logptr;
+
+	/* start recording for this service*/
+	if (logptr->autorecording) {
+		ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
+		if (ret)
+			pr_warn("vmlogrdr: failed to start recording automatically\n");
+	}
+
+	/* create connection to the system service */
+	logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
+	if (!logptr->path)
+		goto out_dev;
+	connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
+				       logptr->system_service, NULL, NULL,
+				       logptr);
+	if (connect_rc) {
+		pr_err("vmlogrdr: iucv connection to %s "
+		       "failed with rc %i \n",
+		       logptr->system_service, connect_rc);
+		/* NOTE(review): this jumps past out_record, so recording
+		 * started above stays enabled on connect failure — verify
+		 * whether that is intended. */
+		goto out_path;
+	}
+
+	/* We've issued the connect and now we must wait for a
+	 * ConnectionComplete or ConnectionSevered Interrupt
+	 * before we can continue to process.
+	 */
+	wait_event(conn_wait_queue, (logptr->connection_established)
+		   || (logptr->iucv_path_severed));
+	if (logptr->iucv_path_severed)
+		goto out_record;
+	nonseekable_open(inode, filp);
+	return 0;
+
+out_record:
+	if (logptr->autorecording)
+		vmlogrdr_recording(logptr,0,logptr->autopurge);
+out_path:
+	kfree(logptr->path);	/* kfree(NULL) is ok. */
+	logptr->path = NULL;
+out_dev:
+	logptr->dev_in_use = 0;
+	return -EIO;
+}
+
+
+/*
+ * release() counterpart to vmlogrdr_open: sever and free the IUCV path,
+ * optionally stop CP recording, and mark the device as free again.
+ */
+static int vmlogrdr_release (struct inode *inode, struct file *filp)
+{
+	int ret;
+
+	struct vmlogrdr_priv_t * logptr = filp->private_data;
+
+	iucv_path_sever(logptr->path, NULL);
+	kfree(logptr->path);
+	logptr->path = NULL;
+	if (logptr->autorecording) {
+		ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
+		if (ret)
+			pr_warn("vmlogrdr: failed to stop recording automatically\n");
+	}
+	logptr->dev_in_use = 0;
+
+	return 0;
+}
+
+
+/*
+ * Pull the next (piece of a) record from IUCV into priv->buffer.
+ * Layout handed to the reader: 4-byte total length, record data,
+ * then the "EOR" FENCE once the record is complete.
+ * Returns 0 when data was placed in the buffer, non-zero otherwise.
+ */
+static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
+{
+	int rc, *temp;
+	/* we need to keep track of two data sizes here:
+	 * The number of bytes we need to receive from iucv and
+	 * the total number of bytes we actually write into the buffer.
+	 */
+	int user_data_count, iucv_data_count;
+	char * buffer;
+
+	if (atomic_read(&priv->receive_ready)) {
+		spin_lock_bh(&priv->priv_lock);
+		if (priv->residual_length){
+			/* receive second half of a record */
+			iucv_data_count = priv->residual_length;
+			user_data_count = 0;
+			buffer = priv->buffer;
+		} else {
+			/* receive a new record:
+			 * We need to return the total length of the record
+                         * + size of FENCE in the first 4 bytes of the buffer.
+		         */
+			iucv_data_count = priv->local_interrupt_buffer.length;
+			user_data_count = sizeof(int);
+			temp = (int*)priv->buffer;
+			*temp= iucv_data_count + sizeof(FENCE);
+			buffer = priv->buffer + sizeof(int);
+		}
+		/*
+		 * If the record is bigger than our buffer, we receive only
+		 * a part of it. We can get the rest later.
+		 */
+		if (iucv_data_count > NET_BUFFER_SIZE)
+			iucv_data_count = NET_BUFFER_SIZE;
+		rc = iucv_message_receive(priv->path,
+					  &priv->local_interrupt_buffer,
+					  0, buffer, iucv_data_count,
+					  &priv->residual_length);
+		spin_unlock_bh(&priv->priv_lock);
+		/* An rc of 5 indicates that the record was bigger than
+		 * the buffer, which is OK for us. A 9 indicates that the
+		 * record was purged before we could receive it.
+		 */
+		if (rc == 5)
+			rc = 0;
+		if (rc == 9)
+			atomic_set(&priv->receive_ready, 0);
+	} else {
+		rc = 1;
+	}
+	if (!rc) {
+		priv->buffer_free = 0;
+ 		user_data_count += iucv_data_count;
+		priv->current_position = priv->buffer;
+		if (priv->residual_length == 0){
+			/* the whole record has been captured,
+			 * now add the fence */
+			atomic_dec(&priv->receive_ready);
+			buffer = priv->buffer + user_data_count;
+			memcpy(buffer, FENCE, sizeof(FENCE));
+			user_data_count += sizeof(FENCE);
+		}
+		priv->remaining = user_data_count;
+	}
+
+	return rc;
+}
+
+
+/*
+ * read() for a vmlogrdr minor: refill the buffer (blocking until a record
+ * is pending) when it is free, then copy out at most the remainder of the
+ * current record.  Never crosses a record boundary in one call.
+ */
+static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
+			     size_t count, loff_t * ppos)
+{
+	int rc;
+	struct vmlogrdr_priv_t * priv = filp->private_data;
+
+	while (priv->buffer_free) {
+		rc = vmlogrdr_receive_data(priv);
+		if (rc) {
+			/* nothing received yet; sleep until the bottom half
+			 * signals a pending record (or we are interrupted) */
+			rc = wait_event_interruptible(read_wait_queue,
+					atomic_read(&priv->receive_ready));
+			if (rc)
+				return rc;
+		}
+	}
+	/* copy only up to end of record */
+	if (count > priv->remaining)
+		count = priv->remaining;
+
+	if (copy_to_user(data, priv->current_position, count))
+		return -EFAULT;
+
+	*ppos += count;
+	priv->current_position += count;
+	priv->remaining -= count;
+
+	/* if all data has been transferred, set buffer free */
+	if (priv->remaining == 0)
+		priv->buffer_free = 1;
+
+	return count;
+}
+
+/*
+ * sysfs store for the "autopurge" attribute: the first character of the
+ * input selects the new state, '0' disables and '1' enables automatic
+ * purging; anything else is rejected with -EINVAL.
+ */
+static ssize_t vmlogrdr_autopurge_store(struct device * dev,
+					struct device_attribute *attr,
+					const char * buf, size_t count)
+{
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+
+	if (buf[0] == '0')
+		priv->autopurge = 0;
+	else if (buf[0] == '1')
+		priv->autopurge = 1;
+	else
+		return -EINVAL;
+	return count;
+}
+
+
+/* sysfs show for "autopurge": prints 0 or 1 followed by a newline */
+static ssize_t vmlogrdr_autopurge_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+	return sprintf(buf, "%u\n", priv->autopurge);
+}
+
+
+static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
+		   vmlogrdr_autopurge_store);
+
+
+/*
+ * sysfs store for the write-only "purge" attribute: writing '1' issues a
+ * CP RECORDING ... PURGE command for this service; any other input is
+ * rejected with -EINVAL.
+ */
+static ssize_t vmlogrdr_purge_store(struct device * dev,
+				    struct device_attribute *attr,
+				    const char * buf, size_t count)
+{
+
+	char cp_command[80];
+	char cp_response[80];
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+
+	if (buf[0] != '1')
+		return -EINVAL;
+
+	memset(cp_command, 0x00, sizeof(cp_command));
+	memset(cp_response, 0x00, sizeof(cp_response));
+
+        /*
+	 * The recording command needs to be called with option QID
+	 * for guests that have privilege classes A or B.
+	 * Other guests will not recognize the command and we have to
+	 * issue the same command without the QID parameter.
+	 */
+
+	if (recording_class_AB)
+		snprintf(cp_command, sizeof(cp_command),
+			 "RECORDING %s PURGE QID * ",
+			 priv->recording_name);
+	else
+		snprintf(cp_command, sizeof(cp_command),
+			 "RECORDING %s PURGE ",
+			 priv->recording_name);
+
+	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+
+	return count;
+}
+
+
+static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
+
+
+/* sysfs store for "autorecording": '0' disables, '1' enables automatic
+ * start/stop of CP recording on open/release; anything else is -EINVAL */
+static ssize_t vmlogrdr_autorecording_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+	ssize_t ret = count;
+
+	switch (buf[0]) {
+	case '0':
+		priv->autorecording=0;
+		break;
+	case '1':
+		priv->autorecording=1;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+
+/* sysfs show for "autorecording": prints 0 or 1 followed by a newline */
+static ssize_t vmlogrdr_autorecording_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+	return sprintf(buf, "%u\n", priv->autorecording);
+}
+
+
+static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
+		   vmlogrdr_autorecording_store);
+
+
+/*
+ * sysfs store for the write-only "recording" attribute: '0' turns CP
+ * recording off, '1' turns it on (without purging).  Propagates the error
+ * code from vmlogrdr_recording() on failure.
+ */
+static ssize_t vmlogrdr_recording_store(struct device * dev,
+					struct device_attribute *attr,
+					const char * buf, size_t count)
+{
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+	ssize_t ret;
+
+	switch (buf[0]) {
+	case '0':
+		ret = vmlogrdr_recording(priv,0,0);
+		break;
+	case '1':
+		ret = vmlogrdr_recording(priv,1,0);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	if (ret)
+		return ret;
+	else
+		return count;
+
+}
+
+
+static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
+
+
+/*
+ * Driver-level sysfs attribute: dump CP's "QUERY RECORDING" response.
+ * The 4096-byte limit relies on sysfs providing a one-page buffer.
+ */
+static ssize_t recording_status_show(struct device_driver *driver, char *buf)
+{
+	static const char cp_command[] = "QUERY RECORDING ";
+	int len;
+
+	cpcmd(cp_command, buf, 4096, NULL);
+	len = strlen(buf);
+	return len;
+}
+static DRIVER_ATTR_RO(recording_status);
+/* attributes attached to the driver object in sysfs */
+static struct attribute *vmlogrdr_drv_attrs[] = {
+	&driver_attr_recording_status.attr,
+	NULL,
+};
+static struct attribute_group vmlogrdr_drv_attr_group = {
+	.attrs = vmlogrdr_drv_attrs,
+};
+static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
+	&vmlogrdr_drv_attr_group,
+	NULL,
+};
+
+/* attributes attached to each vmlogrdr device in sysfs */
+static struct attribute *vmlogrdr_attrs[] = {
+	&dev_attr_autopurge.attr,
+	&dev_attr_purge.attr,
+	&dev_attr_autorecording.attr,
+	&dev_attr_recording.attr,
+	NULL,
+};
+static struct attribute_group vmlogrdr_attr_group = {
+	.attrs = vmlogrdr_attrs,
+};
+static const struct attribute_group *vmlogrdr_attr_groups[] = {
+	&vmlogrdr_attr_group,
+	NULL,
+};
+
+/*
+ * PM prepare callback: refuse to suspend while the device node is open,
+ * since the IUCV connection state could not be restored reliably.
+ */
+static int vmlogrdr_pm_prepare(struct device *dev)
+{
+	int rc;
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+
+	rc = 0;
+	if (priv) {
+		spin_lock_bh(&priv->priv_lock);
+		if (priv->dev_in_use)
+			rc = -EBUSY;
+		spin_unlock_bh(&priv->priv_lock);
+	}
+	if (rc)
+		pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
+		       dev_name(dev));
+	return rc;
+}
+
+
+static const struct dev_pm_ops vmlogrdr_pm_ops = {
+	.prepare = vmlogrdr_pm_prepare,
+};
+
+static struct class *vmlogrdr_class;
+/* driver object registered on the IUCV bus */
+static struct device_driver vmlogrdr_driver = {
+	.name = "vmlogrdr",
+	.bus  = &iucv_bus,
+	.pm = &vmlogrdr_pm_ops,
+	.groups = vmlogrdr_drv_attr_groups,
+};
+
+/*
+ * Register the IUCV handler, the bus driver and the device class, undoing
+ * earlier steps on failure (goto-based cleanup).
+ */
+static int vmlogrdr_register_driver(void)
+{
+	int ret;
+
+	/* Register with iucv driver */
+	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
+	if (ret)
+		goto out;
+
+	ret = driver_register(&vmlogrdr_driver);
+	if (ret)
+		goto out_iucv;
+
+	vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
+	if (IS_ERR(vmlogrdr_class)) {
+		ret = PTR_ERR(vmlogrdr_class);
+		vmlogrdr_class = NULL;
+		goto out_driver;
+	}
+	return 0;
+
+out_driver:
+	driver_unregister(&vmlogrdr_driver);
+out_iucv:
+	iucv_unregister(&vmlogrdr_iucv_handler, 1);
+out:
+	return ret;
+}
+
+
+/* Reverse of vmlogrdr_register_driver: class, driver, IUCV handler. */
+static void vmlogrdr_unregister_driver(void)
+{
+	class_destroy(vmlogrdr_class);
+	vmlogrdr_class = NULL;
+	driver_unregister(&vmlogrdr_driver);
+	iucv_unregister(&vmlogrdr_iucv_handler, 1);
+}
+
+
+/*
+ * Allocate and register the bus device and the class device for one
+ * system-service entry.  On success priv->device and priv->class_device
+ * are set; the bus device frees itself via its release callback.
+ */
+static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
+{
+	struct device *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (dev) {
+		dev_set_name(dev, "%s", priv->internal_name);
+		dev->bus = &iucv_bus;
+		dev->parent = iucv_root;
+		dev->driver = &vmlogrdr_driver;
+		dev->groups = vmlogrdr_attr_groups;
+		dev_set_drvdata(dev, priv);
+		/*
+		 * The release function could be called after the
+		 * module has been unloaded. It's _only_ task is to
+		 * free the struct. Therefore, we specify kfree()
+		 * directly here. (Probably a little bit obfuscating
+		 * but legitimate ...).
+		 */
+		dev->release = (void (*)(struct device *))kfree;
+	} else
+		return -ENOMEM;
+	ret = device_register(dev);
+	if (ret) {
+		/* device_register failed: drop the reference, which ends up
+		 * freeing dev through the release callback above */
+		put_device(dev);
+		return ret;
+	}
+
+	priv->class_device = device_create(vmlogrdr_class, dev,
+					   MKDEV(vmlogrdr_major,
+						 priv->minor_num),
+					   priv, "%s", dev_name(dev));
+	if (IS_ERR(priv->class_device)) {
+		ret = PTR_ERR(priv->class_device);
+		priv->class_device=NULL;
+		device_unregister(dev);
+		return ret;
+	}
+	priv->device = dev;
+	return 0;
+}
+
+
+/* Tear down the class device and bus device created for one entry. */
+static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
+{
+	device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
+	if (priv->device != NULL) {
+		device_unregister(priv->device);
+		priv->device=NULL;
+	}
+	return 0;
+}
+
+
+/*
+ * Allocate and add the character device covering all MAXMINOR minors.
+ * On cdev_add failure the half-initialized cdev is released via its
+ * kobject, not cdev_del.
+ */
+static int vmlogrdr_register_cdev(dev_t dev)
+{
+	int rc = 0;
+	vmlogrdr_cdev = cdev_alloc();
+	if (!vmlogrdr_cdev) {
+		return -ENOMEM;
+	}
+	vmlogrdr_cdev->owner = THIS_MODULE;
+	vmlogrdr_cdev->ops = &vmlogrdr_fops;
+	vmlogrdr_cdev->dev = dev;
+	rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
+	if (!rc)
+		return 0;
+
+	// cleanup: cdev is not fully registered, no cdev_del here!
+	kobject_put(&vmlogrdr_cdev->kobj);
+	vmlogrdr_cdev=NULL;
+	return rc;
+}
+
+
+/*
+ * Common teardown used by both the init error path and module exit:
+ * cdev, per-minor devices and buffers, driver objects, chrdev region.
+ * Safe to call on partially-initialized state.
+ */
+static void vmlogrdr_cleanup(void)
+{
+        int i;
+
+	if (vmlogrdr_cdev) {
+		cdev_del(vmlogrdr_cdev);
+		vmlogrdr_cdev=NULL;
+	}
+	for (i=0; i < MAXMINOR; ++i ) {
+		vmlogrdr_unregister_device(&sys_ser[i]);
+		free_page((unsigned long)sys_ser[i].buffer);
+	}
+	vmlogrdr_unregister_driver();
+	if (vmlogrdr_major) {
+		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
+		vmlogrdr_major=0;
+	}
+}
+
+
+/*
+ * Module init: requires z/VM.  Allocates the chrdev region, registers the
+ * driver objects, one page buffer and device per system service, and
+ * finally the cdev.  Any failure funnels into vmlogrdr_cleanup().
+ */
+static int __init vmlogrdr_init(void)
+{
+	int rc;
+	int i;
+	dev_t dev;
+
+	if (! MACHINE_IS_VM) {
+		pr_err("not running under VM, driver not loaded.\n");
+		return -ENODEV;
+	}
+
+        recording_class_AB = vmlogrdr_get_recording_class_AB();
+
+	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
+	if (rc)
+		return rc;
+	vmlogrdr_major = MAJOR(dev);
+
+	rc=vmlogrdr_register_driver();
+	if (rc)
+		goto cleanup;
+
+	for (i=0; i < MAXMINOR; ++i ) {
+		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		if (!sys_ser[i].buffer) {
+			rc = -ENOMEM;
+			break;
+		}
+		sys_ser[i].current_position = sys_ser[i].buffer;
+		rc=vmlogrdr_register_device(&sys_ser[i]);
+		if (rc)
+			break;
+	}
+	if (rc)
+		goto cleanup;
+
+	rc = vmlogrdr_register_cdev(dev);
+	if (rc)
+		goto cleanup;
+	return 0;
+
+cleanup:
+	vmlogrdr_cleanup();
+	return rc;
+}
+
+
+/* Module exit: full teardown via the shared cleanup helper. */
+static void __exit vmlogrdr_exit(void)
+{
+	vmlogrdr_cleanup();
+	return;
+}
+
+
+module_init(vmlogrdr_init);
+module_exit(vmlogrdr_exit);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/vmur.c b/src/kernel/linux/v4.14/drivers/s390/char/vmur.c
new file mode 100644
index 0000000..04aceb6
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/vmur.c
@@ -0,0 +1,1058 @@
+/*
+ * Linux driver for System z and s390 unit record devices
+ * (z/VM virtual punch, reader, printer)
+ *
+ * Copyright IBM Corp. 2001, 2009
+ * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
+ *	    Michael Holzheu <holzheu@de.ibm.com>
+ *	    Frank Munzert <munzert@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "vmur"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/uaccess.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+#include <asm/debug.h>
+#include <asm/diag.h>
+
+#include "vmur.h"
+
+/*
+ * Driver overview
+ *
+ * Unit record device support is implemented as a character device driver.
+ * We can fit at least 16 bits into a device minor number and use the
+ * simple method of mapping a character device number with minor abcd
+ * to the unit record device with devno abcd.
+ * I/O to virtual unit record devices is handled as follows:
+ * Reads: Diagnose code 0x14 (input spool file manipulation)
+ * is used to read spool data page-wise.
+ * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
+ * is available by reading sysfs attr reclen. Each write() to the device
+ * must specify an integral multiple (maximal 511) of reclen.
+ */
+
+static char ur_banner[] = "z/VM virtual unit record device driver";
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
+MODULE_LICENSE("GPL");
+
+/* base dev_t of the dynamically allocated chrdev region */
+static dev_t ur_first_dev_maj_min;
+static struct class *vmur_class;
+static struct debug_info *vmur_dbf;
+
+/* We put the device's record length (for writes) in the driver_info field */
+static struct ccw_device_id ur_ids[] = {
+	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
+	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
+	{ /* end of list */ }
+};
+
+MODULE_DEVICE_TABLE(ccw, ur_ids);
+
+static int ur_probe(struct ccw_device *cdev);
+static void ur_remove(struct ccw_device *cdev);
+static int ur_set_online(struct ccw_device *cdev);
+static int ur_set_offline(struct ccw_device *cdev);
+static int ur_pm_suspend(struct ccw_device *cdev);
+
+/* CCW driver glue for the reader/punch and printer device types */
+static struct ccw_driver ur_driver = {
+	.driver = {
+		.name	= "vmur",
+		.owner	= THIS_MODULE,
+	},
+	.ids		= ur_ids,
+	.probe		= ur_probe,
+	.remove		= ur_remove,
+	.set_online	= ur_set_online,
+	.set_offline	= ur_set_offline,
+	.freeze		= ur_pm_suspend,
+	.int_class	= IRQIO_VMR,
+};
+
+/* serializes online/offline/open transitions across all ur devices */
+static DEFINE_MUTEX(vmur_mutex);
+
+/*
+ * Allocation, freeing, getting and putting of urdev structures
+ *
+ * Each ur device (urd) contains a reference to its corresponding ccw device
+ * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
+ * ur device using dev_get_drvdata(&cdev->dev) pointer.
+ *
+ * urd references:
+ * - ur_probe gets a urd reference, ur_remove drops the reference
+ *   dev_get_drvdata(&cdev->dev)
+ * - ur_open gets a urd reference, ur_release drops the reference
+ *   (urf->urd)
+ *
+ * cdev references:
+ * - urdev_alloc get a cdev reference (urd->cdev)
+ * - urdev_free drops the cdev reference (urd->cdev)
+ *
+ * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the ccwdev lock
+ */
+/*
+ * Allocate a urdev with refcount 1, taking a reference on the ccw device.
+ * The write record length comes from the matched ccw_device_id entry.
+ * Returns NULL on allocation failure.
+ */
+static struct urdev *urdev_alloc(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+
+	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
+	if (!urd)
+		return NULL;
+	urd->reclen = cdev->id.driver_info;
+	ccw_device_get_id(cdev, &urd->dev_id);
+	mutex_init(&urd->io_mutex);
+	init_waitqueue_head(&urd->wait);
+	spin_lock_init(&urd->open_lock);
+	atomic_set(&urd->ref_count,  1);
+	urd->cdev = cdev;
+	get_device(&cdev->dev);
+	return urd;
+}
+
+/* Release the cdev reference held by the urdev and free it. */
+static void urdev_free(struct urdev *urd)
+{
+	TRACE("urdev_free: %p\n", urd);
+	if (urd->cdev)
+		put_device(&urd->cdev->dev);
+	kfree(urd);
+}
+
+/* Take an additional reference on the urdev. */
+static void urdev_get(struct urdev *urd)
+{
+	atomic_inc(&urd->ref_count);
+}
+
+/*
+ * Look up the urdev attached to a ccw device and take a reference,
+ * under the ccwdev lock so drvdata cannot change underneath us.
+ * Returns NULL if no urdev is attached.
+ */
+static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	urd = dev_get_drvdata(&cdev->dev);
+	if (urd)
+		urdev_get(urd);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	return urd;
+}
+
+/*
+ * Resolve a device number to a referenced urdev via the bus id
+ * "0.0.<devno>".  The intermediate cdev reference is dropped before
+ * returning.  Returns NULL when no such device is bound to ur_driver.
+ */
+static struct urdev *urdev_get_from_devno(u16 devno)
+{
+	char bus_id[16];
+	struct ccw_device *cdev;
+	struct urdev *urd;
+
+	sprintf(bus_id, "0.0.%04x", devno);
+	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
+	if (!cdev)
+		return NULL;
+	urd = urdev_get_from_cdev(cdev);
+	put_device(&cdev->dev);
+	return urd;
+}
+
+/* Drop a urdev reference; the last put frees the structure. */
+static void urdev_put(struct urdev *urd)
+{
+	if (atomic_dec_and_test(&urd->ref_count))
+		urdev_free(urd);
+}
+
+/*
+ * State and contents of ur devices can be changed by class D users issuing
+ * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended.
+ * Also the Linux guest might be logged off, which causes all active spool
+ * files to be closed.
+ * So we cannot guarantee that spool files are still the same when the Linux
+ * guest is resumed. In order to avoid unpredictable results at resume time
+ * we simply refuse to suspend if a ur device node is open.
+ */
+static int ur_pm_suspend(struct ccw_device *cdev)
+{
+	struct urdev *urd = dev_get_drvdata(&cdev->dev);
+
+	TRACE("ur_pm_suspend: cdev=%p\n", cdev);
+	/* open_flag is read without open_lock here; presumably user space is
+	 * already frozen at this point so the flag cannot change — verify */
+	if (urd->open_flag) {
+		pr_err("Unit record device %s is busy, %s refusing to "
+		       "suspend.\n", dev_name(&cdev->dev), ur_banner);
+		return -EBUSY;
+	}
+	return 0;
+}
+
+/*
+ * Low-level functions to do I/O to a ur device.
+ *     alloc_chan_prog
+ *     free_chan_prog
+ *     do_ur_io
+ *     ur_int_handler
+ *
+ * alloc_chan_prog allocates and builds the channel program
+ * free_chan_prog frees memory of the channel program
+ *
+ * do_ur_io issues the channel program to the device and blocks waiting
+ * on a completion event it publishes at urd->io_done. The function
+ * serialises itself on the device's mutex so that only one I/O
+ * is issued at a time (and that I/O is synchronous).
+ *
+ * ur_int_handler catches the "I/O done" interrupt, writes the
+ * subchannel status word into the scsw member of the urdev structure
+ * and complete()s the io_done to wake the waiting do_ur_io.
+ *
+ * The caller of do_ur_io is responsible for kfree()ing the channel program
+ * address pointer that alloc_chan_prog returned.
+ */
+
+/*
+ * Free a channel program built by alloc_chan_prog: walk the CCW array
+ * freeing each data buffer until the terminating CCW with cda == 0
+ * (the final NOP, zeroed by kzalloc), then free the array itself.
+ */
+static void free_chan_prog(struct ccw1 *cpa)
+{
+	struct ccw1 *ptr = cpa;
+
+	while (ptr->cda) {
+		kfree((void *)(addr_t) ptr->cda);
+		ptr++;
+	}
+	kfree(cpa);
+}
+
+/*
+ * alloc_chan_prog
+ * The channel program we use is write commands chained together
+ * with a final NOP CCW command-chained on (which ensures that CE and DE
+ * are presented together in a single interrupt instead of as separate
+ * interrupts unless an incorrect length indication kicks in first). The
+ * data length in each CCW is reclen.
+ */
+static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
+				    int reclen)
+{
+	struct ccw1 *cpa;
+	void *kbuf;
+	int i;
+
+	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);
+
+	/*
+	 * We chain a NOP onto the writes to force CE+DE together.
+	 * That means we allocate room for CCWs to cover count/reclen
+	 * records plus a NOP.
+	 */
+	cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
+		      GFP_KERNEL | GFP_DMA);
+	if (!cpa)
+		return ERR_PTR(-ENOMEM);
+
+	/* one write CCW per record, each with its own DMA-able copy of the
+	 * user data; on any failure the partial chain is freed and the last
+	 * zeroed cda terminates the walk in free_chan_prog() */
+	for (i = 0; i < rec_count; i++) {
+		cpa[i].cmd_code = WRITE_CCW_CMD;
+		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
+		cpa[i].count = reclen;
+		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
+		if (!kbuf) {
+			free_chan_prog(cpa);
+			return ERR_PTR(-ENOMEM);
+		}
+		cpa[i].cda = (u32)(addr_t) kbuf;
+		if (copy_from_user(kbuf, ubuf, reclen)) {
+			free_chan_prog(cpa);
+			return ERR_PTR(-EFAULT);
+		}
+		ubuf += reclen;
+	}
+	/* The following NOP CCW forces CE+DE to be presented together */
+	cpa[i].cmd_code = CCW_CMD_NOOP;
+	return cpa;
+}
+
+/*
+ * Issue a channel program synchronously: start the I/O under the ccwdev
+ * lock and block on an on-stack completion that ur_int_handler signals.
+ * Serialized per device via io_mutex; interruptible while acquiring it.
+ */
+static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
+{
+	int rc;
+	struct ccw_device *cdev = urd->cdev;
+	DECLARE_COMPLETION_ONSTACK(event);
+
+	TRACE("do_ur_io: cpa=%p\n", cpa);
+
+	rc = mutex_lock_interruptible(&urd->io_mutex);
+	if (rc)
+		return rc;
+
+	urd->io_done = &event;
+
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
+	if (rc)
+		goto out;
+
+	wait_for_completion(&event);
+	TRACE("do_ur_io: I/O complete\n");
+	rc = 0;
+
+out:
+	mutex_unlock(&urd->io_mutex);
+	return rc;
+}
+
+/*
+ * ur interrupt handler, called from the ccw_device layer
+ */
+static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
+			   struct irb *irb)
+{
+	struct urdev *urd;
+
+	if (!IS_ERR(irb)) {
+		TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
+		      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
+		      irb->scsw.cmd.count);
+	}
+	/* intparm == 0 means the interrupt was not triggered by our I/O */
+	if (!intparm) {
+		TRACE("ur_int_handler: unsolicited interrupt\n");
+		return;
+	}
+	urd = dev_get_drvdata(&cdev->dev);
+	BUG_ON(!urd);
+	/* On special conditions irb is an error pointer */
+	if (IS_ERR(irb))
+		urd->io_request_rc = PTR_ERR(irb);
+	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+		urd->io_request_rc = 0;
+	else
+		urd->io_request_rc = -EIO;
+
+	/* wake the do_ur_io() caller waiting on this request */
+	complete(urd->io_done);
+}
+
+/*
+ * reclen sysfs attribute - The record length to be used for write CCWs
+ */
+static ssize_t ur_attr_reclen_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct urdev *urd;
+	int rc;
+
+	/* takes a reference on urd; dropped below */
+	urd = urdev_get_from_cdev(to_ccwdev(dev));
+	if (!urd)
+		return -ENODEV;
+	rc = sprintf(buf, "%zu\n", urd->reclen);
+	urdev_put(urd);
+	return rc;
+}
+
+/* read-only sysfs attribute "reclen" */
+static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);
+
+/* Create the sysfs files for a ur ccw device */
+static int ur_create_attributes(struct device *dev)
+{
+	return device_create_file(dev, &dev_attr_reclen);
+}
+
+/* Remove the sysfs files again (counterpart of ur_create_attributes) */
+static void ur_remove_attributes(struct device *dev)
+{
+	device_remove_file(dev, &dev_attr_reclen);
+}
+
+/*
+ * diagnose code 0x210 - retrieve device information
+ * cc=0  normal completion, we have a real device
+ * cc=1  CP paging error
+ * cc=2  The virtual device exists, but is not associated with a real device
+ * cc=3  Invalid device address, or the virtual device does not exist
+ */
+static int get_urd_class(struct urdev *urd)
+{
+	static struct diag210 ur_diag210;
+	int cc;
+
+	ur_diag210.vrdcdvno = urd->dev_id.devno;
+	ur_diag210.vrdclen = sizeof(struct diag210);
+
+	cc = diag210(&ur_diag210);
+	switch (cc) {
+	case 0:
+		/* a real device is backing it - not supported by vmur */
+		return -EOPNOTSUPP;
+	case 2:
+		return ur_diag210.vrdcvcla; /* virtual device class */
+	case 3:
+		return -ENODEV;
+	default:
+		return -EIO;
+	}
+}
+
+/*
+ * Allocation and freeing of urfile structures
+ */
+/* Allocate a zero-initialized urfile and link it to @urd (no refcounting
+ * here; the caller holds the urd reference). Returns NULL on OOM. */
+static struct urfile *urfile_alloc(struct urdev *urd)
+{
+	struct urfile *urf;
+
+	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
+	if (!urf)
+		return NULL;
+	urf->urd = urd;
+
+	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
+	      urf->dev_reclen);
+
+	return urf;
+}
+
+/* Free a urfile previously obtained from urfile_alloc() */
+static void urfile_free(struct urfile *urf)
+{
+	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
+	kfree(urf);
+}
+
+/*
+ * The fops implementation of the character device driver
+ */
+/*
+ * Build a channel program covering count/reclen records from @udata,
+ * run it on @urd and return the number of bytes written (advancing
+ * *ppos) or a negative errno.
+ */
+static ssize_t do_write(struct urdev *urd, const char __user *udata,
+			size_t count, size_t reclen, loff_t *ppos)
+{
+	struct ccw1 *cpa;
+	int rc;
+
+	cpa = alloc_chan_prog(udata, count / reclen, reclen);
+	if (IS_ERR(cpa))
+		return PTR_ERR(cpa);
+
+	rc = do_ur_io(urd, cpa);
+	if (rc)
+		goto fail_kfree_cpa;
+
+	/* I/O completed; check the status the interrupt handler recorded */
+	if (urd->io_request_rc) {
+		rc = urd->io_request_rc;
+		goto fail_kfree_cpa;
+	}
+	*ppos += count;
+	rc = count;
+
+fail_kfree_cpa:
+	free_chan_prog(cpa);
+	return rc;
+}
+
+/* write() fops entry: validate count against the device record length
+ * and cap a single request at MAX_RECS_PER_IO records. */
+static ssize_t ur_write(struct file *file, const char __user *udata,
+			size_t count, loff_t *ppos)
+{
+	struct urfile *urf = file->private_data;
+
+	TRACE("ur_write: count=%zu\n", count);
+
+	if (count == 0)
+		return 0;
+
+	if (count % urf->dev_reclen)
+		return -EINVAL;	/* count must be a multiple of reclen */
+
+	/* short write: userspace is expected to retry the remainder */
+	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
+		count = urf->dev_reclen * MAX_RECS_PER_IO;
+
+	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
+}
+
+/*
+ * diagnose code 0x14 subcode 0x0028 - position spool file to designated
+ *				       record
+ * cc=0  normal completion
+ * cc=2  no file active on the virtual reader or device not ready
+ * cc=3  record specified is beyond EOF
+ */
+static int diag_position_to_record(int devno, int record)
+{
+	int cc = diag14(record, devno, 0x28);
+
+	if (cc == 0)
+		return 0;		/* positioned successfully */
+	if (cc == 2)
+		return -ENOMEDIUM;	/* no file active / device not ready */
+	if (cc == 3)
+		return -ENODATA;	/* position beyond end of file */
+	return -EIO;			/* unexpected condition code */
+}
+
+/*
+ * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
+ * cc=0  normal completion
+ * cc=1  EOF reached
+ * cc=2  no file active on the virtual reader, and no file eligible
+ * cc=3  file already active on the virtual reader or specified virtual
+ *	 reader does not exist or is not a reader
+ */
+static int diag_read_file(int devno, char *buf)
+{
+	int cc = diag14((unsigned long) buf, devno, 0x00);
+
+	if (cc == 0)
+		return 0;		/* buffer filled */
+	if (cc == 1)
+		return -ENODATA;	/* end of file reached */
+	if (cc == 2)
+		return -ENOMEDIUM;	/* no file active or eligible */
+	return -EIO;			/* unexpected condition code */
+}
+
+/*
+ * Read spool file data via diagnose 0x14 into the user buffer. CP
+ * delivers the file in 4K blocks; we position to the block containing
+ * *offs, then copy block by block, honouring the intra-block offset of
+ * the first block. Returns bytes copied (may be short at EOF) or a
+ * negative errno.
+ */
+static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
+			   loff_t *offs)
+{
+	size_t len, copied, res;
+	char *buf;
+	int rc;
+	u16 reclen;
+	struct urdev *urd;
+
+	urd = ((struct urfile *) file->private_data)->urd;
+	reclen = ((struct urfile *) file->private_data)->file_reclen;
+
+	/* position to the 4K block that contains *offs (1-based records) */
+	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
+	if (rc == -ENODATA)
+		return 0;
+	if (rc)
+		return rc;
+
+	len = min((size_t) PAGE_SIZE, count);
+	/* bounce page must be DMA-capable for the diagnose */
+	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
+	if (!buf)
+		return -ENOMEM;
+
+	copied = 0;
+	res = (size_t) (*offs % PAGE_SIZE);	/* offset within first block */
+	do {
+		rc = diag_read_file(urd->dev_id.devno, buf);
+		if (rc == -ENODATA) {
+			/* EOF: return what we copied so far */
+			break;
+		}
+		if (rc)
+			goto fail;
+		/* patch the record length into the first spool data block */
+		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
+			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
+		len = min(count - copied, PAGE_SIZE - res);
+		if (copy_to_user(ubuf + copied, buf + res, len)) {
+			rc = -EFAULT;
+			goto fail;
+		}
+		res = 0;	/* subsequent blocks are copied from offset 0 */
+		copied += len;
+	} while (copied != count);
+
+	*offs += copied;
+	rc = copied;
+fail:
+	free_page((unsigned long) buf);
+	return rc;
+}
+
+/* read() fops entry: serialize against writes/reads on the same urd
+ * via io_mutex and delegate to diag14_read(). */
+static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
+		       loff_t *offs)
+{
+	struct urdev *urd;
+	int rc;
+
+	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);
+
+	if (count == 0)
+		return 0;
+
+	urd = ((struct urfile *) file->private_data)->urd;
+	rc = mutex_lock_interruptible(&urd->io_mutex);
+	if (rc)
+		return rc;
+	rc = diag14_read(file, ubuf, count, offs);
+	mutex_unlock(&urd->io_mutex);
+	return rc;
+}
+
+/*
+ * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
+ * cc=0  normal completion
+ * cc=1  no files on reader queue or no subsequent file
+ * cc=2  spid specified is invalid
+ */
+static int diag_read_next_file_info(struct file_control_block *buf, int spid)
+{
+	/*
+	 * cc == 0 is the only success indication; every other condition
+	 * code means no (further) file information is available.
+	 */
+	if (diag14((unsigned long) buf, spid, 0xfff) != 0)
+		return -ENODATA;
+	return 0;
+}
+
+/*
+ * Check that the input (reader) device has a usable spool file on top
+ * of its queue: not empty, not held, and openable. Reads one block to
+ * force CP to open the file, then re-checks that the file went into
+ * "in use" state. Returns 0 if the device is ready for reading.
+ */
+static int verify_uri_device(struct urdev *urd)
+{
+	struct file_control_block *fcb;
+	char *buf;
+	int rc;
+
+	/* GFP_DMA: diagnose buffer must be 31-bit addressable */
+	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
+	if (!fcb)
+		return -ENOMEM;
+
+	/* check for empty reader device (beginning of chain) */
+	rc = diag_read_next_file_info(fcb, 0);
+	if (rc)
+		goto fail_free_fcb;
+
+	/* if file is in hold status, we do not read it */
+	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
+		rc = -EPERM;
+		goto fail_free_fcb;
+	}
+
+	/* open file on virtual reader	*/
+	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
+	if (!buf) {
+		rc = -ENOMEM;
+		goto fail_free_fcb;
+	}
+	rc = diag_read_file(urd->dev_id.devno, buf);
+	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
+		goto fail_free_buf;
+
+	/* check if the file on top of the queue is open now */
+	rc = diag_read_next_file_info(fcb, 0);
+	if (rc)
+		goto fail_free_buf;
+	if (!(fcb->file_stat & FLG_IN_USE)) {
+		rc = -EMFILE;
+		goto fail_free_buf;
+	}
+	rc = 0;
+
+fail_free_buf:
+	free_page((unsigned long) buf);
+fail_free_fcb:
+	kfree(fcb);
+	return rc;
+}
+
+/* Dispatch the per-class open-time device check. */
+static int verify_device(struct urdev *urd)
+{
+	if (urd->class == DEV_CLASS_UR_O)
+		return 0; /* output devices need no check here */
+	if (urd->class == DEV_CLASS_UR_I)
+		return verify_uri_device(urd);
+	return -EOPNOTSUPP;
+}
+
+/* Return the record length of the spool file on top of the reader
+ * queue (0 for a CP dump file), or a negative errno. */
+static int get_uri_file_reclen(struct urdev *urd)
+{
+	struct file_control_block *fcb;
+	int rc;
+
+	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
+	if (!fcb)
+		return -ENOMEM;
+	rc = diag_read_next_file_info(fcb, 0);
+	if (rc)
+		goto fail_free;
+	/* CP dump files carry no meaningful record length */
+	if (fcb->file_stat & FLG_CP_DUMP)
+		rc = 0;
+	else
+		rc = fcb->rec_len;
+
+fail_free:
+	kfree(fcb);
+	return rc;
+}
+
+/*
+ * Per-class record length of the current spool file: output devices
+ * have no input file (report 0); for input devices ask CP.
+ */
+static int get_file_reclen(struct urdev *urd)
+{
+	if (urd->class == DEV_CLASS_UR_O)
+		return 0;
+	if (urd->class == DEV_CLASS_UR_I)
+		return get_uri_file_reclen(urd);
+	return -EOPNOTSUPP;
+}
+
+/*
+ * open() fops entry. Enforces single-open per device (open_flag
+ * guarded by open_lock, blocking unless O_NONBLOCK), checks that the
+ * access mode matches the device class (read <-> reader, write <->
+ * punch/printer), validates the device and allocates the per-open
+ * urfile state.
+ */
+static int ur_open(struct inode *inode, struct file *file)
+{
+	u16 devno;
+	struct urdev *urd;
+	struct urfile *urf;
+	unsigned short accmode;
+	int rc;
+
+	accmode = file->f_flags & O_ACCMODE;
+
+	if (accmode == O_RDWR)
+		return -EACCES;
+	/*
+	 * We treat the minor number as the devno of the ur device
+	 * to find in the driver tree.
+	 */
+	devno = MINOR(file_inode(file)->i_rdev);
+
+	/* takes a reference; dropped on release or on failure below */
+	urd = urdev_get_from_devno(devno);
+	if (!urd) {
+		rc = -ENXIO;
+		goto out;
+	}
+
+	spin_lock(&urd->open_lock);
+	while (urd->open_flag) {
+		/* device already open: wait or bail out */
+		spin_unlock(&urd->open_lock);
+		if (file->f_flags & O_NONBLOCK) {
+			rc = -EBUSY;
+			goto fail_put;
+		}
+		if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
+			rc = -ERESTARTSYS;
+			goto fail_put;
+		}
+		/* re-acquire and re-check: another opener may have won */
+		spin_lock(&urd->open_lock);
+	}
+	urd->open_flag++;
+	spin_unlock(&urd->open_lock);
+
+	TRACE("ur_open\n");
+
+	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
+	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
+		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
+		rc = -EACCES;
+		goto fail_unlock;
+	}
+
+	rc = verify_device(urd);
+	if (rc)
+		goto fail_unlock;
+
+	urf = urfile_alloc(urd);
+	if (!urf) {
+		rc = -ENOMEM;
+		goto fail_unlock;
+	}
+
+	/* snapshot the record lengths for the lifetime of this open */
+	urf->dev_reclen = urd->reclen;
+	rc = get_file_reclen(urd);
+	if (rc < 0)
+		goto fail_urfile_free;
+	urf->file_reclen = rc;
+	file->private_data = urf;
+	return 0;
+
+fail_urfile_free:
+	urfile_free(urf);
+fail_unlock:
+	spin_lock(&urd->open_lock);
+	urd->open_flag--;
+	spin_unlock(&urd->open_lock);
+fail_put:
+	urdev_put(urd);
+out:
+	return rc;
+}
+
+/* release() fops entry: clear the single-open flag, wake up blocked
+ * openers, drop the urd reference taken in ur_open and free urf. */
+static int ur_release(struct inode *inode, struct file *file)
+{
+	struct urfile *urf = file->private_data;
+
+	TRACE("ur_release\n");
+	spin_lock(&urf->urd->open_lock);
+	urf->urd->open_flag--;
+	spin_unlock(&urf->urd->open_lock);
+	wake_up_interruptible(&urf->urd->wait);
+	urdev_put(urf->urd);
+	urfile_free(urf);
+	return 0;
+}
+
+/* llseek() fops entry: seeking is meaningful on the reader side only,
+ * and only in whole 4K blocks (CP delivers spool data blockwise). */
+static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
+{
+	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
+		return -ESPIPE;
+	if (offset % PAGE_SIZE != 0)
+		return -ESPIPE;
+	return no_seek_end_llseek(file, offset, whence);
+}
+
+/* character device file operations for /dev/vmrdr-*, vmpun-*, vmprt-* */
+static const struct file_operations ur_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = ur_open,
+	.release = ur_release,
+	.read	 = ur_read,
+	.write	 = ur_write,
+	.llseek  = ur_llseek,
+};
+
+/*
+ * ccw_device infrastructure:
+ *     ur_probe creates the struct urdev (with refcount = 1), the device
+ *     attributes, sets up the interrupt handler and validates the virtual
+ *     unit record device.
+ *     ur_remove removes the device attributes and drops the reference to
+ *     struct urdev.
+ *
+ *     ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
+ *     by the vmur_mutex lock.
+ *
+ *     urd->char_device is used as indication that the online function has
+ *     been completed successfully.
+ */
+static int ur_probe(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+	int rc;
+
+	TRACE("ur_probe: cdev=%p\n", cdev);
+
+	/* vmur_mutex serializes probe/remove/set_online/set_offline */
+	mutex_lock(&vmur_mutex);
+	urd = urdev_alloc(cdev);
+	if (!urd) {
+		rc = -ENOMEM;
+		goto fail_unlock;
+	}
+
+	rc = ur_create_attributes(&cdev->dev);
+	if (rc) {
+		rc = -ENOMEM;
+		goto fail_urdev_put;
+	}
+	cdev->handler = ur_int_handler;
+
+	/* validate virtual unit record device */
+	urd->class = get_urd_class(urd);
+	if (urd->class < 0) {
+		rc = urd->class;
+		goto fail_remove_attr;
+	}
+	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
+		rc = -EOPNOTSUPP;
+		goto fail_remove_attr;
+	}
+	/* publish urd under the ccwdev lock so the irq handler sees it */
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	dev_set_drvdata(&cdev->dev, urd);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	mutex_unlock(&vmur_mutex);
+	return 0;
+
+fail_remove_attr:
+	ur_remove_attributes(&cdev->dev);
+fail_urdev_put:
+	urdev_put(urd);
+fail_unlock:
+	mutex_unlock(&vmur_mutex);
+	return rc;
+}
+
+/*
+ * Bring the device online: allocate and register the character device
+ * (minor == devno) and create the /dev node named after the device
+ * class and type. urd->char_device doubles as the "online completed"
+ * marker (see the comment block above ur_probe).
+ */
+static int ur_set_online(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+	int minor, major, rc;
+	char node_id[16];
+
+	TRACE("ur_set_online: cdev=%p\n", cdev);
+
+	mutex_lock(&vmur_mutex);
+	urd = urdev_get_from_cdev(cdev);
+	if (!urd) {
+		/* ur_remove already deleted our urd */
+		rc = -ENODEV;
+		goto fail_unlock;
+	}
+
+	if (urd->char_device) {
+		/* Another ur_set_online was faster */
+		rc = -EBUSY;
+		goto fail_urdev_put;
+	}
+
+	minor = urd->dev_id.devno;
+	major = MAJOR(ur_first_dev_maj_min);
+
+	urd->char_device = cdev_alloc();
+	if (!urd->char_device) {
+		rc = -ENOMEM;
+		goto fail_urdev_put;
+	}
+
+	urd->char_device->ops = &ur_fops;
+	urd->char_device->dev = MKDEV(major, minor);
+	urd->char_device->owner = ur_fops.owner;
+
+	rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
+	if (rc)
+		goto fail_free_cdev;
+	/* pick the device node prefix from control unit type and class */
+	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
+		if (urd->class == DEV_CLASS_UR_I)
+			sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
+		if (urd->class == DEV_CLASS_UR_O)
+			sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
+	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
+		sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
+	} else {
+		rc = -EOPNOTSUPP;
+		goto fail_free_cdev;
+	}
+
+	urd->device = device_create(vmur_class, &cdev->dev,
+				    urd->char_device->dev, NULL, "%s", node_id);
+	if (IS_ERR(urd->device)) {
+		rc = PTR_ERR(urd->device);
+		TRACE("ur_set_online: device_create rc=%d\n", rc);
+		goto fail_free_cdev;
+	}
+	urdev_put(urd);
+	mutex_unlock(&vmur_mutex);
+	return 0;
+
+fail_free_cdev:
+	cdev_del(urd->char_device);
+	urd->char_device = NULL;
+fail_urdev_put:
+	urdev_put(urd);
+fail_unlock:
+	mutex_unlock(&vmur_mutex);
+	return rc;
+}
+
+/*
+ * Take the device offline. Unless @force is set, refuse while any
+ * user still holds a urd reference beyond the two held here (the
+ * driver's own plus the one taken just above). Caller must hold
+ * vmur_mutex.
+ */
+static int ur_set_offline_force(struct ccw_device *cdev, int force)
+{
+	struct urdev *urd;
+	int rc;
+
+	TRACE("ur_set_offline: cdev=%p\n", cdev);
+	urd = urdev_get_from_cdev(cdev);
+	if (!urd)
+		/* ur_remove already deleted our urd */
+		return -ENODEV;
+	if (!urd->char_device) {
+		/* Another ur_set_offline was faster */
+		rc = -EBUSY;
+		goto fail_urdev_put;
+	}
+	if (!force && (atomic_read(&urd->ref_count) > 2)) {
+		/* There is still a user of urd (e.g. ur_open) */
+		TRACE("ur_set_offline: BUSY\n");
+		rc = -EBUSY;
+		goto fail_urdev_put;
+	}
+	device_destroy(vmur_class, urd->char_device->dev);
+	cdev_del(urd->char_device);
+	urd->char_device = NULL;	/* marks the device as offline */
+	rc = 0;
+
+fail_urdev_put:
+	urdev_put(urd);
+	return rc;
+}
+
+/* ccw driver set_offline callback: non-forced offline under vmur_mutex */
+static int ur_set_offline(struct ccw_device *cdev)
+{
+	int rc;
+
+	mutex_lock(&vmur_mutex);
+	rc = ur_set_offline_force(cdev, 0);
+	mutex_unlock(&vmur_mutex);
+	return rc;
+}
+
+/* ccw driver remove callback: force the device offline if needed,
+ * tear down sysfs attributes and drop the driver's urd reference. */
+static void ur_remove(struct ccw_device *cdev)
+{
+	unsigned long flags;
+
+	TRACE("ur_remove\n");
+
+	mutex_lock(&vmur_mutex);
+
+	if (cdev->online)
+		ur_set_offline_force(cdev, 1);
+	ur_remove_attributes(&cdev->dev);
+
+	/* clear drvdata under the ccwdev lock against ur_int_handler */
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	urdev_put(dev_get_drvdata(&cdev->dev));
+	dev_set_drvdata(&cdev->dev, NULL);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	mutex_unlock(&vmur_mutex);
+}
+
+/*
+ * Module initialisation and cleanup
+ */
+/*
+ * Module init: register the debug feature, the "vmur" device class,
+ * the ccw driver and a dynamic char-dev major covering NUM_MINORS
+ * minors. Only loadable under z/VM. Unwinds in reverse order on error.
+ */
+static int __init ur_init(void)
+{
+	int rc;
+	dev_t dev;
+
+	if (!MACHINE_IS_VM) {
+		pr_err("The %s cannot be loaded without z/VM\n",
+		       ur_banner);
+		return -ENODEV;
+	}
+
+	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
+	if (!vmur_dbf)
+		return -ENOMEM;
+	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
+	if (rc)
+		goto fail_free_dbf;
+
+	debug_set_level(vmur_dbf, 6);
+
+	vmur_class = class_create(THIS_MODULE, "vmur");
+	if (IS_ERR(vmur_class)) {
+		rc = PTR_ERR(vmur_class);
+		goto fail_free_dbf;
+	}
+
+	rc = ccw_driver_register(&ur_driver);
+	if (rc)
+		goto fail_class_destroy;
+
+	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
+	if (rc) {
+		pr_err("Kernel function alloc_chrdev_region failed with "
+		       "error code %d\n", rc);
+		goto fail_unregister_driver;
+	}
+	/* remember major; minors map 1:1 to device numbers (see vmur.h) */
+	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);
+
+	pr_info("%s loaded.\n", ur_banner);
+	return 0;
+
+fail_unregister_driver:
+	ccw_driver_unregister(&ur_driver);
+fail_class_destroy:
+	class_destroy(vmur_class);
+fail_free_dbf:
+	debug_unregister(vmur_dbf);
+	return rc;
+}
+
+/* Module exit: tear down in reverse order of ur_init */
+static void __exit ur_exit(void)
+{
+	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
+	ccw_driver_unregister(&ur_driver);
+	class_destroy(vmur_class);
+	debug_unregister(vmur_dbf);
+	pr_info("%s unloaded.\n", ur_banner);
+}
+
+module_init(ur_init);
+module_exit(ur_exit);
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/vmur.h b/src/kernel/linux/v4.14/drivers/s390/char/vmur.h
new file mode 100644
index 0000000..67164ba
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/vmur.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux driver for System z and s390 unit record devices
+ * (z/VM virtual punch, reader, printer)
+ *
+ * Copyright IBM Corp. 2001, 2007
+ * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
+ *	    Michael Holzheu <holzheu@de.ibm.com>
+ *	    Frank Munzert <munzert@de.ibm.com>
+ */
+
+#ifndef _VMUR_H_
+#define _VMUR_H_
+
+#define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */
+#define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */
+/*
+ * we only support z/VM's default unit record devices:
+ * both in SPOOL directory control statement and in CP DEFINE statement
+ *	RDR defaults to 2540 reader
+ *	PUN defaults to 2540 punch
+ *	PRT defaults to 1403 printer
+ */
+#define READER_PUNCH_DEVTYPE	0x2540
+#define PRINTER_DEVTYPE		0x1403
+
+/* z/VM spool file control block SFBLOK */
+struct file_control_block {
+	char reserved_1[8];
+	char user_owner[8];
+	char user_orig[8];
+	__s32 data_recs;
+	__s16 rec_len;
+	__s16 file_num;
+	__u8  file_stat;	/* status flags, see FLG_* below */
+	__u8  dev_type;
+	char  reserved_2[6];
+	char  file_name[12];
+	char  file_type[12];
+	char  create_date[8];
+	char  create_time[8];
+	char  reserved_3[6];
+	__u8  file_class;
+	__u8  sfb_lok;
+	__u64 distr_code;
+	__u32 reserved_4;
+	__u8  current_starting_copy_number;
+	__u8  sfblock_cntrl_flags;
+	__u8  reserved_5;
+	__u8  more_status_flags;
+	char  rest[200];
+} __attribute__ ((packed));
+
+/* bits of file_control_block.file_stat */
+#define FLG_SYSTEM_HOLD	0x04
+#define FLG_CP_DUMP	0x10
+#define FLG_USER_HOLD	0x20
+#define FLG_IN_USE	0x80
+
+/*
+ * A struct urdev is created for each ur device that is made available
+ * via the ccw_device driver model.
+ */
+struct urdev {
+	struct ccw_device *cdev;	/* Backpointer to ccw device */
+	struct mutex io_mutex;		/* Serialises device IO */
+	struct completion *io_done;	/* do_ur_io waits; irq completes */
+	struct device *device;
+	struct cdev *char_device;
+	struct ccw_dev_id dev_id;	/* device id */
+	size_t reclen;			/* Record length for *write* CCWs */
+	int class;			/* VM device class */
+	int io_request_rc;		/* return code from I/O request */
+	atomic_t ref_count;		/* reference counter */
+	wait_queue_head_t wait;		/* wait queue to serialize open */
+	int open_flag;			/* "urdev is open" flag */
+	spinlock_t open_lock;		/* serialize critical sections */
+};
+
+/*
+ * A struct urfile is allocated at open() time for each device and
+ * freed on release().
+ */
+struct urfile {
+	struct urdev *urd;	/* backpointer to the device */
+	unsigned int flags;
+	size_t dev_reclen;	/* device record length (writes) */
+	__u16 file_reclen;	/* spool file record length (reads) */
+};
+
+/*
+ * Device major/minor definitions.
+ */
+
+#define UR_MAJOR 0	/* get dynamic major */
+/*
+ * We map minor numbers directly to device numbers (0-FFFF) for simplicity.
+ * This avoids having to allocate (and manage) slot numbers.
+ */
+#define NUM_MINORS 65536
+
+/* Limiting each I/O to 511 records limits chan prog to 4KB (511 r/w + 1 NOP) */
+#define MAX_RECS_PER_IO		511
+#define WRITE_CCW_CMD		0x01
+
+#define TRACE(x...) debug_sprintf_event(vmur_dbf, 1, x)
+#define CCWDEV_CU_DI(cutype, di) \
+		CCW_DEVICE(cutype, 0x00), .driver_info = (di)
+
+#define FILE_RECLEN_OFFSET	4064 /* reclen offset in spool data block */
+
+#endif /* _VMUR_H_ */
diff --git a/src/kernel/linux/v4.14/drivers/s390/char/zcore.c b/src/kernel/linux/v4.14/drivers/s390/char/zcore.c
new file mode 100644
index 0000000..aaed778
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/s390/char/zcore.c
@@ -0,0 +1,366 @@
+/*
+ * zcore module to export memory content and register sets for creating system
+ * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
+ * dump format as s390 standalone dumps.
+ *
+ * For more information please refer to Documentation/s390/zfcpdump.txt
+ *
+ * Copyright IBM Corp. 2003, 2008
+ * Author(s): Michael Holzheu
+ * License: GPL
+ */
+
+#define KMSG_COMPONENT "zdump"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/memblock.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/ipl.h>
+#include <asm/sclp.h>
+#include <asm/setup.h>
+#include <linux/uaccess.h>
+#include <asm/debug.h>
+#include <asm/processor.h>
+#include <asm/irqflags.h>
+#include <asm/checksum.h>
+#include <asm/os_info.h>
+#include <asm/switch_to.h>
+#include "sclp.h"
+
+#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
+
+#define CHUNK_INFO_SIZE	34 /* 2 16-byte char, each followed by blank */
+
+/* architecture id as stored at __LC_AR_MODE_ID in the saved lowcore */
+enum arch_id {
+	ARCH_S390	= 0,
+	ARCH_S390X	= 1,
+};
+
+/* location of the IPL parameter block pointer plus its checksum */
+struct ipib_info {
+	unsigned long	ipib;
+	u32		checksum;
+}  __attribute__((packed));
+
+static struct debug_info *zcore_dbf;
+static int hsa_available;		/* cleared once the HSA is released */
+static struct dentry *zcore_dir;
+static struct dentry *zcore_memmap_file;
+static struct dentry *zcore_reipl_file;
+static struct dentry *zcore_hsa_file;
+static struct ipl_parameter_block *ipl_block;
+
+/* page-sized, page-aligned bounce buffer for SCLP SDIAS copies */
+static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+/*
+ * Copy memory from HSA to user memory (not reentrant):
+ *
+ * @dest:  User buffer where memory should be copied to
+ * @src:   Start address within HSA where data should be copied
+ * @count: Size of buffer, which should be copied
+ */
+int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
+{
+	unsigned long offset, bytes;
+
+	if (!hsa_available)
+		return -ENODATA;
+
+	while (count) {
+		/* SDIAS blocks are 1-based and block 1 holds headers,
+		 * hence the "+ 2" when converting an HSA offset */
+		if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
+			TRACE("sclp_sdias_copy() failed\n");
+			return -EIO;
+		}
+		offset = src % PAGE_SIZE;
+		bytes = min(PAGE_SIZE - offset, count);
+		if (copy_to_user(dest, hsa_buf + offset, bytes))
+			return -EFAULT;
+		src += bytes;
+		dest += bytes;
+		count -= bytes;
+	}
+	return 0;
+}
+
+/*
+ * Copy memory from HSA to kernel memory (not reentrant):
+ *
+ * @dest:  Kernel or user buffer where memory should be copied to
+ * @src:   Start address within HSA where data should be copied
+ * @count: Size of buffer, which should be copied
+ */
+int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
+{
+	unsigned long offset, bytes;
+
+	if (!hsa_available)
+		return -ENODATA;
+
+	while (count) {
+		/* same block addressing convention as memcpy_hsa_user() */
+		if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
+			TRACE("sclp_sdias_copy() failed\n");
+			return -EIO;
+		}
+		offset = src % PAGE_SIZE;
+		bytes = min(PAGE_SIZE - offset, count);
+		memcpy(dest, hsa_buf + offset, bytes);
+		src += bytes;
+		dest += bytes;
+		count -= bytes;
+	}
+	return 0;
+}
+
+/* Populate the boot CPU's register save area from the lowcore copy
+ * preserved in the HSA. Returns 0 on success or a negative errno. */
+static int __init init_cpu_info(void)
+{
+	struct save_area *sa;
+
+	/* get info for boot cpu from lowcore, stored in the HSA */
+	sa = save_area_boot_cpu();
+	if (!sa)
+		return -ENOMEM;
+	if (memcpy_hsa_kernel(hsa_buf, __LC_FPREGS_SAVE_AREA, 512) < 0) {
+		TRACE("could not copy from HSA\n");
+		return -EIO;
+	}
+	save_area_add_regs(sa, hsa_buf); /* vx registers are saved in smp.c */
+	return 0;
+}
+
+/*
+ * Release the HSA
+ */
+static void release_hsa(void)
+{
+	/* give the HSA back to the machine; all HSA copies fail afterwards */
+	diag308(DIAG308_REL_HSA, NULL);
+	hsa_available = 0;
+}
+
+/* Read from the formatted memory map built at open time */
+static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	return simple_read_from_buffer(buf, count, ppos, filp->private_data,
+				       memblock.memory.cnt * CHUNK_INFO_SIZE);
+}
+
+/* Format every memblock region as "<base> <size> " into one buffer
+ * kept in filp->private_data for subsequent reads. */
+static int zcore_memmap_open(struct inode *inode, struct file *filp)
+{
+	struct memblock_region *reg;
+	char *buf;
+	int i = 0;
+
+	buf = kzalloc(memblock.memory.cnt * CHUNK_INFO_SIZE, GFP_KERNEL);
+	if (!buf) {
+		return -ENOMEM;
+	}
+	for_each_memblock(memory, reg) {
+		/* each entry occupies exactly CHUNK_INFO_SIZE characters */
+		sprintf(buf + (i++ * CHUNK_INFO_SIZE), "%016llx %016llx ",
+			(unsigned long long) reg->base,
+			(unsigned long long) reg->size);
+	}
+	filp->private_data = buf;
+	return nonseekable_open(inode, filp);
+}
+
+/* Free the buffer allocated in zcore_memmap_open() */
+static int zcore_memmap_release(struct inode *inode, struct file *filp)
+{
+	kfree(filp->private_data);
+	return 0;
+}
+
+static const struct file_operations zcore_memmap_fops = {
+	.owner		= THIS_MODULE,
+	.read		= zcore_memmap_read,
+	.open		= zcore_memmap_open,
+	.release	= zcore_memmap_release,
+	.llseek		= no_llseek,
+};
+
+/* Any write to "reipl" triggers a re-IPL using the parameter block
+ * recovered in zcore_reipl_init(); a no-op if none was found. */
+static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	if (ipl_block) {
+		diag308(DIAG308_SET, ipl_block);
+		/* does not return on success */
+		diag308(DIAG308_LOAD_CLEAR, NULL);
+	}
+	return count;
+}
+
+static int zcore_reipl_open(struct inode *inode, struct file *filp)
+{
+	return nonseekable_open(inode, filp);
+}
+
+static int zcore_reipl_release(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+static const struct file_operations zcore_reipl_fops = {
+	.owner		= THIS_MODULE,
+	.write		= zcore_reipl_write,
+	.open		= zcore_reipl_open,
+	.release	= zcore_reipl_release,
+	.llseek		= no_llseek,
+};
+
+/* Report the HSA size in hex, or "0" once the HSA has been released */
+static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	static char str[18];
+
+	if (hsa_available)
+		snprintf(str, sizeof(str), "%lx\n", sclp.hsa_size);
+	else
+		snprintf(str, sizeof(str), "0\n");
+	return simple_read_from_buffer(buf, count, ppos, str, strlen(str));
+}
+
+/* Writing the single character '0' releases the HSA early so its
+ * memory becomes usable; any other input is rejected. */
+static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	char value;
+
+	if (*ppos != 0)
+		return -EPIPE;
+	if (copy_from_user(&value, buf, 1))
+		return -EFAULT;
+	if (value != '0')
+		return -EINVAL;
+	release_hsa();
+	return count;
+}
+
+static const struct file_operations zcore_hsa_fops = {
+	.owner		= THIS_MODULE,
+	.write		= zcore_hsa_write,
+	.read		= zcore_hsa_read,
+	.open		= nonseekable_open,
+	.llseek		= no_llseek,
+};
+
+/* Verify that SCLP reported a usable HSA; a size of zero means the
+ * dump infrastructure cannot work. */
+static int __init check_sdias(void)
+{
+	if (sclp.hsa_size)
+		return 0;
+	TRACE("Could not determine HSA size\n");
+	return -ENODEV;
+}
+
+/*
+ * Provide IPL parameter information block from either HSA or memory
+ * for future reipl
+ */
+static int __init zcore_reipl_init(void)
+{
+	struct ipib_info ipib_info;
+	int rc;
+
+	/* fetch the IPIB pointer + checksum from the saved lowcore */
+	rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
+	if (rc)
+		return rc;
+	if (ipib_info.ipib == 0)
+		return 0;	/* no re-IPL block was set up */
+	ipl_block = (void *) __get_free_page(GFP_KERNEL);
+	if (!ipl_block)
+		return -ENOMEM;
+	/* the block may live inside the HSA or in real memory */
+	if (ipib_info.ipib < sclp.hsa_size)
+		rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
+	else
+		rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
+	/* reject a block whose checksum does not match - not an error,
+	 * re-IPL is simply unavailable then */
+	if (rc || (__force u32)csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
+	    ipib_info.checksum) {
+		TRACE("Checksum does not match\n");
+		free_page((unsigned long) ipl_block);
+		ipl_block = NULL;
+	}
+	return 0;
+}
+
+/*
+ * Initialise the zcore dump facility: only active after an FCP dump
+ * IPL (and not in a kdump environment). Sets up SDIAS access to the
+ * HSA, validates the architecture, collects boot CPU registers and
+ * creates the debugfs files zcore/{memmap,reipl,hsa}. On any failure
+ * the HSA is released so its memory is not wasted.
+ */
+static int __init zcore_init(void)
+{
+	unsigned char arch;
+	int rc;
+
+	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+		return -ENODATA;
+	if (OLDMEM_BASE)
+		return -ENODATA;
+
+	zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
+	debug_register_view(zcore_dbf, &debug_sprintf_view);
+	debug_set_level(zcore_dbf, 6);
+
+	TRACE("devno:  %x\n", ipl_info.data.fcp.dev_id.devno);
+	TRACE("wwpn:   %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
+	TRACE("lun:    %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
+
+	rc = sclp_sdias_init();
+	if (rc)
+		goto fail;
+
+	rc = check_sdias();
+	if (rc)
+		goto fail;
+	hsa_available = 1;
+
+	/* the dump tool is 64-bit only; reject a 31-bit dumped system */
+	rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
+	if (rc)
+		goto fail;
+
+	if (arch == ARCH_S390) {
+		pr_alert("The 64-bit dump tool cannot be used for a "
+			 "32-bit system\n");
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	pr_alert("The dump process started for a 64-bit operating system\n");
+	rc = init_cpu_info();
+	if (rc)
+		goto fail;
+
+	rc = zcore_reipl_init();
+	if (rc)
+		goto fail;
+
+	zcore_dir = debugfs_create_dir("zcore" , NULL);
+	if (!zcore_dir) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+	zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
+						NULL, &zcore_memmap_fops);
+	if (!zcore_memmap_file) {
+		rc = -ENOMEM;
+		goto fail_dir;
+	}
+	zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
+						NULL, &zcore_reipl_fops);
+	if (!zcore_reipl_file) {
+		rc = -ENOMEM;
+		goto fail_memmap_file;
+	}
+	zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir,
+					     NULL, &zcore_hsa_fops);
+	if (!zcore_hsa_file) {
+		rc = -ENOMEM;
+		goto fail_reipl_file;
+	}
+	return 0;
+
+fail_reipl_file:
+	debugfs_remove(zcore_reipl_file);
+fail_memmap_file:
+	debugfs_remove(zcore_memmap_file);
+fail_dir:
+	debugfs_remove(zcore_dir);
+fail:
+	diag308(DIAG308_REL_HSA, NULL);
+	return rc;
+}
+subsys_initcall(zcore_init);