[Feature] add GA346 baseline version

Change-Id: Ic62933698569507dcf98240cdf5d9931ae34348f
diff --git a/src/kernel/linux/v4.19/drivers/fsi/Kconfig b/src/kernel/linux/v4.19/drivers/fsi/Kconfig
new file mode 100644
index 0000000..99c99a5
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/fsi/Kconfig
@@ -0,0 +1,68 @@
+#
+# FSI subsystem
+#
+
+menuconfig FSI
+	tristate "FSI support"
+	depends on OF
+	select CRC4
+	---help---
+	  FSI - the FRU Support Interface - is a simple bus for low-level
+	  access to POWER-based hardware.
+
+if FSI
+
+config FSI_NEW_DEV_NODE
+	bool "Create '/dev/fsi' directory for char devices"
+	default n
+	---help---
+	This option causes char devices created for FSI devices to be
+	located under a common /dev/fsi/ directory. Set to N unless your
+	userspace has been updated to handle the new location.
+
+	Additionally, it causes the char device names to be offset by one,
+	so that chip 0 will have /dev/scom1 and chip 1 /dev/scom2, to match
+	old userspace expectations.
+
+	New userspace will use udev rules to generate predictable access
+	symlinks in /dev/fsi/by-path when this option is enabled.
+
+config FSI_MASTER_GPIO
+	tristate "GPIO-based FSI master"
+	depends on GPIOLIB
+	select CRC4
+	---help---
+	This option enables an FSI master driver using GPIO lines.
+
+config FSI_MASTER_HUB
+	tristate "FSI hub master"
+	---help---
+	This option enables an FSI hub master driver.  A hub is a type of FSI
+	master that is connected to the upstream master via a slave.  Hubs
+	allow chaining of FSI links to an arbitrary depth.  This allows for
+	a high target device fanout.
+
+config FSI_MASTER_AST_CF
+	tristate "FSI master based on Aspeed ColdFire coprocessor"
+	depends on GPIOLIB
+	depends on GPIO_ASPEED
+	select GENERIC_ALLOCATOR
+	---help---
+	This option enables an FSI master using the AST2400 and AST2500 GPIO
+	lines driven by the internal ColdFire coprocessor. This requires
+	the corresponding machine specific ColdFire firmware to be available.
+
+config FSI_SCOM
+	tristate "SCOM FSI client device driver"
+	---help---
+	This option enables an FSI based SCOM device driver.
+
+config FSI_SBEFIFO
+	tristate "SBEFIFO FSI client device driver"
+	depends on OF_ADDRESS
+	---help---
+	This option enables an FSI based SBEFIFO device driver. The SBEFIFO is
+	a pipe-like FSI device for communicating with the self boot engine
+	(SBE) on POWER processors.
+
+endif
diff --git a/src/kernel/linux/v4.19/drivers/fsi/Makefile b/src/kernel/linux/v4.19/drivers/fsi/Makefile
new file mode 100644
index 0000000..a50d6ce
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/fsi/Makefile
@@ -0,0 +1,7 @@
+
+obj-$(CONFIG_FSI) += fsi-core.o
+obj-$(CONFIG_FSI_MASTER_HUB) += fsi-master-hub.o
+obj-$(CONFIG_FSI_MASTER_GPIO) += fsi-master-gpio.o
+obj-$(CONFIG_FSI_MASTER_AST_CF) += fsi-master-ast-cf.o
+obj-$(CONFIG_FSI_SCOM) += fsi-scom.o
+obj-$(CONFIG_FSI_SBEFIFO) += fsi-sbefifo.o
diff --git a/src/kernel/linux/v4.19/drivers/fsi/cf-fsi-fw.h b/src/kernel/linux/v4.19/drivers/fsi/cf-fsi-fw.h
new file mode 100644
index 0000000..712df04
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/fsi/cf-fsi-fw.h
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0+
+#ifndef __CF_FSI_FW_H
+#define __CF_FSI_FW_H
+
+/*
+ * uCode file layout
+ *
+ * 0000...03ff : m68k exception vectors
+ * 0400...04ff : Header info & boot config block
+ * 0500....... : Code & stack
+ */
+
+/*
+ * Header info & boot config area
+ *
+ * The Header info is built into the ucode and provides version and
+ * platform information.
+ *
+ * The Boot config needs to be adjusted by the ARM prior to starting
+ * the ucode if the Command/Status area isn't at 0x320000 in CF space
+ * (ie. beginning of SRAM).
+ */
+
+#define HDR_OFFSET	        0x400
+
+/* Info: Signature & version */
+#define HDR_SYS_SIG		0x00	/* 2 bytes system signature */
+#define  SYS_SIG_SHARED		0x5348
+#define  SYS_SIG_SPLIT		0x5350
+#define HDR_FW_VERS		0x02	/* 2 bytes Major.Minor */
+#define HDR_API_VERS		0x04	/* 2 bytes Major.Minor */
+#define  API_VERSION_MAJ	2	/* Current version */
+#define  API_VERSION_MIN	1
+#define HDR_FW_OPTIONS		0x08	/* 4 bytes option flags */
+#define  FW_OPTION_TRACE_EN	0x00000001	/* FW tracing enabled */
+#define	 FW_OPTION_CONT_CLOCK	0x00000002	/* Continuous clocking supported */
+#define HDR_FW_SIZE		0x10	/* 4 bytes size for combo image */
+
+/* Boot Config: Address of Command/Status area */
+#define HDR_CMD_STAT_AREA	0x80	/* 4 bytes CF address */
+#define HDR_FW_CONTROL		0x84	/* 4 bytes control flags */
+#define	 FW_CONTROL_CONT_CLOCK	0x00000002	/* Continuous clocking enabled */
+#define	 FW_CONTROL_DUMMY_RD	0x00000004	/* Extra dummy read (AST2400) */
+#define	 FW_CONTROL_USE_STOP	0x00000008	/* Use STOP instructions */
+#define HDR_CLOCK_GPIO_VADDR	0x90	/* 2 bytes offset from GPIO base */
+#define HDR_CLOCK_GPIO_DADDR	0x92	/* 2 bytes offset from GPIO base */
+#define HDR_DATA_GPIO_VADDR	0x94	/* 2 bytes offset from GPIO base */
+#define HDR_DATA_GPIO_DADDR	0x96	/* 2 bytes offset from GPIO base */
+#define HDR_TRANS_GPIO_VADDR	0x98	/* 2 bytes offset from GPIO base */
+#define HDR_TRANS_GPIO_DADDR	0x9a	/* 2 bytes offset from GPIO base */
+#define HDR_CLOCK_GPIO_BIT	0x9c	/* 1 byte bit number */
+#define HDR_DATA_GPIO_BIT	0x9d	/* 1 byte bit number */
+#define HDR_TRANS_GPIO_BIT	0x9e	/* 1 byte bit number */
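+
+/*
+ * Illustrative sketch only (the variable names below are assumptions, not
+ * part of this interface): if the Command/Status area is not at 0x320000
+ * in CF space, the ARM-side loader patches the boot config in its copy of
+ * the ucode before releasing the coprocessor from reset, e.g.
+ *
+ *	iowrite32be(cmd_stat_cf_addr, fw_mem + HDR_OFFSET + HDR_CMD_STAT_AREA);
+ *	iowrite32be(control_flags, fw_mem + HDR_OFFSET + HDR_FW_CONTROL);
+ */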
+
+/*
+ *  Command/Status area layout: Main part
+ */
+
+/* Command/Status register:
+ *
+ * +---------------------------+
+ * | STAT | RLEN | CLEN | CMD  |
+ * |   8  |   8  |   8  |   8  |
+ * +---------------------------+
+ *    |       |      |      |
+ *    status  |      |      |
+ * Response len      |      |
+ * (in bits)         |      |
+ *                   |      |
+ *         Command len      |
+ *         (in bits)        |
+ *                          |
+ *               Command code
+ *
+ * Due to the big-endian layout, a single byte read will return the
+ * status byte.
+ */
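+
+/*
+ * Example (illustrative): a command word is composed by shifting the
+ * fields into place, e.g.
+ *
+ *	op = CMD_COMMAND
+ *	   | (command_bits  << CMD_REG_CLEN_SHIFT)
+ *	   | (response_bits << CMD_REG_RLEN_SHIFT);
+ *
+ * and, thanks to the big-endian layout, completion can be polled with a
+ * single byte read of the STAT field at CMD_STAT_REG.
+ */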
+#define	CMD_STAT_REG	        0x00
+#define  CMD_REG_CMD_MASK	0x000000ff
+#define  CMD_REG_CMD_SHIFT	0
+#define	  CMD_NONE		0x00
+#define	  CMD_COMMAND		0x01
+#define	  CMD_BREAK		0x02
+#define	  CMD_IDLE_CLOCKS	0x03 /* clen = #clocks */
+#define   CMD_INVALID		0xff
+#define  CMD_REG_CLEN_MASK	0x0000ff00
+#define  CMD_REG_CLEN_SHIFT	8
+#define  CMD_REG_RLEN_MASK	0x00ff0000
+#define  CMD_REG_RLEN_SHIFT	16
+#define  CMD_REG_STAT_MASK	0xff000000
+#define  CMD_REG_STAT_SHIFT	24
+#define	  STAT_WORKING		0x00
+#define	  STAT_COMPLETE		0x01
+#define	  STAT_ERR_INVAL_CMD	0x80
+#define	  STAT_ERR_INVAL_IRQ	0x81
+#define	  STAT_ERR_MTOE		0x82
+
+/* Response tag & CRC */
+#define	STAT_RTAG		0x04
+
+/* Response CRC */
+#define	STAT_RCRC		0x05
+
+/* Echo and Send delay */
+#define	ECHO_DLY_REG		0x08
+#define	SEND_DLY_REG		0x09
+
+/* Command data area
+ *
+ * Last byte of message must be left aligned
+ */
+#define	CMD_DATA		0x10 /* 64 bit of data */
+
+/* Response data area, right aligned, unused top bits are 1 */
+#define	RSP_DATA		0x20 /* 32 bit of data */
+
+/* Misc */
+#define	INT_CNT			0x30 /* 32-bit interrupt count */
+#define	BAD_INT_VEC		0x34 /* 32-bit bad interrupt vector # */
+#define	CF_STARTED		0x38 /* byte, set to -1 when copro started */
+#define	CLK_CNT			0x3c /* 32-bit, clock count (debug only) */
+
+/*
+ *  SRAM layout: GPIO arbitration part
+ */
+#define ARB_REG			0x40
+#define  ARB_ARM_REQ		0x01
+#define  ARB_ARM_ACK		0x02
+
+/* Misc2 */
+#define CF_RESET_D0		0x50
+#define CF_RESET_D1		0x54
+#define BAD_INT_S0		0x58
+#define BAD_INT_S1		0x5c
+#define STOP_CNT		0x60
+
+/* Internal */
+
+/*
+ * SRAM layout: Trace buffer (debug builds only)
+ */
+#define	TRACEBUF		0x100
+#define	  TR_CLKOBIT0		0xc0
+#define	  TR_CLKOBIT1		0xc1
+#define	  TR_CLKOSTART		0x82
+#define	  TR_OLEN		0x83 /* + len */
+#define	  TR_CLKZ		0x84 /* + count */
+#define	  TR_CLKWSTART		0x85
+#define	  TR_CLKTAG		0x86 /* + tag */
+#define	  TR_CLKDATA		0x87 /* + len */
+#define	  TR_CLKCRC		0x88 /* + raw crc */
+#define	  TR_CLKIBIT0		0x90
+#define	  TR_CLKIBIT1		0x91
+#define	  TR_END		0xff
+
+#endif /* __CF_FSI_FW_H */
+
diff --git a/src/kernel/linux/v4.19/drivers/fsi/fsi-core.c b/src/kernel/linux/v4.19/drivers/fsi/fsi-core.c
new file mode 100644
index 0000000..c6fa9b3
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/fsi/fsi-core.c
@@ -0,0 +1,1393 @@
+/*
+ * FSI core driver
+ *
+ * Copyright (C) IBM Corporation 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * TODO:
+ *  - Rework topology
+ *  - s/chip_id/chip_loc
+ *  - s/cfam/chip (cfam_id -> chip_id etc...)
+ */
+
+#include <linux/crc4.h>
+#include <linux/device.h>
+#include <linux/fsi.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "fsi-master.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi.h>
+
+#define FSI_SLAVE_CONF_NEXT_MASK	GENMASK(31, 31)
+#define FSI_SLAVE_CONF_SLOTS_MASK	GENMASK(23, 16)
+#define FSI_SLAVE_CONF_SLOTS_SHIFT	16
+#define FSI_SLAVE_CONF_VERSION_MASK	GENMASK(15, 12)
+#define FSI_SLAVE_CONF_VERSION_SHIFT	12
+#define FSI_SLAVE_CONF_TYPE_MASK	GENMASK(11, 4)
+#define FSI_SLAVE_CONF_TYPE_SHIFT	4
+#define FSI_SLAVE_CONF_CRC_SHIFT	4
+#define FSI_SLAVE_CONF_CRC_MASK		GENMASK(3, 0)
+#define FSI_SLAVE_CONF_DATA_BITS	28
+
+#define FSI_PEEK_BASE			0x410
+
+static const int engine_page_size = 0x400;
+
+#define FSI_SLAVE_BASE			0x800
+
+/*
+ * FSI slave engine control register offsets
+ */
+#define FSI_SMODE		0x0	/* R/W: Mode register */
+#define FSI_SISC		0x8	/* R/W: Interrupt condition */
+#define FSI_SSTAT		0x14	/* R  : Slave status */
+#define FSI_LLMODE		0x100	/* R/W: Link layer mode register */
+
+/*
+ * SMODE fields
+ */
+#define FSI_SMODE_WSC		0x80000000	/* Warm start done */
+#define FSI_SMODE_ECRC		0x20000000	/* Hw CRC check */
+#define FSI_SMODE_SID_SHIFT	24		/* ID shift */
+#define FSI_SMODE_SID_MASK	3		/* ID Mask */
+#define FSI_SMODE_ED_SHIFT	20		/* Echo delay shift */
+#define FSI_SMODE_ED_MASK	0xf		/* Echo delay mask */
+#define FSI_SMODE_SD_SHIFT	16		/* Send delay shift */
+#define FSI_SMODE_SD_MASK	0xf		/* Send delay mask */
+#define FSI_SMODE_LBCRR_SHIFT	8		/* Clk ratio shift */
+#define FSI_SMODE_LBCRR_MASK	0xf		/* Clk ratio mask */
+
+/*
+ * LLMODE fields
+ */
+#define FSI_LLMODE_ASYNC	0x1
+
+#define FSI_SLAVE_SIZE_23b		0x800000
+
+static DEFINE_IDA(master_ida);
+
+struct fsi_slave {
+	struct device		dev;
+	struct fsi_master	*master;
+	struct cdev		cdev;
+	int			cdev_idx;
+	int			id;	/* FSI address */
+	int			link;	/* FSI link# */
+	u32			cfam_id;
+	int			chip_id;
+	uint32_t		size;	/* size of slave address space */
+	u8			t_send_delay;
+	u8			t_echo_delay;
+};
+
+#define to_fsi_master(d) container_of(d, struct fsi_master, dev)
+#define to_fsi_slave(d) container_of(d, struct fsi_slave, dev)
+
+static const int slave_retries = 2;
+static int discard_errors;
+
+static dev_t fsi_base_dev;
+static DEFINE_IDA(fsi_minor_ida);
+#define FSI_CHAR_MAX_DEVICES	0x1000
+
+/* Legacy /dev numbering: 4 devices per chip, 16 chips */
+#define FSI_CHAR_LEGACY_TOP	64
+
+static int fsi_master_read(struct fsi_master *master, int link,
+		uint8_t slave_id, uint32_t addr, void *val, size_t size);
+static int fsi_master_write(struct fsi_master *master, int link,
+		uint8_t slave_id, uint32_t addr, const void *val, size_t size);
+static int fsi_master_break(struct fsi_master *master, int link);
+
+/*
+ * fsi_device_read() / fsi_device_write() / fsi_device_peek()
+ *
+ * FSI endpoint-device support
+ *
+ * Read / write / peek accessors for a client
+ *
+ * Parameters:
+ * dev:  Structure passed to FSI client device drivers on probe().
+ * addr: Offset into the device's register space.  The device's base
+ *       address is added internally, so the client passes only the offset.
+ * val:  For read/peek this is the value read at the specified address. For
+ *       write this is value to write to the specified address.
+ *       The data in val must be FSI bus endian (big endian).
+ * size: Size in bytes of the operation.  Sizes supported are 1, 2 and 4 bytes.
+ *       Addresses must be aligned on size boundaries or an error will result.
+ */
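+
+/*
+ * Example (illustrative; "dev" is the fsi_device passed to the client's
+ * probe): reading a 4-byte register at offset 0x4 of the engine:
+ *
+ *	__be32 raw;
+ *	int rc = fsi_device_read(dev, 0x4, &raw, sizeof(raw));
+ *	if (!rc)
+ *		val = be32_to_cpu(raw);
+ */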
+int fsi_device_read(struct fsi_device *dev, uint32_t addr, void *val,
+		size_t size)
+{
+	if (addr > dev->size || size > dev->size || addr > dev->size - size)
+		return -EINVAL;
+
+	return fsi_slave_read(dev->slave, dev->addr + addr, val, size);
+}
+EXPORT_SYMBOL_GPL(fsi_device_read);
+
+int fsi_device_write(struct fsi_device *dev, uint32_t addr, const void *val,
+		size_t size)
+{
+	if (addr > dev->size || size > dev->size || addr > dev->size - size)
+		return -EINVAL;
+
+	return fsi_slave_write(dev->slave, dev->addr + addr, val, size);
+}
+EXPORT_SYMBOL_GPL(fsi_device_write);
+
+int fsi_device_peek(struct fsi_device *dev, void *val)
+{
+	uint32_t addr = FSI_PEEK_BASE + ((dev->unit - 2) * sizeof(uint32_t));
+
+	return fsi_slave_read(dev->slave, addr, val, sizeof(uint32_t));
+}
+
+static void fsi_device_release(struct device *_device)
+{
+	struct fsi_device *device = to_fsi_dev(_device);
+
+	of_node_put(device->dev.of_node);
+	kfree(device);
+}
+
+static struct fsi_device *fsi_create_device(struct fsi_slave *slave)
+{
+	struct fsi_device *dev;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	dev->dev.parent = &slave->dev;
+	dev->dev.bus = &fsi_bus_type;
+	dev->dev.release = fsi_device_release;
+
+	return dev;
+}
+
+/* FSI slave support */
+static int fsi_slave_calc_addr(struct fsi_slave *slave, uint32_t *addrp,
+		uint8_t *idp)
+{
+	uint32_t addr = *addrp;
+	uint8_t id = *idp;
+
+	if (addr > slave->size)
+		return -EINVAL;
+
+	/* For 23 bit addressing, we encode the extra two bits in the slave
+	 * id (and the slave's actual ID needs to be 0).
+	 */
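+	/* Example (illustrative): addr 0x250000 on slave 0 is sent as id 1,
+	 * addr 0x050000, since 0x250000 >> 21 == 1.
+	 */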
+	if (addr > 0x1fffff) {
+		if (slave->id != 0)
+			return -EINVAL;
+		id = (addr >> 21) & 0x3;
+		addr &= 0x1fffff;
+	}
+
+	*addrp = addr;
+	*idp = id;
+	return 0;
+}
+
+static int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
+{
+	struct fsi_master *master = slave->master;
+	__be32 irq, stat;
+	int rc, link;
+	uint8_t id;
+
+	link = slave->link;
+	id = slave->id;
+
+	rc = fsi_master_read(master, link, id, FSI_SLAVE_BASE + FSI_SISC,
+			&irq, sizeof(irq));
+	if (rc)
+		return rc;
+
+	rc =  fsi_master_read(master, link, id, FSI_SLAVE_BASE + FSI_SSTAT,
+			&stat, sizeof(stat));
+	if (rc)
+		return rc;
+
+	dev_dbg(&slave->dev, "status: 0x%08x, sisc: 0x%08x\n",
+			be32_to_cpu(stat), be32_to_cpu(irq));
+
+	/* clear interrupts */
+	return fsi_master_write(master, link, id, FSI_SLAVE_BASE + FSI_SISC,
+			&irq, sizeof(irq));
+}
+
+/* Encode slave local bus echo delay */
+static inline uint32_t fsi_smode_echodly(int x)
+{
+	return (x & FSI_SMODE_ED_MASK) << FSI_SMODE_ED_SHIFT;
+}
+
+/* Encode slave local bus send delay */
+static inline uint32_t fsi_smode_senddly(int x)
+{
+	return (x & FSI_SMODE_SD_MASK) << FSI_SMODE_SD_SHIFT;
+}
+
+/* Encode slave local bus clock rate ratio */
+static inline uint32_t fsi_smode_lbcrr(int x)
+{
+	return (x & FSI_SMODE_LBCRR_MASK) << FSI_SMODE_LBCRR_SHIFT;
+}
+
+/* Encode slave ID */
+static inline uint32_t fsi_smode_sid(int x)
+{
+	return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
+}
+
+static uint32_t fsi_slave_smode(int id, u8 t_senddly, u8 t_echodly)
+{
+	return FSI_SMODE_WSC | FSI_SMODE_ECRC
+		| fsi_smode_sid(id)
+		| fsi_smode_echodly(t_echodly - 1) | fsi_smode_senddly(t_senddly - 1)
+		| fsi_smode_lbcrr(0x8);
+}
+
+static int fsi_slave_set_smode(struct fsi_slave *slave)
+{
+	uint32_t smode;
+	__be32 data;
+
+	/* set our smode register with the slave ID field to 0; this enables
+	 * extended slave addressing
+	 */
+	smode = fsi_slave_smode(slave->id, slave->t_send_delay, slave->t_echo_delay);
+	data = cpu_to_be32(smode);
+
+	return fsi_master_write(slave->master, slave->link, slave->id,
+				FSI_SLAVE_BASE + FSI_SMODE,
+				&data, sizeof(data));
+}
+
+static int fsi_slave_handle_error(struct fsi_slave *slave, bool write,
+				  uint32_t addr, size_t size)
+{
+	struct fsi_master *master = slave->master;
+	int rc, link;
+	uint32_t reg;
+	uint8_t id, send_delay, echo_delay;
+
+	if (discard_errors)
+		return -1;
+
+	link = slave->link;
+	id = slave->id;
+
+	dev_dbg(&slave->dev, "handling error on %s to 0x%08x[%zd]",
+			write ? "write" : "read", addr, size);
+
+	/* try a simple clear of error conditions, which may fail if we've lost
+	 * communication with the slave
+	 */
+	rc = fsi_slave_report_and_clear_errors(slave);
+	if (!rc)
+		return 0;
+
+	/* send a TERM and retry */
+	if (master->term) {
+		rc = master->term(master, link, id);
+		if (!rc) {
+			rc = fsi_master_read(master, link, id, 0,
+					&reg, sizeof(reg));
+			if (!rc)
+				rc = fsi_slave_report_and_clear_errors(slave);
+			if (!rc)
+				return 0;
+		}
+	}
+
+	send_delay = slave->t_send_delay;
+	echo_delay = slave->t_echo_delay;
+
+	/* getting serious, reset the slave via BREAK */
+	rc = fsi_master_break(master, link);
+	if (rc)
+		return rc;
+
+	slave->t_send_delay = send_delay;
+	slave->t_echo_delay = echo_delay;
+
+	rc = fsi_slave_set_smode(slave);
+	if (rc)
+		return rc;
+
+	if (master->link_config)
+		master->link_config(master, link,
+				    slave->t_send_delay,
+				    slave->t_echo_delay);
+
+	return fsi_slave_report_and_clear_errors(slave);
+}
+
+int fsi_slave_read(struct fsi_slave *slave, uint32_t addr,
+			void *val, size_t size)
+{
+	uint8_t id = slave->id;
+	int rc, err_rc, i;
+
+	rc = fsi_slave_calc_addr(slave, &addr, &id);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < slave_retries; i++) {
+		rc = fsi_master_read(slave->master, slave->link,
+				id, addr, val, size);
+		if (!rc)
+			break;
+
+		err_rc = fsi_slave_handle_error(slave, false, addr, size);
+		if (err_rc)
+			break;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(fsi_slave_read);
+
+int fsi_slave_write(struct fsi_slave *slave, uint32_t addr,
+			const void *val, size_t size)
+{
+	uint8_t id = slave->id;
+	int rc, err_rc, i;
+
+	rc = fsi_slave_calc_addr(slave, &addr, &id);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < slave_retries; i++) {
+		rc = fsi_master_write(slave->master, slave->link,
+				id, addr, val, size);
+		if (!rc)
+			break;
+
+		err_rc = fsi_slave_handle_error(slave, true, addr, size);
+		if (err_rc)
+			break;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(fsi_slave_write);
+
+extern int fsi_slave_claim_range(struct fsi_slave *slave,
+		uint32_t addr, uint32_t size)
+{
+	if (addr + size < addr)
+		return -EINVAL;
+
+	if (addr + size > slave->size)
+		return -EINVAL;
+
+	/* todo: check for overlapping claims */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fsi_slave_claim_range);
+
+extern void fsi_slave_release_range(struct fsi_slave *slave,
+		uint32_t addr, uint32_t size)
+{
+}
+EXPORT_SYMBOL_GPL(fsi_slave_release_range);
+
+static bool fsi_device_node_matches(struct device *dev, struct device_node *np,
+		uint32_t addr, uint32_t size)
+{
+	unsigned int len, na, ns;
+	const __be32 *prop;
+	uint32_t psize;
+
+	na = of_n_addr_cells(np);
+	ns = of_n_size_cells(np);
+
+	if (na != 1 || ns != 1)
+		return false;
+
+	prop = of_get_property(np, "reg", &len);
+	if (!prop || len != 8)
+		return false;
+
+	if (of_read_number(prop, 1) != addr)
+		return false;
+
+	psize = of_read_number(prop + 1, 1);
+	if (psize != size) {
+		dev_warn(dev,
+			"node %s matches probed address, but not size (got 0x%x, expected 0x%x)",
+			of_node_full_name(np), psize, size);
+	}
+
+	return true;
+}
+
+/* Find a matching node for the slave engine at @address, using @size bytes
+ * of space. Returns NULL if not found, or a matching node with refcount
+ * already incremented.
+ */
+static struct device_node *fsi_device_find_of_node(struct fsi_device *dev)
+{
+	struct device_node *parent, *np;
+
+	parent = dev_of_node(&dev->slave->dev);
+	if (!parent)
+		return NULL;
+
+	for_each_child_of_node(parent, np) {
+		if (fsi_device_node_matches(&dev->dev, np,
+					dev->addr, dev->size))
+			return np;
+	}
+
+	return NULL;
+}
+
+static int fsi_slave_scan(struct fsi_slave *slave)
+{
+	uint32_t engine_addr;
+	int rc, i;
+
+	/*
+	 * scan engines
+	 *
+	 * We keep the peek mode and slave engines for the core; so start
+	 * at the third slot in the configuration table. We also need to
+	 * skip the chip ID entry at the start of the address space.
+	 */
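+	/* Example (illustrative): the first scanned entry (unit 2) starts at
+	 * 0xc00; if it reports slots = 2 it spans 0xc00..0x13ff and the
+	 * following engine starts at 0x1400.
+	 */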
+	engine_addr = engine_page_size * 3;
+	for (i = 2; i < engine_page_size / sizeof(uint32_t); i++) {
+		uint8_t slots, version, type, crc;
+		struct fsi_device *dev;
+		uint32_t conf;
+		__be32 data;
+
+		rc = fsi_slave_read(slave, (i + 1) * sizeof(data),
+				&data, sizeof(data));
+		if (rc) {
+			dev_warn(&slave->dev,
+				"error reading slave registers\n");
+			return -1;
+		}
+		conf = be32_to_cpu(data);
+
+		crc = crc4(0, conf, 32);
+		if (crc) {
+			dev_warn(&slave->dev,
+				"crc error in slave register at 0x%04x\n",
+				i);
+			return -1;
+		}
+
+		slots = (conf & FSI_SLAVE_CONF_SLOTS_MASK)
+			>> FSI_SLAVE_CONF_SLOTS_SHIFT;
+		version = (conf & FSI_SLAVE_CONF_VERSION_MASK)
+			>> FSI_SLAVE_CONF_VERSION_SHIFT;
+		type = (conf & FSI_SLAVE_CONF_TYPE_MASK)
+			>> FSI_SLAVE_CONF_TYPE_SHIFT;
+
+		/*
+		 * Unused address areas are marked by a zero type value; this
+		 * skips the defined address areas
+		 */
+		if (type != 0 && slots != 0) {
+
+			/* create device */
+			dev = fsi_create_device(slave);
+			if (!dev)
+				return -ENOMEM;
+
+			dev->slave = slave;
+			dev->engine_type = type;
+			dev->version = version;
+			dev->unit = i;
+			dev->addr = engine_addr;
+			dev->size = slots * engine_page_size;
+
+			dev_dbg(&slave->dev,
+			"engine[%i]: type %x, version %x, addr %x size %x\n",
+					dev->unit, dev->engine_type, version,
+					dev->addr, dev->size);
+
+			dev_set_name(&dev->dev, "%02x:%02x:%02x:%02x",
+					slave->master->idx, slave->link,
+					slave->id, i - 2);
+			dev->dev.of_node = fsi_device_find_of_node(dev);
+
+			rc = device_register(&dev->dev);
+			if (rc) {
+				dev_warn(&slave->dev, "add failed: %d\n", rc);
+				put_device(&dev->dev);
+			}
+		}
+
+		engine_addr += slots * engine_page_size;
+
+		if (!(conf & FSI_SLAVE_CONF_NEXT_MASK))
+			break;
+	}
+
+	return 0;
+}
+
+static unsigned long aligned_access_size(size_t offset, size_t count)
+{
+	unsigned long offset_unit, count_unit;
+
+	/* Criteria:
+	 *
+	 * 1. Access size must be less than or equal to the maximum access
+	 *    width or the highest power-of-two factor of offset
+	 * 2. Access size must be less than or equal to the amount specified by
+	 *    count
+	 *
+	 * The access width is optimal if we can calculate 1 to be strictly
+	 * equal while still satisfying 2.
+	 */
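+	/*
+	 * Example (illustrative): offset 0x6 with 16 bytes remaining gives a
+	 * 2-byte access (offset_unit = 2); offset 0x0 with 9 bytes remaining
+	 * gives a 4-byte access, capped by the 4-byte maximum width.
+	 */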
+
+	/* Find 1 by the bottom bit of offset (with a 4 byte access cap) */
+	offset_unit = BIT(__builtin_ctzl(offset | 4));
+
+	/* Find 2 by the top bit of count */
+	count_unit = BIT(8 * sizeof(unsigned long) - 1 - __builtin_clzl(count));
+
+	/* Constrain the maximum access width to the minimum of both criteria */
+	return BIT(__builtin_ctzl(offset_unit | count_unit));
+}
+
+static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
+		struct kobject *kobj, struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count)
+{
+	struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
+	size_t total_len, read_len;
+	int rc;
+
+	if (off < 0)
+		return -EINVAL;
+
+	if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
+		return -EINVAL;
+
+	for (total_len = 0; total_len < count; total_len += read_len) {
+		read_len = aligned_access_size(off, count - total_len);
+
+		rc = fsi_slave_read(slave, off, buf + total_len, read_len);
+		if (rc)
+			return rc;
+
+		off += read_len;
+	}
+
+	return count;
+}
+
+static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
+		struct kobject *kobj, struct bin_attribute *attr,
+		char *buf, loff_t off, size_t count)
+{
+	struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
+	size_t total_len, write_len;
+	int rc;
+
+	if (off < 0)
+		return -EINVAL;
+
+	if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
+		return -EINVAL;
+
+	for (total_len = 0; total_len < count; total_len += write_len) {
+		write_len = aligned_access_size(off, count - total_len);
+
+		rc = fsi_slave_write(slave, off, buf + total_len, write_len);
+		if (rc)
+			return rc;
+
+		off += write_len;
+	}
+
+	return count;
+}
+
+static const struct bin_attribute fsi_slave_raw_attr = {
+	.attr = {
+		.name = "raw",
+		.mode = 0600,
+	},
+	.size = 0,
+	.read = fsi_slave_sysfs_raw_read,
+	.write = fsi_slave_sysfs_raw_write,
+};
+
+static void fsi_slave_release(struct device *dev)
+{
+	struct fsi_slave *slave = to_fsi_slave(dev);
+
+	fsi_free_minor(slave->dev.devt);
+	of_node_put(dev->of_node);
+	kfree(slave);
+}
+
+static bool fsi_slave_node_matches(struct device_node *np,
+		int link, uint8_t id)
+{
+	unsigned int len, na, ns;
+	const __be32 *prop;
+
+	na = of_n_addr_cells(np);
+	ns = of_n_size_cells(np);
+
+	/* Ensure we have the correct format for addresses and sizes in
+	 * reg properties
+	 */
+	if (na != 2 || ns != 0)
+		return false;
+
+	prop = of_get_property(np, "reg", &len);
+	if (!prop || len != 8)
+		return false;
+
+	return (of_read_number(prop, 1) == link) &&
+		(of_read_number(prop + 1, 1) == id);
+}
+
+/* Find a matching node for the slave at (link, id). Returns NULL if none
+ * found, or a matching node with refcount already incremented.
+ */
+static struct device_node *fsi_slave_find_of_node(struct fsi_master *master,
+		int link, uint8_t id)
+{
+	struct device_node *parent, *np;
+
+	parent = dev_of_node(&master->dev);
+	if (!parent)
+		return NULL;
+
+	for_each_child_of_node(parent, np) {
+		if (fsi_slave_node_matches(np, link, id))
+			return np;
+	}
+
+	return NULL;
+}
+
+static ssize_t cfam_read(struct file *filep, char __user *buf, size_t count,
+			 loff_t *offset)
+{
+	struct fsi_slave *slave = filep->private_data;
+	size_t total_len, read_len;
+	loff_t off = *offset;
+	ssize_t rc;
+
+	if (off < 0)
+		return -EINVAL;
+
+	if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
+		return -EINVAL;
+
+	for (total_len = 0; total_len < count; total_len += read_len) {
+		__be32 data;
+
+		read_len = min_t(size_t, count, 4);
+		read_len -= off & 0x3;
+
+		rc = fsi_slave_read(slave, off, &data, read_len);
+		if (rc)
+			goto fail;
+		rc = copy_to_user(buf + total_len, &data, read_len);
+		if (rc) {
+			rc = -EFAULT;
+			goto fail;
+		}
+		off += read_len;
+	}
+	rc = count;
+ fail:
+	*offset = off;
+	return count;
+}
+
+static ssize_t cfam_write(struct file *filep, const char __user *buf,
+			  size_t count, loff_t *offset)
+{
+	struct fsi_slave *slave = filep->private_data;
+	size_t total_len, write_len;
+	loff_t off = *offset;
+	ssize_t rc;
+
+
+	if (off < 0)
+		return -EINVAL;
+
+	if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
+		return -EINVAL;
+
+	for (total_len = 0; total_len < count; total_len += write_len) {
+		__be32 data;
+
+		write_len = min_t(size_t, count, 4);
+		write_len -= off & 0x3;
+
+		rc = copy_from_user(&data, buf + total_len, write_len);
+		if (rc) {
+			rc = -EFAULT;
+			goto fail;
+		}
+		rc = fsi_slave_write(slave, off, &data, write_len);
+		if (rc)
+			goto fail;
+		off += write_len;
+	}
+	rc = count;
+ fail:
+	*offset = off;
+	return count;
+}
+
+static loff_t cfam_llseek(struct file *file, loff_t offset, int whence)
+{
+	switch (whence) {
+	case SEEK_CUR:
+		break;
+	case SEEK_SET:
+		file->f_pos = offset;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return offset;
+}
+
+static int cfam_open(struct inode *inode, struct file *file)
+{
+	struct fsi_slave *slave = container_of(inode->i_cdev, struct fsi_slave, cdev);
+
+	file->private_data = slave;
+
+	return 0;
+}
+
+static const struct file_operations cfam_fops = {
+	.owner		= THIS_MODULE,
+	.open		= cfam_open,
+	.llseek		= cfam_llseek,
+	.read		= cfam_read,
+	.write		= cfam_write,
+};
+
+static ssize_t send_term_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct fsi_slave *slave = to_fsi_slave(dev);
+	struct fsi_master *master = slave->master;
+
+	if (!master->term)
+		return -ENODEV;
+
+	master->term(master, slave->link, slave->id);
+	return count;
+}
+
+static DEVICE_ATTR_WO(send_term);
+
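+/*
+ * send_echo_delays sysfs attribute: reads back the current send delay;
+ * writing a value between 1 and 16 (e.g. "echo 4 > send_echo_delays" from
+ * the cfam device's sysfs directory) applies it to both the send and echo
+ * delays, which current hardware requires to be identical.
+ */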
+static ssize_t slave_send_echo_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct fsi_slave *slave = to_fsi_slave(dev);
+
+	return sprintf(buf, "%u\n", slave->t_send_delay);
+}
+
+static ssize_t slave_send_echo_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fsi_slave *slave = to_fsi_slave(dev);
+	struct fsi_master *master = slave->master;
+	unsigned long val;
+	int rc;
+
+	if (kstrtoul(buf, 0, &val) < 0)
+		return -EINVAL;
+
+	if (val < 1 || val > 16)
+		return -EINVAL;
+
+	if (!master->link_config)
+		return -ENXIO;
+
+	/* Current HW mandates that send and echo delay are identical */
+	slave->t_send_delay = val;
+	slave->t_echo_delay = val;
+
+	rc = fsi_slave_set_smode(slave);
+	if (rc < 0)
+		return rc;
+	if (master->link_config)
+		master->link_config(master, slave->link,
+				    slave->t_send_delay,
+				    slave->t_echo_delay);
+
+	return count;
+}
+
+static DEVICE_ATTR(send_echo_delays, 0600,
+		   slave_send_echo_show, slave_send_echo_store);
+
+static ssize_t chip_id_show(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct fsi_slave *slave = to_fsi_slave(dev);
+
+	return sprintf(buf, "%d\n", slave->chip_id);
+}
+
+static DEVICE_ATTR_RO(chip_id);
+
+static ssize_t cfam_id_show(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct fsi_slave *slave = to_fsi_slave(dev);
+
+	return sprintf(buf, "0x%x\n", slave->cfam_id);
+}
+
+static DEVICE_ATTR_RO(cfam_id);
+
+static struct attribute *cfam_attr[] = {
+	&dev_attr_send_echo_delays.attr,
+	&dev_attr_chip_id.attr,
+	&dev_attr_cfam_id.attr,
+	&dev_attr_send_term.attr,
+	NULL,
+};
+
+static const struct attribute_group cfam_attr_group = {
+	.attrs = cfam_attr,
+};
+
+static const struct attribute_group *cfam_attr_groups[] = {
+	&cfam_attr_group,
+	NULL,
+};
+
+static char *cfam_devnode(struct device *dev, umode_t *mode,
+			  kuid_t *uid, kgid_t *gid)
+{
+	struct fsi_slave *slave = to_fsi_slave(dev);
+
+#ifdef CONFIG_FSI_NEW_DEV_NODE
+	return kasprintf(GFP_KERNEL, "fsi/cfam%d", slave->cdev_idx);
+#else
+	return kasprintf(GFP_KERNEL, "cfam%d", slave->cdev_idx);
+#endif
+}
+
+static const struct device_type cfam_type = {
+	.name = "cfam",
+	.devnode = cfam_devnode,
+	.groups = cfam_attr_groups
+};
+
+static char *fsi_cdev_devnode(struct device *dev, umode_t *mode,
+			      kuid_t *uid, kgid_t *gid)
+{
+#ifdef CONFIG_FSI_NEW_DEV_NODE
+	return kasprintf(GFP_KERNEL, "fsi/%s", dev_name(dev));
+#else
+	return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+#endif
+}
+
+const struct device_type fsi_cdev_type = {
+	.name = "fsi-cdev",
+	.devnode = fsi_cdev_devnode,
+};
+EXPORT_SYMBOL_GPL(fsi_cdev_type);
+
+/* Backward compatible /dev/ numbering in "old style" mode */
+static int fsi_adjust_index(int index)
+{
+#ifdef CONFIG_FSI_NEW_DEV_NODE
+	return index;
+#else
+	return index + 1;
+#endif
+}
+
+static int __fsi_get_new_minor(struct fsi_slave *slave, enum fsi_dev_type type,
+			       dev_t *out_dev, int *out_index)
+{
+	int cid = slave->chip_id;
+	int id;
+
+	/* Check if we qualify for legacy numbering */
+	if (cid >= 0 && cid < 16 && type < 4) {
+		/* Try reserving the legacy number */
+		id = (cid << 4) | type;
+		id = ida_simple_get(&fsi_minor_ida, id, id + 1, GFP_KERNEL);
+		if (id >= 0) {
+			*out_index = fsi_adjust_index(cid);
+			*out_dev = fsi_base_dev + id;
+			return 0;
+		}
+		/* Other failure */
+		if (id != -ENOSPC)
+			return id;
+		/* Fallback to non-legacy allocation */
+	}
+	id = ida_simple_get(&fsi_minor_ida, FSI_CHAR_LEGACY_TOP,
+			    FSI_CHAR_MAX_DEVICES, GFP_KERNEL);
+	if (id < 0)
+		return id;
+	*out_index = fsi_adjust_index(id);
+	*out_dev = fsi_base_dev + id;
+	return 0;
+}
+
+int fsi_get_new_minor(struct fsi_device *fdev, enum fsi_dev_type type,
+		      dev_t *out_dev, int *out_index)
+{
+	return __fsi_get_new_minor(fdev->slave, type, out_dev, out_index);
+}
+EXPORT_SYMBOL_GPL(fsi_get_new_minor);
+
+void fsi_free_minor(dev_t dev)
+{
+	ida_simple_remove(&fsi_minor_ida, MINOR(dev));
+}
+EXPORT_SYMBOL_GPL(fsi_free_minor);
+
+static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
+{
+	uint32_t cfam_id;
+	struct fsi_slave *slave;
+	uint8_t crc;
+	__be32 data, llmode;
+	int rc;
+
+	/* Currently, we only support single slaves on a link, and use the
+	 * full 23-bit address range
+	 */
+	if (id != 0)
+		return -EINVAL;
+
+	rc = fsi_master_read(master, link, id, 0, &data, sizeof(data));
+	if (rc) {
+		dev_dbg(&master->dev, "can't read slave %02x:%02x %d\n",
+				link, id, rc);
+		return -ENODEV;
+	}
+	cfam_id = be32_to_cpu(data);
+
+	crc = crc4(0, cfam_id, 32);
+	if (crc) {
+		dev_warn(&master->dev, "slave %02x:%02x invalid cfam id CRC!\n",
+				link, id);
+		return -EIO;
+	}
+
+	dev_dbg(&master->dev, "fsi: found chip %08x at %02x:%02x:%02x\n",
+			cfam_id, master->idx, link, id);
+
+	/* If we're behind a master that doesn't provide a self-running bus
+	 * clock, put the slave into async mode
+	 */
+	if (master->flags & FSI_MASTER_FLAG_SWCLOCK) {
+		llmode = cpu_to_be32(FSI_LLMODE_ASYNC);
+		rc = fsi_master_write(master, link, id,
+				FSI_SLAVE_BASE + FSI_LLMODE,
+				&llmode, sizeof(llmode));
+		if (rc)
+			dev_warn(&master->dev,
+				"can't set llmode on slave:%02x:%02x %d\n",
+				link, id, rc);
+	}
+
+	/* We can communicate with a slave; create the slave device and
+	 * register.
+	 */
+	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+	if (!slave)
+		return -ENOMEM;
+
+	dev_set_name(&slave->dev, "slave@%02x:%02x", link, id);
+	slave->dev.type = &cfam_type;
+	slave->dev.parent = &master->dev;
+	slave->dev.of_node = fsi_slave_find_of_node(master, link, id);
+	slave->dev.release = fsi_slave_release;
+	device_initialize(&slave->dev);
+	slave->cfam_id = cfam_id;
+	slave->master = master;
+	slave->link = link;
+	slave->id = id;
+	slave->size = FSI_SLAVE_SIZE_23b;
+	slave->t_send_delay = 16;
+	slave->t_echo_delay = 16;
+
+	/* Get chip ID if any */
+	slave->chip_id = -1;
+	if (slave->dev.of_node) {
+		uint32_t prop;
+		if (!of_property_read_u32(slave->dev.of_node, "chip-id", &prop))
+			slave->chip_id = prop;
+
+	}
+
+	/* Allocate a minor in the FSI space */
+	rc = __fsi_get_new_minor(slave, fsi_dev_cfam, &slave->dev.devt,
+				 &slave->cdev_idx);
+	if (rc)
+		goto err_free;
+
+	/* Create chardev for userspace access */
+	cdev_init(&slave->cdev, &cfam_fops);
+	rc = cdev_device_add(&slave->cdev, &slave->dev);
+	if (rc) {
+		dev_err(&slave->dev, "Error %d creating slave device\n", rc);
+		goto err_free;
+	}
+
+	rc = fsi_slave_set_smode(slave);
+	if (rc) {
+		dev_warn(&master->dev,
+				"can't set smode on slave:%02x:%02x %d\n",
+				link, id, rc);
+		kfree(slave);
+		return -ENODEV;
+	}
+	if (master->link_config)
+		master->link_config(master, link,
+				    slave->t_send_delay,
+				    slave->t_echo_delay);
+
+	/* Legacy raw file -> to be removed */
+	rc = device_create_bin_file(&slave->dev, &fsi_slave_raw_attr);
+	if (rc)
+		dev_warn(&slave->dev, "failed to create raw attr: %d\n", rc);
+
+
+	rc = fsi_slave_scan(slave);
+	if (rc)
+		dev_dbg(&master->dev, "failed during slave scan with: %d\n",
+				rc);
+
+	return rc;
+
+ err_free:
+	put_device(&slave->dev);
+	return rc;
+}
+
+/* FSI master support */
+static int fsi_check_access(uint32_t addr, size_t size)
+{
+	if (size == 4) {
+		if (addr & 0x3)
+			return -EINVAL;
+	} else if (size == 2) {
+		if (addr & 0x1)
+			return -EINVAL;
+	} else if (size != 1)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int fsi_master_read(struct fsi_master *master, int link,
+		uint8_t slave_id, uint32_t addr, void *val, size_t size)
+{
+	int rc;
+
+	trace_fsi_master_read(master, link, slave_id, addr, size);
+
+	rc = fsi_check_access(addr, size);
+	if (!rc)
+		rc = master->read(master, link, slave_id, addr, val, size);
+
+	trace_fsi_master_rw_result(master, link, slave_id, addr, size,
+			false, val, rc);
+
+	return rc;
+}
+
+static int fsi_master_write(struct fsi_master *master, int link,
+		uint8_t slave_id, uint32_t addr, const void *val, size_t size)
+{
+	int rc;
+
+	trace_fsi_master_write(master, link, slave_id, addr, size, val);
+
+	rc = fsi_check_access(addr, size);
+	if (!rc)
+		rc = master->write(master, link, slave_id, addr, val, size);
+
+	trace_fsi_master_rw_result(master, link, slave_id, addr, size,
+			true, val, rc);
+
+	return rc;
+}
+
+static int fsi_master_link_enable(struct fsi_master *master, int link)
+{
+	if (master->link_enable)
+		return master->link_enable(master, link);
+
+	return 0;
+}
+
+/*
+ * Issue a break command on this link
+ */
+static int fsi_master_break(struct fsi_master *master, int link)
+{
+	int rc = 0;
+
+	trace_fsi_master_break(master, link);
+
+	if (master->send_break)
+		rc = master->send_break(master, link);
+	if (master->link_config)
+		master->link_config(master, link, 16, 16);
+
+	return rc;
+}
+
+static int fsi_master_scan(struct fsi_master *master)
+{
+	int link, rc;
+
+	for (link = 0; link < master->n_links; link++) {
+		rc = fsi_master_link_enable(master, link);
+		if (rc) {
+			dev_dbg(&master->dev,
+				"enable link %d failed: %d\n", link, rc);
+			continue;
+		}
+		rc = fsi_master_break(master, link);
+		if (rc) {
+			dev_dbg(&master->dev,
+				"break to link %d failed: %d\n", link, rc);
+			continue;
+		}
+
+		fsi_slave_init(master, link, 0);
+	}
+
+	return 0;
+}
+
+static int fsi_slave_remove_device(struct device *dev, void *arg)
+{
+	device_unregister(dev);
+	return 0;
+}
+
+static int fsi_master_remove_slave(struct device *dev, void *arg)
+{
+	struct fsi_slave *slave = to_fsi_slave(dev);
+
+	device_for_each_child(dev, NULL, fsi_slave_remove_device);
+	cdev_device_del(&slave->cdev, &slave->dev);
+	put_device(dev);
+	return 0;
+}
+
+static void fsi_master_unscan(struct fsi_master *master)
+{
+	device_for_each_child(&master->dev, NULL, fsi_master_remove_slave);
+}
+
+int fsi_master_rescan(struct fsi_master *master)
+{
+	int rc;
+
+	mutex_lock(&master->scan_lock);
+	fsi_master_unscan(master);
+	rc = fsi_master_scan(master);
+	mutex_unlock(&master->scan_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(fsi_master_rescan);
+
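+/*
+ * "rescan" sysfs attribute on the master device: writing anything (e.g.
+ * "echo 1 > rescan" from the fsiN sysfs directory) unscans and re-scans
+ * all links on the master.
+ */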
+static ssize_t master_rescan_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fsi_master *master = to_fsi_master(dev);
+	int rc;
+
+	rc = fsi_master_rescan(master);
+	if (rc < 0)
+		return rc;
+
+	return count;
+}
+
+static DEVICE_ATTR(rescan, 0200, NULL, master_rescan_store);
+
+static ssize_t master_break_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fsi_master *master = to_fsi_master(dev);
+
+	fsi_master_break(master, 0);
+
+	return count;
+}
+
+static DEVICE_ATTR(break, 0200, NULL, master_break_store);
+
+int fsi_master_register(struct fsi_master *master)
+{
+	int rc;
+	struct device_node *np;
+
+	mutex_init(&master->scan_lock);
+	master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL);
+	dev_set_name(&master->dev, "fsi%d", master->idx);
+
+	rc = device_register(&master->dev);
+	if (rc) {
+		ida_simple_remove(&master_ida, master->idx);
+		return rc;
+	}
+
+	rc = device_create_file(&master->dev, &dev_attr_rescan);
+	if (rc) {
+		device_del(&master->dev);
+		ida_simple_remove(&master_ida, master->idx);
+		return rc;
+	}
+
+	rc = device_create_file(&master->dev, &dev_attr_break);
+	if (rc) {
+		device_del(&master->dev);
+		ida_simple_remove(&master_ida, master->idx);
+		return rc;
+	}
+
+	np = dev_of_node(&master->dev);
+	if (!of_property_read_bool(np, "no-scan-on-init")) {
+		mutex_lock(&master->scan_lock);
+		fsi_master_scan(master);
+		mutex_unlock(&master->scan_lock);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fsi_master_register);
+
+void fsi_master_unregister(struct fsi_master *master)
+{
+	if (master->idx >= 0) {
+		ida_simple_remove(&master_ida, master->idx);
+		master->idx = -1;
+	}
+
+	mutex_lock(&master->scan_lock);
+	fsi_master_unscan(master);
+	mutex_unlock(&master->scan_lock);
+	device_unregister(&master->dev);
+}
+EXPORT_SYMBOL_GPL(fsi_master_unregister);
+
+/* FSI core & Linux bus type definitions */
+
+static int fsi_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct fsi_device *fsi_dev = to_fsi_dev(dev);
+	struct fsi_driver *fsi_drv = to_fsi_drv(drv);
+	const struct fsi_device_id *id;
+
+	if (!fsi_drv->id_table)
+		return 0;
+
+	for (id = fsi_drv->id_table; id->engine_type; id++) {
+		if (id->engine_type != fsi_dev->engine_type)
+			continue;
+		if (id->version == FSI_VERSION_ANY ||
+				id->version == fsi_dev->version)
+			return 1;
+	}
+
+	return 0;
+}
+
+int fsi_driver_register(struct fsi_driver *fsi_drv)
+{
+	if (!fsi_drv)
+		return -EINVAL;
+	if (!fsi_drv->id_table)
+		return -EINVAL;
+
+	return driver_register(&fsi_drv->drv);
+}
+EXPORT_SYMBOL_GPL(fsi_driver_register);
+
+void fsi_driver_unregister(struct fsi_driver *fsi_drv)
+{
+	driver_unregister(&fsi_drv->drv);
+}
+EXPORT_SYMBOL_GPL(fsi_driver_unregister);
+
+struct bus_type fsi_bus_type = {
+	.name		= "fsi",
+	.match		= fsi_bus_match,
+};
+EXPORT_SYMBOL_GPL(fsi_bus_type);
+
+static int __init fsi_init(void)
+{
+	int rc;
+
+	rc = alloc_chrdev_region(&fsi_base_dev, 0, FSI_CHAR_MAX_DEVICES, "fsi");
+	if (rc)
+		return rc;
+	rc = bus_register(&fsi_bus_type);
+	if (rc)
+		goto fail_bus;
+	return 0;
+
+ fail_bus:
+	unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
+	return rc;
+}
+postcore_initcall(fsi_init);
+
+static void fsi_exit(void)
+{
+	bus_unregister(&fsi_bus_type);
+	unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
+	ida_destroy(&fsi_minor_ida);
+}
+module_exit(fsi_exit);
+module_param(discard_errors, int, 0664);
+MODULE_LICENSE("GPL");
+MODULE_PARM_DESC(discard_errors, "Don't invoke error handling on bus accesses");
diff --git a/src/kernel/linux/v4.19/drivers/fsi/fsi-master-ast-cf.c b/src/kernel/linux/v4.19/drivers/fsi/fsi-master-ast-cf.c
new file mode 100644
index 0000000..04d10ea
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/fsi/fsi-master-ast-cf.c
@@ -0,0 +1,1440 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2018 IBM Corp
+/*
+ * An FSI master controller, bit-banging GPIOs via the Aspeed ColdFire coprocessor
+ */
+
+#include <linux/crc4.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fsi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/irqflags.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/firmware.h>
+#include <linux/gpio/aspeed.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/genalloc.h>
+
+#include "fsi-master.h"
+#include "cf-fsi-fw.h"
+
+#define FW_FILE_NAME	"cf-fsi-fw.bin"
+
+/* Common SCU based coprocessor control registers */
+#define SCU_COPRO_CTRL			0x100
+#define   SCU_COPRO_RESET			0x00000002
+#define   SCU_COPRO_CLK_EN			0x00000001
+
+/* AST2500 specific ones */
+#define SCU_2500_COPRO_SEG0		0x104
+#define SCU_2500_COPRO_SEG1		0x108
+#define SCU_2500_COPRO_SEG2		0x10c
+#define SCU_2500_COPRO_SEG3		0x110
+#define SCU_2500_COPRO_SEG4		0x114
+#define SCU_2500_COPRO_SEG5		0x118
+#define SCU_2500_COPRO_SEG6		0x11c
+#define SCU_2500_COPRO_SEG7		0x120
+#define SCU_2500_COPRO_SEG8		0x124
+#define   SCU_2500_COPRO_SEG_SWAP		0x00000001
+#define SCU_2500_COPRO_CACHE_CTL	0x128
+#define   SCU_2500_COPRO_CACHE_EN		0x00000001
+#define   SCU_2500_COPRO_SEG0_CACHE_EN		0x00000002
+#define   SCU_2500_COPRO_SEG1_CACHE_EN		0x00000004
+#define   SCU_2500_COPRO_SEG2_CACHE_EN		0x00000008
+#define   SCU_2500_COPRO_SEG3_CACHE_EN		0x00000010
+#define   SCU_2500_COPRO_SEG4_CACHE_EN		0x00000020
+#define   SCU_2500_COPRO_SEG5_CACHE_EN		0x00000040
+#define   SCU_2500_COPRO_SEG6_CACHE_EN		0x00000080
+#define   SCU_2500_COPRO_SEG7_CACHE_EN		0x00000100
+#define   SCU_2500_COPRO_SEG8_CACHE_EN		0x00000200
+
+#define SCU_2400_COPRO_SEG0		0x104
+#define SCU_2400_COPRO_SEG2		0x108
+#define SCU_2400_COPRO_SEG4		0x10c
+#define SCU_2400_COPRO_SEG6		0x110
+#define SCU_2400_COPRO_SEG8		0x114
+#define   SCU_2400_COPRO_SEG_SWAP		0x80000000
+#define SCU_2400_COPRO_CACHE_CTL	0x118
+#define   SCU_2400_COPRO_CACHE_EN		0x00000001
+#define   SCU_2400_COPRO_SEG0_CACHE_EN		0x00000002
+#define   SCU_2400_COPRO_SEG2_CACHE_EN		0x00000004
+#define   SCU_2400_COPRO_SEG4_CACHE_EN		0x00000008
+#define   SCU_2400_COPRO_SEG6_CACHE_EN		0x00000010
+#define   SCU_2400_COPRO_SEG8_CACHE_EN		0x00000020
+
+/* CVIC registers */
+#define CVIC_EN_REG			0x10
+#define CVIC_TRIG_REG			0x18
+
+/*
+ * System register base address (needed for configuring the
+ * coldfire maps)
+ */
+#define SYSREG_BASE			0x1e600000
+
+/* Amount of SRAM required */
+#define SRAM_SIZE			0x1000
+
+#define LAST_ADDR_INVALID		0x1
+
+struct fsi_master_acf {
+	struct fsi_master	master;
+	struct device		*dev;
+	struct regmap		*scu;
+	struct mutex		lock;	/* mutex for command ordering */
+	struct gpio_desc	*gpio_clk;
+	struct gpio_desc	*gpio_data;
+	struct gpio_desc	*gpio_trans;	/* Voltage translator */
+	struct gpio_desc	*gpio_enable;	/* FSI enable */
+	struct gpio_desc	*gpio_mux;	/* Mux control */
+	uint16_t		gpio_clk_vreg;
+	uint16_t		gpio_clk_dreg;
+	uint16_t		gpio_dat_vreg;
+	uint16_t		gpio_dat_dreg;
+	uint16_t		gpio_tra_vreg;
+	uint16_t		gpio_tra_dreg;
+	uint8_t			gpio_clk_bit;
+	uint8_t			gpio_dat_bit;
+	uint8_t			gpio_tra_bit;
+	uint32_t		cf_mem_addr;
+	size_t			cf_mem_size;
+	void __iomem		*cf_mem;
+	void __iomem		*cvic;
+	struct gen_pool		*sram_pool;
+	void __iomem		*sram;
+	bool			is_ast2500;
+	bool			external_mode;
+	bool			trace_enabled;
+	uint32_t		last_addr;
+	uint8_t			t_send_delay;
+	uint8_t			t_echo_delay;
+	uint32_t		cvic_sw_irq;
+};
+#define to_fsi_master_acf(m) container_of(m, struct fsi_master_acf, master)
+
+struct fsi_msg {
+	uint64_t	msg;
+	uint8_t		bits;
+};
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi_master_ast_cf.h>
+
+static void msg_push_bits(struct fsi_msg *msg, uint64_t data, int bits)
+{
+	msg->msg <<= bits;
+	msg->msg |= data & ((1ull << bits) - 1);
+	msg->bits += bits;
+}
+
+static void msg_push_crc(struct fsi_msg *msg)
+{
+	uint8_t crc;
+	int top;
+
+	top = msg->bits & 0x3;
+
+	/* start bit, and any non-aligned top bits */
+	crc = crc4(0, 1 << top | msg->msg >> (msg->bits - top), top + 1);
+
+	/* aligned bits */
+	crc = crc4(crc, msg->msg, msg->bits - top);
+
+	msg_push_bits(msg, crc, 4);
+}
+
+static void msg_finish_cmd(struct fsi_msg *cmd)
+{
+	/* Left align message */
+	cmd->msg <<= (64 - cmd->bits);
+}
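+
+/*
+ * Example (illustrative): build_dpoll_command() below pushes a 2-bit slave
+ * id, the 3-bit FSI_CMD_DPOLL opcode and a 4-bit CRC, giving a 9-bit
+ * message that msg_finish_cmd() left-aligns in the 64-bit msg word.
+ */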
+
+static bool check_same_address(struct fsi_master_acf *master, int id,
+			       uint32_t addr)
+{
+	/* this will also handle LAST_ADDR_INVALID */
+	return master->last_addr == (((id & 0x3) << 21) | (addr & ~0x3));
+}
+
+static bool check_relative_address(struct fsi_master_acf *master, int id,
+				   uint32_t addr, uint32_t *rel_addrp)
+{
+	uint32_t last_addr = master->last_addr;
+	int32_t rel_addr;
+
+	if (last_addr == LAST_ADDR_INVALID)
+		return false;
+
+	/* We may be in 23-bit addressing mode, which uses the id as the
+	 * top two address bits. So, if we're referencing a different ID,
+	 * use absolute addresses.
+	 */
+	if (((last_addr >> 21) & 0x3) != id)
+		return false;
+
+	/* remove the top two bits from any 23-bit addressing */
+	last_addr &= (1 << 21) - 1;
+
+	/* We know that the addresses are limited to 21 bits, so this won't
+	 * overflow the signed rel_addr */
+	rel_addr = addr - last_addr;
+	if (rel_addr > 255 || rel_addr < -256)
+		return false;
+
+	*rel_addrp = (uint32_t)rel_addr;
+
+	return true;
+}
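+
+/*
+ * Example (illustrative): with last_addr = 0x1000 for the same id, a new
+ * access to 0x1040 gives rel_addr = 0x40 and fits a relative address
+ * command; an access to 0x2000 (rel_addr = 0x1000) falls outside the
+ * signed 9-bit window and is sent as an absolute address instead.
+ */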
+
+static void last_address_update(struct fsi_master_acf *master,
+				int id, bool valid, uint32_t addr)
+{
+	if (!valid)
+		master->last_addr = LAST_ADDR_INVALID;
+	else
+		master->last_addr = ((id & 0x3) << 21) | (addr & ~0x3);
+}
+
+/*
+ * Encode an Absolute/Relative/Same Address command
+ */
+static void build_ar_command(struct fsi_master_acf *master,
+			     struct fsi_msg *cmd, uint8_t id,
+			     uint32_t addr, size_t size,
+			     const void *data)
+{
+	int i, addr_bits, opcode_bits;
+	bool write = !!data;
+	uint8_t ds, opcode;
+	uint32_t rel_addr;
+
+	cmd->bits = 0;
+	cmd->msg = 0;
+
+	/* we have 21 bits of address max */
+	addr &= ((1 << 21) - 1);
+
+	/* cmd opcodes are variable length - SAME_AR is only two bits */
+	opcode_bits = 3;
+
+	if (check_same_address(master, id, addr)) {
+		/* we still address the byte offset within the word */
+		addr_bits = 2;
+		opcode_bits = 2;
+		opcode = FSI_CMD_SAME_AR;
+		trace_fsi_master_acf_cmd_same_addr(master);
+
+	} else if (check_relative_address(master, id, addr, &rel_addr)) {
+		/* 8 bits plus sign */
+		addr_bits = 9;
+		addr = rel_addr;
+		opcode = FSI_CMD_REL_AR;
+		trace_fsi_master_acf_cmd_rel_addr(master, rel_addr);
+
+	} else {
+		addr_bits = 21;
+		opcode = FSI_CMD_ABS_AR;
+		trace_fsi_master_acf_cmd_abs_addr(master, addr);
+	}
+
+	/*
+	 * The read/write size is encoded in the lower bits of the address
+	 * (as it must be naturally-aligned), and the following ds bit.
+	 *
+	 *	size	addr:1	addr:0	ds
+	 *	1	x	x	0
+	 *	2	x	0	1
+	 *	4	0	1	1
+	 *
+	 */
+	ds = size > 1 ? 1 : 0;
+	addr &= ~(size - 1);
+	if (size == 4)
+		addr |= 1;
+
+	msg_push_bits(cmd, id, 2);
+	msg_push_bits(cmd, opcode, opcode_bits);
+	msg_push_bits(cmd, write ? 0 : 1, 1);
+	msg_push_bits(cmd, addr, addr_bits);
+	msg_push_bits(cmd, ds, 1);
+	for (i = 0; write && i < size; i++)
+		msg_push_bits(cmd, ((uint8_t *)data)[i], 8);
+
+	msg_push_crc(cmd);
+	msg_finish_cmd(cmd);
+}
+
+static void build_dpoll_command(struct fsi_msg *cmd, uint8_t slave_id)
+{
+	cmd->bits = 0;
+	cmd->msg = 0;
+
+	msg_push_bits(cmd, slave_id, 2);
+	msg_push_bits(cmd, FSI_CMD_DPOLL, 3);
+	msg_push_crc(cmd);
+	msg_finish_cmd(cmd);
+}
+
+static void build_epoll_command(struct fsi_msg *cmd, uint8_t slave_id)
+{
+	cmd->bits = 0;
+	cmd->msg = 0;
+
+	msg_push_bits(cmd, slave_id, 2);
+	msg_push_bits(cmd, FSI_CMD_EPOLL, 3);
+	msg_push_crc(cmd);
+	msg_finish_cmd(cmd);
+}
+
+static void build_term_command(struct fsi_msg *cmd, uint8_t slave_id)
+{
+	cmd->bits = 0;
+	cmd->msg = 0;
+
+	msg_push_bits(cmd, slave_id, 2);
+	msg_push_bits(cmd, FSI_CMD_TERM, 6);
+	msg_push_crc(cmd);
+	msg_finish_cmd(cmd);
+}
+
+static int do_copro_command(struct fsi_master_acf *master, uint32_t op)
+{
+	uint32_t timeout = 10000000;
+	uint8_t stat;
+
+	trace_fsi_master_acf_copro_command(master, op);
+
+	/* Send command */
+	iowrite32be(op, master->sram + CMD_STAT_REG);
+
+	/* Ring doorbell if any */
+	if (master->cvic)
+		iowrite32(0x2, master->cvic + CVIC_TRIG_REG);
+
+	/* Wait for status to indicate completion (or error) */
+	do {
+		if (timeout-- == 0) {
+			dev_warn(master->dev,
+				 "Timeout waiting for coprocessor completion\n");
+			return -ETIMEDOUT;
+		}
+		stat = ioread8(master->sram + CMD_STAT_REG);
+	} while(stat < STAT_COMPLETE || stat == 0xff);
+
+	if (stat == STAT_COMPLETE)
+		return 0;
+	switch(stat) {
+	case STAT_ERR_INVAL_CMD:
+		return -EINVAL;
+	case STAT_ERR_INVAL_IRQ:
+		return -EIO;
+	case STAT_ERR_MTOE:
+		return -ESHUTDOWN;
+	}
+	return -ENXIO;
+}
+
+static int clock_zeros(struct fsi_master_acf *master, int count)
+{
+	while (count) {
+		int rc, lcnt = min(count, 255);
+
+		rc = do_copro_command(master,
+				      CMD_IDLE_CLOCKS | (lcnt << CMD_REG_CLEN_SHIFT));
+		if (rc)
+			return rc;
+		count -= lcnt;
+	}
+	return 0;
+}
+
+static int send_request(struct fsi_master_acf *master, struct fsi_msg *cmd,
+			unsigned int resp_bits)
+{
+	uint32_t op;
+
+	trace_fsi_master_acf_send_request(master, cmd, resp_bits);
+
+	/* Store message into SRAM */
+	iowrite32be((cmd->msg >> 32), master->sram + CMD_DATA);
+	iowrite32be((cmd->msg & 0xffffffff), master->sram + CMD_DATA + 4);
+
+	op = CMD_COMMAND;
+	op |= cmd->bits << CMD_REG_CLEN_SHIFT;
+	if (resp_bits)
+		op |= resp_bits << CMD_REG_RLEN_SHIFT;
+
+	return do_copro_command(master, op);
+}
+
+static int read_copro_response(struct fsi_master_acf *master, uint8_t size,
+			       uint32_t *response, u8 *tag)
+{
+	uint8_t rtag = ioread8(master->sram + STAT_RTAG) & 0xf;
+	uint8_t rcrc = ioread8(master->sram + STAT_RCRC) & 0xf;
+	uint32_t rdata = 0;
+	uint32_t crc;
+	uint8_t ack;
+
+	*tag = ack = rtag & 3;
+
+	/* we have a whole message now; check CRC */
+	crc = crc4(0, 1, 1);
+	crc = crc4(crc, rtag, 4);
+	if (ack == FSI_RESP_ACK && size) {
+		rdata = ioread32be(master->sram + RSP_DATA);
+		crc = crc4(crc, rdata, size);
+		if (response)
+			*response = rdata;
+	}
+	crc = crc4(crc, rcrc, 4);
+
+	trace_fsi_master_acf_copro_response(master, rtag, rcrc, rdata, crc == 0);
+
+	if (crc) {
+		/*
+		 * Check if it's all 1's or all 0's, that probably means
+		 * the host is off
+		 */
+		if ((rtag == 0xf && rcrc == 0xf) || (rtag == 0 && rcrc == 0))
+			return -ENODEV;
+		dev_dbg(master->dev, "Bad response CRC !\n");
+		return -EAGAIN;
+	}
+	return 0;
+}
+
+static int send_term(struct fsi_master_acf *master, uint8_t slave)
+{
+	struct fsi_msg cmd;
+	uint8_t tag;
+	int rc;
+
+	build_term_command(&cmd, slave);
+
+	rc = send_request(master, &cmd, 0);
+	if (rc) {
+		dev_warn(master->dev, "Error %d sending term\n", rc);
+		return rc;
+	}
+
+	rc = read_copro_response(master, 0, NULL, &tag);
+	if (rc < 0) {
+		dev_err(master->dev,
+				"TERM failed; lost communication with slave\n");
+		return -EIO;
+	} else if (tag != FSI_RESP_ACK) {
+		dev_err(master->dev, "TERM failed; response %d\n", tag);
+		return -EIO;
+	}
+	return 0;
+}
+
+static void dump_ucode_trace(struct fsi_master_acf *master)
+{
+	char trbuf[52];
+	char *p;
+	int i;
+
+	dev_dbg(master->dev,
+		"CMDSTAT:%08x RTAG=%02x RCRC=%02x RDATA=%02x #INT=%08x\n",
+		ioread32be(master->sram + CMD_STAT_REG),
+		ioread8(master->sram + STAT_RTAG),
+		ioread8(master->sram + STAT_RCRC),
+		ioread32be(master->sram + RSP_DATA),
+		ioread32be(master->sram + INT_CNT));
+
+	for (i = 0; i < 512; i++) {
+		uint8_t v;
+		if ((i % 16) == 0)
+			p = trbuf;
+		v = ioread8(master->sram + TRACEBUF + i);
+		p += sprintf(p, "%02x ", v);
+		if (((i % 16) == 15) || v == TR_END)
+			dev_dbg(master->dev, "%s\n", trbuf);
+		if (v == TR_END)
+			break;
+	}
+}
+
+static int handle_response(struct fsi_master_acf *master,
+			   uint8_t slave, uint8_t size, void *data)
+{
+	int busy_count = 0, rc;
+	int crc_err_retries = 0;
+	struct fsi_msg cmd;
+	uint32_t response;
+	uint8_t tag;
+retry:
+	rc = read_copro_response(master, size, &response, &tag);
+
+	/* Handle retries on CRC errors */
+	if (rc == -EAGAIN) {
+		/* Too many retries ? */
+		if (crc_err_retries++ > FSI_CRC_ERR_RETRIES) {
+			/*
+			 * Pass it up as a -EIO otherwise upper level will retry
+			 * the whole command which isn't what we want here.
+			 */
+			rc = -EIO;
+			goto bail;
+		}
+		trace_fsi_master_acf_crc_rsp_error(master, crc_err_retries);
+		if (master->trace_enabled)
+			dump_ucode_trace(master);
+		rc = clock_zeros(master, FSI_MASTER_EPOLL_CLOCKS);
+		if (rc) {
+			dev_warn(master->dev,
+				 "Error %d clocking zeros for E_POLL\n", rc);
+			return rc;
+		}
+		build_epoll_command(&cmd, slave);
+		rc = send_request(master, &cmd, size);
+		if (rc) {
+			dev_warn(master->dev, "Error %d sending E_POLL\n", rc);
+			return -EIO;
+		}
+		goto retry;
+	}
+	if (rc)
+		return rc;
+
+	switch (tag) {
+	case FSI_RESP_ACK:
+		if (size && data) {
+			if (size == 32)
+				*(__be32 *)data = cpu_to_be32(response);
+			else if (size == 16)
+				*(__be16 *)data = cpu_to_be16(response);
+			else
+				*(u8 *)data = response;
+		}
+		break;
+	case FSI_RESP_BUSY:
+		/*
+		 * It's necessary to clock the slave before issuing
+		 * d-poll, not indicated in the hardware protocol
+		 * spec. < 20 clocks causes slave to hang, 21 ok.
+		 */
+		dev_dbg(master->dev, "Busy, retrying...\n");
+		if (master->trace_enabled)
+			dump_ucode_trace(master);
+		rc = clock_zeros(master, FSI_MASTER_DPOLL_CLOCKS);
+		if (rc) {
+			dev_warn(master->dev,
+				 "Error %d clocking zeros for D_POLL\n", rc);
+			break;
+		}
+		if (busy_count++ < FSI_MASTER_MAX_BUSY) {
+			build_dpoll_command(&cmd, slave);
+			rc = send_request(master, &cmd, size);
+			if (rc) {
+				dev_warn(master->dev, "Error %d sending D_POLL\n", rc);
+				break;
+			}
+			goto retry;
+		}
+		dev_dbg(master->dev,
+			"ERR slave is stuck in busy state, issuing TERM\n");
+		send_term(master, slave);
+		rc = -EIO;
+		break;
+
+	case FSI_RESP_ERRA:
+		dev_dbg(master->dev, "ERRA received\n");
+		if (master->trace_enabled)
+			dump_ucode_trace(master);
+		rc = -EIO;
+		break;
+	case FSI_RESP_ERRC:
+		dev_dbg(master->dev, "ERRC received\n");
+		if (master->trace_enabled)
+			dump_ucode_trace(master);
+		rc = -EAGAIN;
+		break;
+	}
+ bail:
+	if (busy_count > 0)
+		trace_fsi_master_acf_poll_response_busy(master, busy_count);
+
+	return rc;
+}
+
+static int fsi_master_acf_xfer(struct fsi_master_acf *master, uint8_t slave,
+			       struct fsi_msg *cmd, size_t resp_len, void *resp)
+{
+	int rc = -EAGAIN, retries = 0;
+
+	resp_len <<= 3;
+	while ((retries++) < FSI_CRC_ERR_RETRIES) {
+		rc = send_request(master, cmd, resp_len);
+		if (rc) {
+			if (rc != -ESHUTDOWN)
+				dev_warn(master->dev, "Error %d sending command\n", rc);
+			break;
+		}
+		rc = handle_response(master, slave, resp_len, resp);
+		if (rc != -EAGAIN)
+			break;
+		rc = -EIO;
+		dev_dbg(master->dev, "ECRC retry %d\n", retries);
+
+		/* Pace it a bit before retry */
+		msleep(1);
+	}
+
+	return rc;
+}
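The shift above converts the response length from bytes to bits, which is why handle_response() switches on 8/16/32 rather than 1/2/4 in its ACK case. A minimal sketch of the same conversion, as a hypothetical standalone helper (illustration only, not part of this patch):

static inline uint8_t acf_resp_bits(size_t resp_len_bytes)
{
	return resp_len_bytes << 3;	/* 1, 2 or 4 bytes -> 8, 16 or 32 bits */
}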
+
+static int fsi_master_acf_read(struct fsi_master *_master, int link,
+			       uint8_t id, uint32_t addr, void *val,
+			       size_t size)
+{
+	struct fsi_master_acf *master = to_fsi_master_acf(_master);
+	struct fsi_msg cmd;
+	int rc;
+
+	if (link != 0)
+		return -ENODEV;
+
+	mutex_lock(&master->lock);
+	dev_dbg(master->dev, "read id %d addr %x size %zd\n", id, addr, size);
+	build_ar_command(master, &cmd, id, addr, size, NULL);
+	rc = fsi_master_acf_xfer(master, id, &cmd, size, val);
+	last_address_update(master, id, rc == 0, addr);
+	if (rc)
+		dev_dbg(master->dev, "read id %d addr 0x%08x err: %d\n",
+			id, addr, rc);
+	mutex_unlock(&master->lock);
+
+	return rc;
+}
+
+static int fsi_master_acf_write(struct fsi_master *_master, int link,
+				uint8_t id, uint32_t addr, const void *val,
+				size_t size)
+{
+	struct fsi_master_acf *master = to_fsi_master_acf(_master);
+	struct fsi_msg cmd;
+	int rc;
+
+	if (link != 0)
+		return -ENODEV;
+
+	mutex_lock(&master->lock);
+	build_ar_command(master, &cmd, id, addr, size, val);
+	dev_dbg(master->dev, "write id %d addr %x size %zd raw_data: %08x\n",
+		id, addr, size, *(uint32_t *)val);
+	rc = fsi_master_acf_xfer(master, id, &cmd, 0, NULL);
+	last_address_update(master, id, rc == 0, addr);
+	if (rc)
+		dev_dbg(master->dev, "write id %d addr 0x%08x err: %d\n",
+			id, addr, rc);
+	mutex_unlock(&master->lock);
+
+	return rc;
+}
+
+static int fsi_master_acf_term(struct fsi_master *_master,
+			       int link, uint8_t id)
+{
+	struct fsi_master_acf *master = to_fsi_master_acf(_master);
+	struct fsi_msg cmd;
+	int rc;
+
+	if (link != 0)
+		return -ENODEV;
+
+	mutex_lock(&master->lock);
+	build_term_command(&cmd, id);
+	dev_dbg(master->dev, "term id %d\n", id);
+	rc = fsi_master_acf_xfer(master, id, &cmd, 0, NULL);
+	last_address_update(master, id, false, 0);
+	mutex_unlock(&master->lock);
+
+	return rc;
+}
+
+static int fsi_master_acf_break(struct fsi_master *_master, int link)
+{
+	struct fsi_master_acf *master = to_fsi_master_acf(_master);
+	int rc;
+
+	if (link != 0)
+		return -ENODEV;
+
+	mutex_lock(&master->lock);
+	if (master->external_mode) {
+		mutex_unlock(&master->lock);
+		return -EBUSY;
+	}
+	dev_dbg(master->dev, "sending BREAK\n");
+	rc = do_copro_command(master, CMD_BREAK);
+	last_address_update(master, 0, false, 0);
+	mutex_unlock(&master->lock);
+
+	/* Wait for logic reset to take effect */
+	udelay(200);
+
+	return rc;
+}
+
+static void reset_cf(struct fsi_master_acf *master)
+{
+	regmap_write(master->scu, SCU_COPRO_CTRL, SCU_COPRO_RESET);
+	usleep_range(20, 20);
+	regmap_write(master->scu, SCU_COPRO_CTRL, 0);
+	usleep_range(20, 20);
+}
+
+static void start_cf(struct fsi_master_acf *master)
+{
+	regmap_write(master->scu, SCU_COPRO_CTRL, SCU_COPRO_CLK_EN);
+}
+
+static void setup_ast2500_cf_maps(struct fsi_master_acf *master)
+{
+	/*
+	 * Note about byteswap setting: the bus is wired backwards,
+	 * so setting the byteswap bit actually makes the ColdFire
+	 * work "normally" for a BE processor, i.e., put the MSB in
+	 * the lowest address byte.
+	 *
+	 * We thus need to set the bit for our main memory, which
+	 * contains our program code. We create two mappings for
+	 * the registers, one with each setting.
+	 *
+	 * Segments 2 and 3 have a "swapped" mapping (BE),
+	 * and 6 and 7 have a non-swapped mapping (LE), which allows
+	 * us to avoid byteswapping register accesses since the
+	 * registers are all LE.
+	 */
+
+	/* Setup segment 0 to our memory region */
+	regmap_write(master->scu, SCU_2500_COPRO_SEG0, master->cf_mem_addr |
+		     SCU_2500_COPRO_SEG_SWAP);
+
+	/* Segments 2 and 3 to sysregs with byteswap (for SRAM) */
+	regmap_write(master->scu, SCU_2500_COPRO_SEG2, SYSREG_BASE |
+		     SCU_2500_COPRO_SEG_SWAP);
+	regmap_write(master->scu, SCU_2500_COPRO_SEG3, SYSREG_BASE | 0x100000 |
+		     SCU_2500_COPRO_SEG_SWAP);
+
+	/* And segment 6 and 7 to sysregs no byteswap */
+	regmap_write(master->scu, SCU_2500_COPRO_SEG6, SYSREG_BASE);
+	regmap_write(master->scu, SCU_2500_COPRO_SEG7, SYSREG_BASE | 0x100000);
+
+	/* Memory cachable, regs and SRAM not cachable */
+	regmap_write(master->scu, SCU_2500_COPRO_CACHE_CTL,
+		     SCU_2500_COPRO_SEG0_CACHE_EN | SCU_2500_COPRO_CACHE_EN);
+}
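A compact summary of the ColdFire bus view that the writes above establish, assuming the segment granularity implied by the SEG3 offset (0x100000); this is an editorial sketch, not part of the patch:

/*
 * SEG0        -> master->cf_mem_addr (code/stack in reserved DRAM), byteswapped
 * SEG2 / SEG3 -> SYSREG_BASE + 0x000000 / 0x100000, byteswapped (SRAM accesses)
 * SEG6 / SEG7 -> SYSREG_BASE + 0x000000 / 0x100000, not swapped (LE registers)
 */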
+
+static void setup_ast2400_cf_maps(struct fsi_master_acf *master)
+{
+	/* Setup segment 0 to our memory region */
+	regmap_write(master->scu, SCU_2400_COPRO_SEG0, master->cf_mem_addr |
+		     SCU_2400_COPRO_SEG_SWAP);
+
+	/* Segment 2 to sysregs with byteswap (for SRAM) */
+	regmap_write(master->scu, SCU_2400_COPRO_SEG2, SYSREG_BASE |
+		     SCU_2400_COPRO_SEG_SWAP);
+
+	/* And segment 6 to sysregs no byteswap */
+	regmap_write(master->scu, SCU_2400_COPRO_SEG6, SYSREG_BASE);
+
+	/* Memory cachable, regs and SRAM not cachable */
+	regmap_write(master->scu, SCU_2400_COPRO_CACHE_CTL,
+		     SCU_2400_COPRO_SEG0_CACHE_EN | SCU_2400_COPRO_CACHE_EN);
+}
+
+static void setup_common_fw_config(struct fsi_master_acf *master,
+				   void __iomem *base)
+{
+	iowrite16be(master->gpio_clk_vreg, base + HDR_CLOCK_GPIO_VADDR);
+	iowrite16be(master->gpio_clk_dreg, base + HDR_CLOCK_GPIO_DADDR);
+	iowrite16be(master->gpio_dat_vreg, base + HDR_DATA_GPIO_VADDR);
+	iowrite16be(master->gpio_dat_dreg, base + HDR_DATA_GPIO_DADDR);
+	iowrite16be(master->gpio_tra_vreg, base + HDR_TRANS_GPIO_VADDR);
+	iowrite16be(master->gpio_tra_dreg, base + HDR_TRANS_GPIO_DADDR);
+	iowrite8(master->gpio_clk_bit, base + HDR_CLOCK_GPIO_BIT);
+	iowrite8(master->gpio_dat_bit, base + HDR_DATA_GPIO_BIT);
+	iowrite8(master->gpio_tra_bit, base + HDR_TRANS_GPIO_BIT);
+}
+
+static void setup_ast2500_fw_config(struct fsi_master_acf *master)
+{
+	void __iomem *base = master->cf_mem + HDR_OFFSET;
+
+	setup_common_fw_config(master, base);
+	iowrite32be(FW_CONTROL_USE_STOP, base + HDR_FW_CONTROL);
+}
+
+static void setup_ast2400_fw_config(struct fsi_master_acf *master)
+{
+	void __iomem *base = master->cf_mem + HDR_OFFSET;
+
+	setup_common_fw_config(master, base);
+	iowrite32be(FW_CONTROL_CONT_CLOCK | FW_CONTROL_DUMMY_RD, base + HDR_FW_CONTROL);
+}
+
+static int setup_gpios_for_copro(struct fsi_master_acf *master)
+{
+	int rc;
+
+	/* These aren't under ColdFire control, just set them up appropriately */
+	gpiod_direction_output(master->gpio_mux, 1);
+	gpiod_direction_output(master->gpio_enable, 1);
+
+	/* Those are under ColdFire control, let it configure them */
+	rc = aspeed_gpio_copro_grab_gpio(master->gpio_clk, &master->gpio_clk_vreg,
+					 &master->gpio_clk_dreg, &master->gpio_clk_bit);
+	if (rc) {
+		dev_err(master->dev, "failed to assign clock gpio to coprocessor\n");
+		return rc;
+	}
+	rc = aspeed_gpio_copro_grab_gpio(master->gpio_data, &master->gpio_dat_vreg,
+					 &master->gpio_dat_dreg, &master->gpio_dat_bit);
+	if (rc) {
+		dev_err(master->dev, "failed to assign data gpio to coprocessor\n");
+		aspeed_gpio_copro_release_gpio(master->gpio_clk);
+		return rc;
+	}
+	rc = aspeed_gpio_copro_grab_gpio(master->gpio_trans, &master->gpio_tra_vreg,
+					 &master->gpio_tra_dreg, &master->gpio_tra_bit);
+	if (rc) {
+		dev_err(master->dev, "failed to assign trans gpio to coprocessor\n");
+		aspeed_gpio_copro_release_gpio(master->gpio_clk);
+		aspeed_gpio_copro_release_gpio(master->gpio_data);
+		return rc;
+	}
+	return 0;
+}
+
+static void release_copro_gpios(struct fsi_master_acf *master)
+{
+	aspeed_gpio_copro_release_gpio(master->gpio_clk);
+	aspeed_gpio_copro_release_gpio(master->gpio_data);
+	aspeed_gpio_copro_release_gpio(master->gpio_trans);
+}
+
+static int load_copro_firmware(struct fsi_master_acf *master)
+{
+	const struct firmware *fw;
+	uint16_t sig = 0, wanted_sig;
+	const u8 *data;
+	size_t size = 0;
+	int rc;
+
+	/* Get the binary */
+	rc = request_firmware(&fw, FW_FILE_NAME, master->dev);
+	if (rc) {
+		dev_err(master->dev, "Error %d loading firmware '%s'\n",
+			rc, FW_FILE_NAME);
+		return rc;
+	}
+
+	/* Which image do we want ? (shared vs. split clock/data GPIOs) */
+	if (master->gpio_clk_vreg == master->gpio_dat_vreg)
+		wanted_sig = SYS_SIG_SHARED;
+	else
+		wanted_sig = SYS_SIG_SPLIT;
+	dev_dbg(master->dev, "Looking for image sig %04x\n", wanted_sig);
+
+	/* Try to find it */
+	for (data = fw->data; data < (fw->data + fw->size);) {
+		sig = be16_to_cpup((__be16 *)(data + HDR_OFFSET + HDR_SYS_SIG));
+		size = be32_to_cpup((__be32 *)(data + HDR_OFFSET + HDR_FW_SIZE));
+		if (sig == wanted_sig)
+			break;
+		data += size;
+	}
+	if (sig != wanted_sig) {
+		dev_err(master->dev, "Failed to locate image sig %04x in FW blob\n",
+			wanted_sig);
+		rc = -ENODEV;
+		goto release_fw;
+	}
+	if (size > master->cf_mem_size) {
+		dev_err(master->dev, "FW size (%zd) bigger than memory reserve (%zd)\n",
+			size, master->cf_mem_size);
+		rc = -ENOMEM;
+	} else {
+		memcpy_toio(master->cf_mem, data, size);
+	}
+
+release_fw:
+	release_firmware(fw);
+	return rc;
+}
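For orientation: the firmware blob is a concatenation of images, each carrying its signature and total size in its header, and the loop above hops from header to header until the signature matches. A worked example with made-up sizes (illustration only):

/*
 * offset 0x0000: image A, HDR_SYS_SIG = SYS_SIG_SHARED, HDR_FW_SIZE = 0x3000
 * offset 0x3000: image B, HDR_SYS_SIG = SYS_SIG_SPLIT,  HDR_FW_SIZE = 0x2800
 *
 * Looking for SYS_SIG_SPLIT, the loop reads A's header, skips 0x3000 bytes,
 * and matches at B's header.
 */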
+
+static int check_firmware_image(struct fsi_master_acf *master)
+{
+	uint32_t fw_vers, fw_api, fw_options;
+
+	fw_vers = ioread16be(master->cf_mem + HDR_OFFSET + HDR_FW_VERS);
+	fw_api = ioread16be(master->cf_mem + HDR_OFFSET + HDR_API_VERS);
+	fw_options = ioread32be(master->cf_mem + HDR_OFFSET + HDR_FW_OPTIONS);
+	master->trace_enabled = !!(fw_options & FW_OPTION_TRACE_EN);
+
+	/* Check version and signature */
+	dev_info(master->dev, "ColdFire initialized, firmware v%d API v%d.%d (trace %s)\n",
+		 fw_vers, fw_api >> 8, fw_api & 0xff,
+		 master->trace_enabled ? "enabled" : "disabled");
+
+	if ((fw_api >> 8) != API_VERSION_MAJ) {
+		dev_err(master->dev, "Unsupported coprocessor API version !\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int copro_enable_sw_irq(struct fsi_master_acf *master)
+{
+	int timeout;
+	uint32_t val;
+
+	/*
+	 * Enable coprocessor interrupt input. I've had problems getting the
+	 * value to stick, so try in a loop
+	 */
+	for (timeout = 0; timeout < 10; timeout++) {
+		iowrite32(0x2, master->cvic + CVIC_EN_REG);
+		val = ioread32(master->cvic + CVIC_EN_REG);
+		if (val & 2)
+			break;
+		msleep(1);
+	}
+	if (!(val & 2)) {
+		dev_err(master->dev, "Failed to enable coprocessor interrupt !\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int fsi_master_acf_setup(struct fsi_master_acf *master)
+{
+	int timeout, rc;
+	uint32_t val;
+
+	/* Make sure the ColdFire is stopped  */
+	reset_cf(master);
+
+	/*
+	 * Clear SRAM. This needs to happen before we set up the GPIOs
+	 * as we might start trying to arbitrate as soon as that happens.
+	 */
+	memset_io(master->sram, 0, SRAM_SIZE);
+
+	/* Configure GPIOs */
+	rc = setup_gpios_for_copro(master);
+	if (rc)
+		return rc;
+
+	/* Load the firmware into the reserved memory */
+	rc = load_copro_firmware(master);
+	if (rc)
+		return rc;
+
+	/* Read signature and check versions */
+	rc = check_firmware_image(master);
+	if (rc)
+		return rc;
+
+	/* Setup coldfire memory map */
+	if (master->is_ast2500) {
+		setup_ast2500_cf_maps(master);
+		setup_ast2500_fw_config(master);
+	} else {
+		setup_ast2400_cf_maps(master);
+		setup_ast2400_fw_config(master);
+	}
+
+	/* Start the ColdFire */
+	start_cf(master);
+
+	/* Wait for status register to indicate command completion
+	 * which signals the initialization is complete
+	 */
+	for (timeout = 0; timeout < 10; timeout++) {
+		val = ioread8(master->sram + CF_STARTED);
+		if (val)
+			break;
+		msleep(1);
+	}
+	if (!val) {
+		dev_err(master->dev, "Coprocessor startup timeout !\n");
+		rc = -ENODEV;
+		goto err;
+	}
+
+	/* Configure echo & send delay */
+	iowrite8(master->t_send_delay, master->sram + SEND_DLY_REG);
+	iowrite8(master->t_echo_delay, master->sram + ECHO_DLY_REG);
+
+	/* Enable SW interrupt to copro if any */
+	if (master->cvic) {
+		rc = copro_enable_sw_irq(master);
+		if (rc)
+			goto err;
+	}
+	return 0;
+ err:
+	/* An error occurred, don't leave the coprocessor running */
+	reset_cf(master);
+
+	/* Release the GPIOs */
+	release_copro_gpios(master);
+
+	return rc;
+}
+
+static void fsi_master_acf_terminate(struct fsi_master_acf *master)
+{
+	unsigned long flags;
+
+	/*
+	 * A GPIO arbitration request could come in while this is
+	 * happening. To avoid problems, we disable interrupts so it
+	 * cannot preempt us on this CPU
+	 */
+
+	local_irq_save(flags);
+
+	/* Stop the coprocessor */
+	reset_cf(master);
+
+	/* We mark the copro not-started */
+	iowrite32(0, master->sram + CF_STARTED);
+
+	/* We mark the ARB register as having given up arbitration to
+	 * deal with a potential race with the arbitration request
+	 */
+	iowrite8(ARB_ARM_ACK, master->sram + ARB_REG);
+
+	local_irq_restore(flags);
+
+	/* Return the GPIOs to the ARM */
+	release_copro_gpios(master);
+}
+
+static void fsi_master_acf_setup_external(struct fsi_master_acf *master)
+{
+	/* Setup GPIOs for external FSI master (FSP box) */
+	gpiod_direction_output(master->gpio_mux, 0);
+	gpiod_direction_output(master->gpio_trans, 0);
+	gpiod_direction_output(master->gpio_enable, 1);
+	gpiod_direction_input(master->gpio_clk);
+	gpiod_direction_input(master->gpio_data);
+}
+
+static int fsi_master_acf_link_enable(struct fsi_master *_master, int link)
+{
+	struct fsi_master_acf *master = to_fsi_master_acf(_master);
+	int rc = -EBUSY;
+
+	if (link != 0)
+		return -ENODEV;
+
+	mutex_lock(&master->lock);
+	if (!master->external_mode) {
+		gpiod_set_value(master->gpio_enable, 1);
+		rc = 0;
+	}
+	mutex_unlock(&master->lock);
+
+	return rc;
+}
+
+static int fsi_master_acf_link_config(struct fsi_master *_master, int link,
+				      u8 t_send_delay, u8 t_echo_delay)
+{
+	struct fsi_master_acf *master = to_fsi_master_acf(_master);
+
+	if (link != 0)
+		return -ENODEV;
+
+	mutex_lock(&master->lock);
+	master->t_send_delay = t_send_delay;
+	master->t_echo_delay = t_echo_delay;
+	dev_dbg(master->dev, "Changing delays: send=%d echo=%d\n",
+		t_send_delay, t_echo_delay);
+	iowrite8(master->t_send_delay, master->sram + SEND_DLY_REG);
+	iowrite8(master->t_echo_delay, master->sram + ECHO_DLY_REG);
+	mutex_unlock(&master->lock);
+
+	return 0;
+}
+
+static ssize_t external_mode_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct fsi_master_acf *master = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE - 1, "%u\n",
+			master->external_mode ? 1 : 0);
+}
+
+static ssize_t external_mode_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fsi_master_acf *master = dev_get_drvdata(dev);
+	unsigned long val;
+	bool external_mode;
+	int err;
+
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	external_mode = !!val;
+
+	mutex_lock(&master->lock);
+
+	if (external_mode == master->external_mode) {
+		mutex_unlock(&master->lock);
+		return count;
+	}
+
+	master->external_mode = external_mode;
+	if (master->external_mode) {
+		fsi_master_acf_terminate(master);
+		fsi_master_acf_setup_external(master);
+	} else
+		fsi_master_acf_setup(master);
+
+	mutex_unlock(&master->lock);
+
+	fsi_master_rescan(&master->master);
+
+	return count;
+}
+
+static DEVICE_ATTR(external_mode, 0664,
+		external_mode_show, external_mode_store);
+
+static int fsi_master_acf_gpio_request(void *data)
+{
+	struct fsi_master_acf *master = data;
+	int timeout;
+	u8 val;
+
+	/* Note: This doesn't require holding our mutex */
+
+	/* Write request */
+	iowrite8(ARB_ARM_REQ, master->sram + ARB_REG);
+
+	/*
+	 * There is a race (which does happen at boot time) when we get an
+	 * arbitration request as we are either about to start or are just
+	 * starting the coprocessor.
+	 *
+	 * To handle it, we first check if we are running. If not, we
+	 * check in the SCU whether the copro has been started.
+	 *
+	 * If it hasn't been started, we can basically just assume we have
+	 * arbitration and return. Otherwise, we wait normally, expecting
+	 * the arbitration to eventually complete.
+	 */
+	if (ioread32(master->sram + CF_STARTED) == 0) {
+		unsigned int reg = 0;
+
+		regmap_read(master->scu, SCU_COPRO_CTRL, &reg);
+		if (!(reg & SCU_COPRO_CLK_EN))
+			return 0;
+	}
+
+	/* Ring doorbell if any */
+	if (master->cvic)
+		iowrite32(0x2, master->cvic + CVIC_TRIG_REG);
+
+	for (timeout = 0; timeout < 10000; timeout++) {
+		val = ioread8(master->sram + ARB_REG);
+		if (val != ARB_ARM_REQ)
+			break;
+		udelay(1);
+	}
+
+	/* If it failed, override anyway */
+	if (val != ARB_ARM_ACK)
+		dev_warn(master->dev, "GPIO request arbitration timeout\n");
+
+	return 0;
+}
+
+static int fsi_master_acf_gpio_release(void *data)
+{
+	struct fsi_master_acf *master = data;
+
+	/* Write release */
+	iowrite8(0, master->sram + ARB_REG);
+
+	/* Ring doorbell if any */
+	if (master->cvic)
+		iowrite32(0x2, master->cvic + CVIC_TRIG_REG);
+
+	return 0;
+}
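Taken together, the two callbacks above implement a small mailbox handshake over the ARB_REG byte in SRAM. A sketch of the sequence, using only names defined in this driver; the coprocessor side is firmware behaviour and is stated here as an assumption inferred from the ARM-side code:

/*
 * ARM:   iowrite8(ARB_ARM_REQ, sram + ARB_REG), ring the CVIC doorbell if present
 * copro: parks its FSI/GPIO activity and writes ARB_ARM_ACK   (assumed firmware behaviour)
 * ARM:   owns the GPIO bank and performs its accesses
 * ARM:   iowrite8(0, sram + ARB_REG), ring the doorbell again
 * copro: resumes driving the FSI GPIOs
 */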
+
+static void fsi_master_acf_release(struct device *dev)
+{
+	struct fsi_master_acf *master = to_fsi_master_acf(dev_to_fsi_master(dev));
+
+	/* Cleanup, stop coprocessor */
+	mutex_lock(&master->lock);
+	fsi_master_acf_terminate(master);
+	aspeed_gpio_copro_set_ops(NULL, NULL);
+	mutex_unlock(&master->lock);
+
+	/* Free resources */
+	gen_pool_free(master->sram_pool, (unsigned long)master->sram, SRAM_SIZE);
+	of_node_put(dev_of_node(master->dev));
+
+	kfree(master);
+}
+
+static const struct aspeed_gpio_copro_ops fsi_master_acf_gpio_ops = {
+	.request_access = fsi_master_acf_gpio_request,
+	.release_access = fsi_master_acf_gpio_release,
+};
+
+static int fsi_master_acf_probe(struct platform_device *pdev)
+{
+	struct device_node *np, *mnode = dev_of_node(&pdev->dev);
+	struct genpool_data_fixed gpdf;
+	struct fsi_master_acf *master;
+	struct gpio_desc *gpio;
+	struct resource res;
+	uint32_t cf_mem_align;
+	int rc;
+
+	master = kzalloc(sizeof(*master), GFP_KERNEL);
+	if (!master)
+		return -ENOMEM;
+
+	master->dev = &pdev->dev;
+	master->master.dev.parent = master->dev;
+	master->last_addr = LAST_ADDR_INVALID;
+
+	/* AST2400 vs. AST2500 */
+	master->is_ast2500 = of_device_is_compatible(mnode, "aspeed,ast2500-cf-fsi-master");
+
+	/* Grab the SCU, we'll need to access it to configure the coprocessor */
+	if (master->is_ast2500)
+		master->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu");
+	else
+		master->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2400-scu");
+	if (IS_ERR(master->scu)) {
+		dev_err(&pdev->dev, "failed to find SCU regmap\n");
+		rc = PTR_ERR(master->scu);
+		goto err_free;
+	}
+
+	/* Grab all the GPIOs we need */
+	gpio = devm_gpiod_get(&pdev->dev, "clock", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(&pdev->dev, "failed to get clock gpio\n");
+		rc = PTR_ERR(gpio);
+		goto err_free;
+	}
+	master->gpio_clk = gpio;
+
+	gpio = devm_gpiod_get(&pdev->dev, "data", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(&pdev->dev, "failed to get data gpio\n");
+		rc = PTR_ERR(gpio);
+		goto err_free;
+	}
+	master->gpio_data = gpio;
+
+	/* Optional GPIOs */
+	gpio = devm_gpiod_get_optional(&pdev->dev, "trans", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(&pdev->dev, "failed to get trans gpio\n");
+		rc = PTR_ERR(gpio);
+		goto err_free;
+	}
+	master->gpio_trans = gpio;
+
+	gpio = devm_gpiod_get_optional(&pdev->dev, "enable", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(&pdev->dev, "failed to get enable gpio\n");
+		rc = PTR_ERR(gpio);
+		goto err_free;
+	}
+	master->gpio_enable = gpio;
+
+	gpio = devm_gpiod_get_optional(&pdev->dev, "mux", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(&pdev->dev, "failed to get mux gpio\n");
+		rc = PTR_ERR(gpio);
+		goto err_free;
+	}
+	master->gpio_mux = gpio;
+
+	/* Grab the reserved memory region (use DMA API instead ?) */
+	np = of_parse_phandle(mnode, "memory-region", 0);
+	if (!np) {
+		dev_err(&pdev->dev, "Didn't find reserved memory\n");
+		rc = -EINVAL;
+		goto err_free;
+	}
+	rc = of_address_to_resource(np, 0, &res);
+	of_node_put(np);
+	if (rc) {
+		dev_err(&pdev->dev, "Couldn't convert reserved memory region to a resource\n");
+		rc = -ENOMEM;
+		goto err_free;
+	}
+	master->cf_mem_size = resource_size(&res);
+	master->cf_mem_addr = (uint32_t)res.start;
+	cf_mem_align = master->is_ast2500 ? 0x00100000 : 0x00200000;
+	if (master->cf_mem_addr & (cf_mem_align - 1)) {
+		dev_err(&pdev->dev, "Reserved memory has insufficient alignment\n");
+		rc = -ENOMEM;
+		goto err_free;
+	}
+	master->cf_mem = devm_ioremap_resource(&pdev->dev, &res);
+	if (IS_ERR(master->cf_mem)) {
+		rc = PTR_ERR(master->cf_mem);
+		dev_err(&pdev->dev, "Error %d mapping coldfire memory\n", rc);
+		goto err_free;
+	}
+	dev_dbg(&pdev->dev, "DRAM allocation @%x\n", master->cf_mem_addr);
+
+	/* AST2500 has a SW interrupt to the coprocessor */
+	if (master->is_ast2500) {
+		/* Grab the CVIC (ColdFire interrupts controller) */
+		np = of_parse_phandle(mnode, "aspeed,cvic", 0);
+		if (!np) {
+			dev_err(&pdev->dev, "Didn't find CVIC\n");
+			rc = -EINVAL;
+			goto err_free;
+		}
+		master->cvic = devm_of_iomap(&pdev->dev, np, 0, NULL);
+		if (IS_ERR(master->cvic)) {
+			rc = PTR_ERR(master->cvic);
+			dev_err(&pdev->dev, "Error %d mapping CVIC\n", rc);
+			goto err_free;
+		}
+		rc = of_property_read_u32(np, "copro-sw-interrupts",
+					  &master->cvic_sw_irq);
+		if (rc) {
+			dev_err(&pdev->dev, "Can't find coprocessor SW interrupt\n");
+			goto err_free;
+		}
+	}
+
+	/* Grab the SRAM */
+	master->sram_pool = of_gen_pool_get(dev_of_node(&pdev->dev), "aspeed,sram", 0);
+	if (!master->sram_pool) {
+		rc = -ENODEV;
+		dev_err(&pdev->dev, "Can't find sram pool\n");
+		goto err_free;
+	}
+
+	/* Current microcode only deals with fixed location in SRAM */
+	gpdf.offset = 0;
+	master->sram = (void __iomem *)gen_pool_alloc_algo(master->sram_pool, SRAM_SIZE,
+							   gen_pool_fixed_alloc, &gpdf);
+	if (!master->sram) {
+		rc = -ENOMEM;
+		dev_err(&pdev->dev, "Failed to allocate sram from pool\n");
+		goto err_free;
+	}
+	dev_dbg(&pdev->dev, "SRAM allocation @%lx\n",
+		(unsigned long)gen_pool_virt_to_phys(master->sram_pool,
+						     (unsigned long)master->sram));
+
+	/*
+	 * Hookup with the GPIO driver for arbitration of GPIO banks
+	 * ownership.
+	 */
+	aspeed_gpio_copro_set_ops(&fsi_master_acf_gpio_ops, master);
+
+	/* Default FSI command delays */
+	master->t_send_delay = FSI_SEND_DELAY_CLOCKS;
+	master->t_echo_delay = FSI_ECHO_DELAY_CLOCKS;
+	master->master.n_links = 1;
+	if (master->is_ast2500)
+		master->master.flags = FSI_MASTER_FLAG_SWCLOCK;
+	master->master.read = fsi_master_acf_read;
+	master->master.write = fsi_master_acf_write;
+	master->master.term = fsi_master_acf_term;
+	master->master.send_break = fsi_master_acf_break;
+	master->master.link_enable = fsi_master_acf_link_enable;
+	master->master.link_config = fsi_master_acf_link_config;
+	master->master.dev.of_node = of_node_get(dev_of_node(master->dev));
+	master->master.dev.release = fsi_master_acf_release;
+	platform_set_drvdata(pdev, master);
+	mutex_init(&master->lock);
+
+	mutex_lock(&master->lock);
+	rc = fsi_master_acf_setup(master);
+	mutex_unlock(&master->lock);
+	if (rc)
+		goto release_of_dev;
+
+	rc = device_create_file(&pdev->dev, &dev_attr_external_mode);
+	if (rc)
+		goto stop_copro;
+
+	rc = fsi_master_register(&master->master);
+	if (!rc)
+		return 0;
+
+	device_remove_file(master->dev, &dev_attr_external_mode);
+	put_device(&master->master.dev);
+	return rc;
+
+ stop_copro:
+	fsi_master_acf_terminate(master);
+ release_of_dev:
+	aspeed_gpio_copro_set_ops(NULL, NULL);
+	gen_pool_free(master->sram_pool, (unsigned long)master->sram, SRAM_SIZE);
+	of_node_put(dev_of_node(master->dev));
+ err_free:
+	kfree(master);
+	return rc;
+}
+
+static int fsi_master_acf_remove(struct platform_device *pdev)
+{
+	struct fsi_master_acf *master = platform_get_drvdata(pdev);
+
+	device_remove_file(master->dev, &dev_attr_external_mode);
+
+	fsi_master_unregister(&master->master);
+
+	return 0;
+}
+
+static const struct of_device_id fsi_master_acf_match[] = {
+	{ .compatible = "aspeed,ast2400-cf-fsi-master" },
+	{ .compatible = "aspeed,ast2500-cf-fsi-master" },
+	{ },
+};
+
+static struct platform_driver fsi_master_acf = {
+	.driver = {
+		.name		= "fsi-master-acf",
+		.of_match_table	= fsi_master_acf_match,
+	},
+	.probe	= fsi_master_acf_probe,
+	.remove = fsi_master_acf_remove,
+};
+
+module_platform_driver(fsi_master_acf);
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.19/drivers/fsi/fsi-master-gpio.c b/src/kernel/linux/v4.19/drivers/fsi/fsi-master-gpio.c
new file mode 100644
index 0000000..4eb3a76
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/fsi/fsi-master-gpio.c
@@ -0,0 +1,894 @@
+/*
+ * An FSI master controller, using a simple GPIO bit-banging interface
+ */
+
+#include <linux/crc4.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fsi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/irqflags.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "fsi-master.h"
+
+#define	FSI_GPIO_STD_DLY	1	/* Standard pin delay in ns */
+#define LAST_ADDR_INVALID		0x1
+
+struct fsi_master_gpio {
+	struct fsi_master	master;
+	struct device		*dev;
+	struct mutex		cmd_lock;	/* mutex for command ordering */
+	struct gpio_desc	*gpio_clk;
+	struct gpio_desc	*gpio_data;
+	struct gpio_desc	*gpio_trans;	/* Voltage translator */
+	struct gpio_desc	*gpio_enable;	/* FSI enable */
+	struct gpio_desc	*gpio_mux;	/* Mux control */
+	bool			external_mode;
+	bool			no_delays;
+	uint32_t		last_addr;
+	uint8_t			t_send_delay;
+	uint8_t			t_echo_delay;
+};
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi_master_gpio.h>
+
+#define to_fsi_master_gpio(m) container_of(m, struct fsi_master_gpio, master)
+
+struct fsi_gpio_msg {
+	uint64_t	msg;
+	uint8_t		bits;
+};
+
+static void clock_toggle(struct fsi_master_gpio *master, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		if (!master->no_delays)
+			ndelay(FSI_GPIO_STD_DLY);
+		gpiod_set_value(master->gpio_clk, 0);
+		if (!master->no_delays)
+			ndelay(FSI_GPIO_STD_DLY);
+		gpiod_set_value(master->gpio_clk, 1);
+	}
+}
+
+static int sda_clock_in(struct fsi_master_gpio *master)
+{
+	int in;
+
+	if (!master->no_delays)
+		ndelay(FSI_GPIO_STD_DLY);
+	gpiod_set_value(master->gpio_clk, 0);
+
+	/* Dummy read to feed the synchronizers */
+	gpiod_get_value(master->gpio_data);
+
+	/* Actual data read */
+	in = gpiod_get_value(master->gpio_data);
+	if (!master->no_delays)
+		ndelay(FSI_GPIO_STD_DLY);
+	gpiod_set_value(master->gpio_clk, 1);
+	return in ? 1 : 0;
+}
+
+static void sda_out(struct fsi_master_gpio *master, int value)
+{
+	gpiod_set_value(master->gpio_data, value);
+}
+
+static void set_sda_input(struct fsi_master_gpio *master)
+{
+	gpiod_direction_input(master->gpio_data);
+	gpiod_set_value(master->gpio_trans, 0);
+}
+
+static void set_sda_output(struct fsi_master_gpio *master, int value)
+{
+	gpiod_set_value(master->gpio_trans, 1);
+	gpiod_direction_output(master->gpio_data, value);
+}
+
+static void clock_zeros(struct fsi_master_gpio *master, int count)
+{
+	trace_fsi_master_gpio_clock_zeros(master, count);
+	set_sda_output(master, 1);
+	clock_toggle(master, count);
+}
+
+static void echo_delay(struct fsi_master_gpio *master)
+{
+	clock_zeros(master, master->t_echo_delay);
+}
+
+static void serial_in(struct fsi_master_gpio *master, struct fsi_gpio_msg *msg,
+			uint8_t num_bits)
+{
+	uint8_t bit, in_bit;
+
+	set_sda_input(master);
+
+	for (bit = 0; bit < num_bits; bit++) {
+		in_bit = sda_clock_in(master);
+		msg->msg <<= 1;
+		msg->msg |= ~in_bit & 0x1;	/* Data is active low */
+	}
+	msg->bits += num_bits;
+
+	trace_fsi_master_gpio_in(master, num_bits, msg->msg);
+}
+
+static void serial_out(struct fsi_master_gpio *master,
+			const struct fsi_gpio_msg *cmd)
+{
+	uint8_t bit;
+	uint64_t msg = ~cmd->msg;	/* Data is active low */
+	uint64_t sda_mask = 0x1ULL << (cmd->bits - 1);
+	uint64_t last_bit = ~0;
+	int next_bit;
+
+	trace_fsi_master_gpio_out(master, cmd->bits, cmd->msg);
+
+	if (!cmd->bits) {
+		dev_warn(master->dev, "trying to output 0 bits\n");
+		return;
+	}
+	set_sda_output(master, 0);
+
+	/* Send the start bit */
+	sda_out(master, 0);
+	clock_toggle(master, 1);
+
+	/* Send the message */
+	for (bit = 0; bit < cmd->bits; bit++) {
+		next_bit = (msg & sda_mask) >> (cmd->bits - 1);
+		if (last_bit ^ next_bit) {
+			sda_out(master, next_bit);
+			last_bit = next_bit;
+		}
+		clock_toggle(master, 1);
+		msg <<= 1;
+	}
+}
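Because both the start bit and the active-low inversion are easy to misread, here is a worked example of what serial_out() above actually puts on the data GPIO (illustration only, not part of this patch):

/*
 * cmd->msg = 0b101, cmd->bits = 3.
 * The start bit is a logical 1, written as 0 (FSI data is active low), then
 * the message bits go out MSB-first, each one inverted the same way:
 *
 *   logical:   start=1, 1, 0, 1
 *   data GPIO:       0, 0, 1, 0
 */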
+
+static void msg_push_bits(struct fsi_gpio_msg *msg, uint64_t data, int bits)
+{
+	msg->msg <<= bits;
+	msg->msg |= data & ((1ull << bits) - 1);
+	msg->bits += bits;
+}
+
+static void msg_push_crc(struct fsi_gpio_msg *msg)
+{
+	uint8_t crc;
+	int top;
+
+	top = msg->bits & 0x3;
+
+	/* start bit, and any non-aligned top bits */
+	crc = crc4(0, 1 << top | msg->msg >> (msg->bits - top), top + 1);
+
+	/* aligned bits */
+	crc = crc4(crc, msg->msg, msg->bits - top);
+
+	msg_push_bits(msg, crc, 4);
+}
+
+static bool check_same_address(struct fsi_master_gpio *master, int id,
+		uint32_t addr)
+{
+	/* this will also handle LAST_ADDR_INVALID */
+	return master->last_addr == (((id & 0x3) << 21) | (addr & ~0x3));
+}
+
+static bool check_relative_address(struct fsi_master_gpio *master, int id,
+		uint32_t addr, uint32_t *rel_addrp)
+{
+	uint32_t last_addr = master->last_addr;
+	int32_t rel_addr;
+
+	if (last_addr == LAST_ADDR_INVALID)
+		return false;
+
+	/* We may be in 23-bit addressing mode, which uses the id as the
+	 * top two address bits. So, if we're referencing a different ID,
+	 * use absolute addresses.
+	 */
+	if (((last_addr >> 21) & 0x3) != id)
+		return false;
+
+	/* remove the top two bits from any 23-bit addressing */
+	last_addr &= (1 << 21) - 1;
+
+	/* We know that the addresses are limited to 21 bits, so this won't
+	 * overflow the signed rel_addr */
+	rel_addr = addr - last_addr;
+	if (rel_addr > 255 || rel_addr < -256)
+		return false;
+
+	*rel_addrp = (uint32_t)rel_addr;
+
+	return true;
+}
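The window test above is the only place with room for sign-handling mistakes, so here is the same check as a minimal standalone sketch (hypothetical helper, not part of this patch). With last_addr 0x1000, an access to 0x10fc is encoded relatively, while 0x1200 falls back to an absolute address:

static inline bool fsi_rel_addr_in_range(uint32_t last_addr, uint32_t addr)
{
	/* both values fit in 21 bits, so the subtraction cannot overflow */
	int32_t rel = (int32_t)addr - (int32_t)last_addr;

	return rel >= -256 && rel <= 255;
}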
+
+static void last_address_update(struct fsi_master_gpio *master,
+		int id, bool valid, uint32_t addr)
+{
+	if (!valid)
+		master->last_addr = LAST_ADDR_INVALID;
+	else
+		master->last_addr = ((id & 0x3) << 21) | (addr & ~0x3);
+}
+
+/*
+ * Encode an Absolute/Relative/Same Address command
+ */
+static void build_ar_command(struct fsi_master_gpio *master,
+		struct fsi_gpio_msg *cmd, uint8_t id,
+		uint32_t addr, size_t size, const void *data)
+{
+	int i, addr_bits, opcode_bits;
+	bool write = !!data;
+	uint8_t ds, opcode;
+	uint32_t rel_addr;
+
+	cmd->bits = 0;
+	cmd->msg = 0;
+
+	/* we have 21 bits of address max */
+	addr &= ((1 << 21) - 1);
+
+	/* cmd opcodes are variable length - SAME_AR is only two bits */
+	opcode_bits = 3;
+
+	if (check_same_address(master, id, addr)) {
+		/* we still address the byte offset within the word */
+		addr_bits = 2;
+		opcode_bits = 2;
+		opcode = FSI_CMD_SAME_AR;
+		trace_fsi_master_gpio_cmd_same_addr(master);
+
+	} else if (check_relative_address(master, id, addr, &rel_addr)) {
+		/* 8 bits plus sign */
+		addr_bits = 9;
+		addr = rel_addr;
+		opcode = FSI_CMD_REL_AR;
+		trace_fsi_master_gpio_cmd_rel_addr(master, rel_addr);
+
+	} else {
+		addr_bits = 21;
+		opcode = FSI_CMD_ABS_AR;
+		trace_fsi_master_gpio_cmd_abs_addr(master, addr);
+	}
+
+	/*
+	 * The read/write size is encoded in the lower bits of the address
+	 * (as it must be naturally-aligned), and the following ds bit.
+	 *
+	 *	size	addr:1	addr:0	ds
+	 *	1	x	x	0
+	 *	2	x	0	1
+	 *	4	0	1	1
+	 *
+	 */
+	ds = size > 1 ? 1 : 0;
+	addr &= ~(size - 1);
+	if (size == 4)
+		addr |= 1;
+
+	msg_push_bits(cmd, id, 2);
+	msg_push_bits(cmd, opcode, opcode_bits);
+	msg_push_bits(cmd, write ? 0 : 1, 1);
+	msg_push_bits(cmd, addr, addr_bits);
+	msg_push_bits(cmd, ds, 1);
+	for (i = 0; write && i < size; i++)
+		msg_push_bits(cmd, ((uint8_t *)data)[i], 8);
+
+	msg_push_crc(cmd);
+}
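To make the encoding table above concrete, the size/ds computation can be written as a standalone helper; this is a sketch for illustration only, the real logic stays inline in build_ar_command(). For a 4-byte access at 0x1004 it returns ds = 1 and rewrites the address to 0x1005:

static inline uint8_t fsi_ar_encode_size(uint32_t *addrp, size_t size)
{
	uint8_t ds = size > 1 ? 1 : 0;		/* 1-byte: ds = 0, 2/4-byte: ds = 1 */

	*addrp &= ~((uint32_t)size - 1);	/* transfers are naturally aligned */
	if (size == 4)
		*addrp |= 1;			/* 4 bytes: addr:0 = 1 */
	return ds;
}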
+
+static void build_dpoll_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
+{
+	cmd->bits = 0;
+	cmd->msg = 0;
+
+	msg_push_bits(cmd, slave_id, 2);
+	msg_push_bits(cmd, FSI_CMD_DPOLL, 3);
+	msg_push_crc(cmd);
+}
+
+static void build_epoll_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
+{
+	cmd->bits = 0;
+	cmd->msg = 0;
+
+	msg_push_bits(cmd, slave_id, 2);
+	msg_push_bits(cmd, FSI_CMD_EPOLL, 3);
+	msg_push_crc(cmd);
+}
+
+static void build_term_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
+{
+	cmd->bits = 0;
+	cmd->msg = 0;
+
+	msg_push_bits(cmd, slave_id, 2);
+	msg_push_bits(cmd, FSI_CMD_TERM, 6);
+	msg_push_crc(cmd);
+}
+
+/*
+ * Note: callers rely specifically on this returning -EAGAIN for
+ * a CRC error detected in the response. Use other error codes
+ * for other situations. It will be converted to something else
+ * higher up the stack before it reaches userspace.
+ */
+static int read_one_response(struct fsi_master_gpio *master,
+		uint8_t data_size, struct fsi_gpio_msg *msgp, uint8_t *tagp)
+{
+	struct fsi_gpio_msg msg;
+	unsigned long flags;
+	uint32_t crc;
+	uint8_t tag;
+	int i;
+
+	local_irq_save(flags);
+
+	/* wait for the start bit */
+	for (i = 0; i < FSI_MASTER_MTOE_COUNT; i++) {
+		msg.bits = 0;
+		msg.msg = 0;
+		serial_in(master, &msg, 1);
+		if (msg.msg)
+			break;
+	}
+	if (i == FSI_MASTER_MTOE_COUNT) {
+		dev_dbg(master->dev,
+			"Master time out waiting for response\n");
+		local_irq_restore(flags);
+		return -ETIMEDOUT;
+	}
+
+	msg.bits = 0;
+	msg.msg = 0;
+
+	/* Read slave ID & response tag */
+	serial_in(master, &msg, 4);
+
+	tag = msg.msg & 0x3;
+
+	/* If we have an ACK and we're expecting data, clock the data in too */
+	if (tag == FSI_RESP_ACK && data_size)
+		serial_in(master, &msg, data_size * 8);
+
+	/* read CRC */
+	serial_in(master, &msg, FSI_CRC_SIZE);
+
+	local_irq_restore(flags);
+
+	/* we have a whole message now; check CRC */
+	crc = crc4(0, 1, 1);
+	crc = crc4(crc, msg.msg, msg.bits);
+	if (crc) {
+		/* Check if it's all 1's, that probably means the host is off */
+		if (((~msg.msg) & ((1ull << msg.bits) - 1)) == 0)
+			return -ENODEV;
+		dev_dbg(master->dev, "ERR response CRC msg: 0x%016llx (%d bits)\n",
+			msg.msg, msg.bits);
+		return -EAGAIN;
+	}
+
+	if (msgp)
+		*msgp = msg;
+	if (tagp)
+		*tagp = tag;
+
+	return 0;
+}
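The acceptance test above uses a standard CRC property: running crc4() over the start bit plus the whole received message (payload and its trailing 4-bit CRC) yields zero for an intact frame. The same predicate as a minimal sketch, using the in-kernel crc4() helper this driver already depends on (hypothetical wrapper, not part of this patch):

static inline bool fsi_gpio_frame_ok(const struct fsi_gpio_msg *msg)
{
	uint8_t crc = crc4(0, 1, 1);	/* fold in the start bit first */

	return crc4(crc, msg->msg, msg->bits) == 0;
}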
+
+static int issue_term(struct fsi_master_gpio *master, uint8_t slave)
+{
+	struct fsi_gpio_msg cmd;
+	unsigned long flags;
+	uint8_t tag;
+	int rc;
+
+	build_term_command(&cmd, slave);
+
+	local_irq_save(flags);
+	serial_out(master, &cmd);
+	echo_delay(master);
+	local_irq_restore(flags);
+
+	rc = read_one_response(master, 0, NULL, &tag);
+	if (rc < 0) {
+		dev_err(master->dev,
+				"TERM failed; lost communication with slave\n");
+		return -EIO;
+	} else if (tag != FSI_RESP_ACK) {
+		dev_err(master->dev, "TERM failed; response %d\n", tag);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int poll_for_response(struct fsi_master_gpio *master,
+		uint8_t slave, uint8_t size, void *data)
+{
+	struct fsi_gpio_msg response, cmd;
+	int busy_count = 0, rc, i;
+	unsigned long flags;
+	uint8_t tag;
+	uint8_t *data_byte = data;
+	int crc_err_retries = 0;
+retry:
+	rc = read_one_response(master, size, &response, &tag);
+
+	/* Handle retries on CRC errors */
+	if (rc == -EAGAIN) {
+		/* Too many retries ? */
+		if (crc_err_retries++ > FSI_CRC_ERR_RETRIES) {
+			/*
+			 * Pass it up as an -EIO, otherwise the upper level will
+			 * retry the whole command, which isn't what we want here.
+			 */
+			rc = -EIO;
+			goto fail;
+		}
+		dev_dbg(master->dev,
+			 "CRC error retry %d\n", crc_err_retries);
+		trace_fsi_master_gpio_crc_rsp_error(master);
+		build_epoll_command(&cmd, slave);
+		local_irq_save(flags);
+		clock_zeros(master, FSI_MASTER_EPOLL_CLOCKS);
+		serial_out(master, &cmd);
+		echo_delay(master);
+		local_irq_restore(flags);
+		goto retry;
+	} else if (rc)
+		goto fail;
+
+	switch (tag) {
+	case FSI_RESP_ACK:
+		if (size && data) {
+			uint64_t val = response.msg;
+			/* clear crc & mask */
+			val >>= 4;
+			val &= (1ull << (size * 8)) - 1;
+
+			for (i = 0; i < size; i++) {
+				data_byte[size-i-1] = val;
+				val >>= 8;
+			}
+		}
+		break;
+	case FSI_RESP_BUSY:
+		/*
+		 * It's necessary to clock the slave before issuing a
+		 * d-poll; this isn't indicated in the hardware protocol
+		 * spec. Fewer than 21 clocks causes the slave to hang,
+		 * 21 is ok.
+		 */
+		if (busy_count++ < FSI_MASTER_MAX_BUSY) {
+			build_dpoll_command(&cmd, slave);
+			local_irq_save(flags);
+			clock_zeros(master, FSI_MASTER_DPOLL_CLOCKS);
+			serial_out(master, &cmd);
+			echo_delay(master);
+			local_irq_restore(flags);
+			goto retry;
+		}
+		dev_warn(master->dev,
+			"ERR slave is stuck in busy state, issuing TERM\n");
+		local_irq_save(flags);
+		clock_zeros(master, FSI_MASTER_DPOLL_CLOCKS);
+		local_irq_restore(flags);
+		issue_term(master, slave);
+		rc = -EIO;
+		break;
+
+	case FSI_RESP_ERRA:
+		dev_dbg(master->dev, "ERRA received: 0x%x\n", (int)response.msg);
+		rc = -EIO;
+		break;
+	case FSI_RESP_ERRC:
+		dev_dbg(master->dev, "ERRC received: 0x%x\n", (int)response.msg);
+		trace_fsi_master_gpio_crc_cmd_error(master);
+		rc = -EAGAIN;
+		break;
+	}
+
+	if (busy_count > 0)
+		trace_fsi_master_gpio_poll_response_busy(master, busy_count);
+ fail:
+	/*
+	 * Clock out tSendDelay zeros to avoid signal reflections when
+	 * switching from receiving a response back to sending data.
+	 */
+	local_irq_save(flags);
+	clock_zeros(master, master->t_send_delay);
+	local_irq_restore(flags);
+
+	return rc;
+}
+
+static int send_request(struct fsi_master_gpio *master,
+		struct fsi_gpio_msg *cmd)
+{
+	unsigned long flags;
+
+	if (master->external_mode)
+		return -EBUSY;
+
+	local_irq_save(flags);
+	serial_out(master, cmd);
+	echo_delay(master);
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+static int fsi_master_gpio_xfer(struct fsi_master_gpio *master, uint8_t slave,
+		struct fsi_gpio_msg *cmd, size_t resp_len, void *resp)
+{
+	int rc = -EAGAIN, retries = 0;
+
+	while ((retries++) < FSI_CRC_ERR_RETRIES) {
+		rc = send_request(master, cmd);
+		if (rc)
+			break;
+		rc = poll_for_response(master, slave, resp_len, resp);
+		if (rc != -EAGAIN)
+			break;
+		rc = -EIO;
+		dev_warn(master->dev, "ECRC retry %d\n", retries);
+
+		/* Pace it a bit before retry */
+		msleep(1);
+	}
+
+	return rc;
+}
+
+static int fsi_master_gpio_read(struct fsi_master *_master, int link,
+		uint8_t id, uint32_t addr, void *val, size_t size)
+{
+	struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+	struct fsi_gpio_msg cmd;
+	int rc;
+
+	if (link != 0)
+		return -ENODEV;
+
+	mutex_lock(&master->cmd_lock);
+	build_ar_command(master, &cmd, id, addr, size, NULL);
+	rc = fsi_master_gpio_xfer(master, id, &cmd, size, val);
+	last_address_update(master, id, rc == 0, addr);
+	mutex_unlock(&master->cmd_lock);
+
+	return rc;
+}
+
+static int fsi_master_gpio_write(struct fsi_master *_master, int link,
+		uint8_t id, uint32_t addr, const void *val, size_t size)
+{
+	struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+	struct fsi_gpio_msg cmd;
+	int rc;
+
+	if (link != 0)
+		return -ENODEV;
+
+	mutex_lock(&master->cmd_lock);
+	build_ar_command(master, &cmd, id, addr, size, val);
+	rc = fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+	last_address_update(master, id, rc == 0, addr);
+	mutex_unlock(&master->cmd_lock);
+
+	return rc;
+}
+
+static int fsi_master_gpio_term(struct fsi_master *_master,
+		int link, uint8_t id)
+{
+	struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+	struct fsi_gpio_msg cmd;
+	int rc;
+
+	if (link != 0)
+		return -ENODEV;
+
+	mutex_lock(&master->cmd_lock);
+	build_term_command(&cmd, id);
+	rc = fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+	last_address_update(master, id, false, 0);
+	mutex_unlock(&master->cmd_lock);
+
+	return rc;
+}
+
+static int fsi_master_gpio_break(struct fsi_master *_master, int link)
+{
+	struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+	unsigned long flags;
+
+	if (link != 0)
+		return -ENODEV;
+
+	trace_fsi_master_gpio_break(master);
+
+	mutex_lock(&master->cmd_lock);
+	if (master->external_mode) {
+		mutex_unlock(&master->cmd_lock);
+		return -EBUSY;
+	}
+
+	local_irq_save(flags);
+
+	set_sda_output(master, 1);
+	sda_out(master, 1);
+	clock_toggle(master, FSI_PRE_BREAK_CLOCKS);
+	sda_out(master, 0);
+	clock_toggle(master, FSI_BREAK_CLOCKS);
+	echo_delay(master);
+	sda_out(master, 1);
+	clock_toggle(master, FSI_POST_BREAK_CLOCKS);
+
+	local_irq_restore(flags);
+
+	last_address_update(master, 0, false, 0);
+	mutex_unlock(&master->cmd_lock);
+
+	/* Wait for logic reset to take effect */
+	udelay(200);
+
+	return 0;
+}
+
+static void fsi_master_gpio_init(struct fsi_master_gpio *master)
+{
+	unsigned long flags;
+
+	gpiod_direction_output(master->gpio_mux, 1);
+	gpiod_direction_output(master->gpio_trans, 1);
+	gpiod_direction_output(master->gpio_enable, 1);
+	gpiod_direction_output(master->gpio_clk, 1);
+	gpiod_direction_output(master->gpio_data, 1);
+
+	/* todo: evaluate if clocks can be reduced */
+	local_irq_save(flags);
+	clock_zeros(master, FSI_INIT_CLOCKS);
+	local_irq_restore(flags);
+}
+
+static void fsi_master_gpio_init_external(struct fsi_master_gpio *master)
+{
+	gpiod_direction_output(master->gpio_mux, 0);
+	gpiod_direction_output(master->gpio_trans, 0);
+	gpiod_direction_output(master->gpio_enable, 1);
+	gpiod_direction_input(master->gpio_clk);
+	gpiod_direction_input(master->gpio_data);
+}
+
+static int fsi_master_gpio_link_enable(struct fsi_master *_master, int link)
+{
+	struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+	int rc = -EBUSY;
+
+	if (link != 0)
+		return -ENODEV;
+
+	mutex_lock(&master->cmd_lock);
+	if (!master->external_mode) {
+		gpiod_set_value(master->gpio_enable, 1);
+		rc = 0;
+	}
+	mutex_unlock(&master->cmd_lock);
+
+	return rc;
+}
+
+static int fsi_master_gpio_link_config(struct fsi_master *_master, int link,
+				       u8 t_send_delay, u8 t_echo_delay)
+{
+	struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+
+	if (link != 0)
+		return -ENODEV;
+
+	mutex_lock(&master->cmd_lock);
+	master->t_send_delay = t_send_delay;
+	master->t_echo_delay = t_echo_delay;
+	mutex_unlock(&master->cmd_lock);
+
+	return 0;
+}
+
+static ssize_t external_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fsi_master_gpio *master = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE - 1, "%u\n",
+			master->external_mode ? 1 : 0);
+}
+
+static ssize_t external_mode_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fsi_master_gpio *master = dev_get_drvdata(dev);
+	unsigned long val;
+	bool external_mode;
+	int err;
+
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	external_mode = !!val;
+
+	mutex_lock(&master->cmd_lock);
+
+	if (external_mode == master->external_mode) {
+		mutex_unlock(&master->cmd_lock);
+		return count;
+	}
+
+	master->external_mode = external_mode;
+	if (master->external_mode)
+		fsi_master_gpio_init_external(master);
+	else
+		fsi_master_gpio_init(master);
+
+	mutex_unlock(&master->cmd_lock);
+
+	fsi_master_rescan(&master->master);
+
+	return count;
+}
+
+static DEVICE_ATTR(external_mode, 0664,
+		external_mode_show, external_mode_store);
+
+static void fsi_master_gpio_release(struct device *dev)
+{
+	struct fsi_master_gpio *master = to_fsi_master_gpio(dev_to_fsi_master(dev));
+
+	of_node_put(dev_of_node(master->dev));
+
+	kfree(master);
+}
+
+static int fsi_master_gpio_probe(struct platform_device *pdev)
+{
+	struct fsi_master_gpio *master;
+	struct gpio_desc *gpio;
+	int rc;
+
+	master = kzalloc(sizeof(*master), GFP_KERNEL);
+	if (!master)
+		return -ENOMEM;
+
+	master->dev = &pdev->dev;
+	master->master.dev.parent = master->dev;
+	master->master.dev.of_node = of_node_get(dev_of_node(master->dev));
+	master->master.dev.release = fsi_master_gpio_release;
+	master->last_addr = LAST_ADDR_INVALID;
+
+	gpio = devm_gpiod_get(&pdev->dev, "clock", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(&pdev->dev, "failed to get clock gpio\n");
+		rc = PTR_ERR(gpio);
+		goto err_free;
+	}
+	master->gpio_clk = gpio;
+
+	gpio = devm_gpiod_get(&pdev->dev, "data", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(&pdev->dev, "failed to get data gpio\n");
+		rc = PTR_ERR(gpio);
+		goto err_free;
+	}
+	master->gpio_data = gpio;
+
+	/* Optional GPIOs */
+	gpio = devm_gpiod_get_optional(&pdev->dev, "trans", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(&pdev->dev, "failed to get trans gpio\n");
+		rc = PTR_ERR(gpio);
+		goto err_free;
+	}
+	master->gpio_trans = gpio;
+
+	gpio = devm_gpiod_get_optional(&pdev->dev, "enable", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(&pdev->dev, "failed to get enable gpio\n");
+		rc = PTR_ERR(gpio);
+		goto err_free;
+	}
+	master->gpio_enable = gpio;
+
+	gpio = devm_gpiod_get_optional(&pdev->dev, "mux", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(&pdev->dev, "failed to get mux gpio\n");
+		rc = PTR_ERR(gpio);
+		goto err_free;
+	}
+	master->gpio_mux = gpio;
+
+	/*
+	 * Check if the GPIO block is slow enough that no extra delays
+	 * are necessary. This improves performance on ast2500 by
+	 * an order of magnitude.
+	 */
+	master->no_delays = device_property_present(&pdev->dev, "no-gpio-delays");
+
+	/* Default FSI command delays */
+	master->t_send_delay = FSI_SEND_DELAY_CLOCKS;
+	master->t_echo_delay = FSI_ECHO_DELAY_CLOCKS;
+
+	master->master.n_links = 1;
+	master->master.flags = FSI_MASTER_FLAG_SWCLOCK;
+	master->master.read = fsi_master_gpio_read;
+	master->master.write = fsi_master_gpio_write;
+	master->master.term = fsi_master_gpio_term;
+	master->master.send_break = fsi_master_gpio_break;
+	master->master.link_enable = fsi_master_gpio_link_enable;
+	master->master.link_config = fsi_master_gpio_link_config;
+	platform_set_drvdata(pdev, master);
+	mutex_init(&master->cmd_lock);
+
+	fsi_master_gpio_init(master);
+
+	rc = device_create_file(&pdev->dev, &dev_attr_external_mode);
+	if (rc)
+		goto err_free;
+
+	rc = fsi_master_register(&master->master);
+	if (rc) {
+		device_remove_file(&pdev->dev, &dev_attr_external_mode);
+		put_device(&master->master.dev);
+		return rc;
+	}
+	return 0;
+ err_free:
+	kfree(master);
+	return rc;
+}
+
+static int fsi_master_gpio_remove(struct platform_device *pdev)
+{
+	struct fsi_master_gpio *master = platform_get_drvdata(pdev);
+
+	device_remove_file(&pdev->dev, &dev_attr_external_mode);
+
+	fsi_master_unregister(&master->master);
+
+	return 0;
+}
+
+static const struct of_device_id fsi_master_gpio_match[] = {
+	{ .compatible = "fsi-master-gpio" },
+	{ },
+};
+
+static struct platform_driver fsi_master_gpio_driver = {
+	.driver = {
+		.name		= "fsi-master-gpio",
+		.of_match_table	= fsi_master_gpio_match,
+	},
+	.probe	= fsi_master_gpio_probe,
+	.remove = fsi_master_gpio_remove,
+};
+
+module_platform_driver(fsi_master_gpio_driver);
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.19/drivers/fsi/fsi-master-hub.c b/src/kernel/linux/v4.19/drivers/fsi/fsi-master-hub.c
new file mode 100644
index 0000000..b3c1e9d
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/fsi/fsi-master-hub.c
@@ -0,0 +1,347 @@
+/*
+ * FSI hub master driver
+ *
+ * Copyright (C) IBM Corporation 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/fsi.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "fsi-master.h"
+
+/* Control Registers */
+#define FSI_MMODE		0x0		/* R/W: mode */
+#define FSI_MDLYR		0x4		/* R/W: delay */
+#define FSI_MCRSP		0x8		/* R/W: clock rate */
+#define FSI_MENP0		0x10		/* R/W: enable */
+#define FSI_MLEVP0		0x18		/* R: plug detect */
+#define FSI_MSENP0		0x18		/* S: Set enable */
+#define FSI_MCENP0		0x20		/* C: Clear enable */
+#define FSI_MAEB		0x70		/* R: Error address */
+#define FSI_MVER		0x74		/* R: master version/type */
+#define FSI_MRESP0		0xd0		/* W: Port reset */
+#define FSI_MESRB0		0x1d0		/* R: Master error status */
+#define FSI_MRESB0		0x1d0		/* W: Reset bridge */
+#define FSI_MECTRL		0x2e0		/* W: Error control */
+
+/* MMODE: Mode control */
+#define FSI_MMODE_EIP		0x80000000	/* Enable interrupt polling */
+#define FSI_MMODE_ECRC		0x40000000	/* Enable error recovery */
+#define FSI_MMODE_EPC		0x10000000	/* Enable parity checking */
+#define FSI_MMODE_P8_TO_LSB	0x00000010	/* Timeout value LSB */
+						/*   MSB=1, LSB=0 is 0.8 ms */
+						/*   MSB=0, LSB=1 is 0.9 ms */
+#define FSI_MMODE_CRS0SHFT	18		/* Clk rate selection 0 shift */
+#define FSI_MMODE_CRS0MASK	0x3ff		/* Clk rate selection 0 mask */
+#define FSI_MMODE_CRS1SHFT	8		/* Clk rate selection 1 shift */
+#define FSI_MMODE_CRS1MASK	0x3ff		/* Clk rate selection 1 mask */
+
+/* MRESB: Reset bridge */
+#define FSI_MRESB_RST_GEN	0x80000000	/* General reset */
+#define FSI_MRESB_RST_ERR	0x40000000	/* Error Reset */
+
+/* MRESP: Reset port */
+#define FSI_MRESP_RST_ALL_MASTER 0x20000000	/* Reset all FSI masters */
+#define FSI_MRESP_RST_ALL_LINK	0x10000000	/* Reset all FSI port contr. */
+#define FSI_MRESP_RST_MCR	0x08000000	/* Reset FSI master reg. */
+#define FSI_MRESP_RST_PYE	0x04000000	/* Reset FSI parity error */
+#define FSI_MRESP_RST_ALL	0xfc000000	/* Reset any error */
+
+/* MECTRL: Error control */
+#define FSI_MECTRL_EOAE		0x8000		/* Enable machine check when */
+						/* master 0 in error */
+#define FSI_MECTRL_P8_AUTO_TERM	0x4000		/* Auto terminate */
+
+#define FSI_ENGID_HUB_MASTER		0x1c
+#define FSI_HUB_LINK_OFFSET		0x80000
+#define FSI_HUB_LINK_SIZE		0x80000
+#define FSI_HUB_MASTER_MAX_LINKS	8
+
+#define FSI_LINK_ENABLE_SETUP_TIME	10	/* in ms */
+
+/*
+ * FSI hub master support
+ *
+ * A hub master increases the number of potential target devices that the
+ * primary FSI master can access. For each link a primary master supports,
+ * each of those links can in turn be chained to a hub master with multiple
+ * links of its own.
+ *
+ * The hub is controlled by a set of control registers exposed as a regular fsi
+ * device (the hub->upstream device), and provides access to the downstream FSI
+ * bus as through an address range on the slave itself (->addr and ->size).
+ *
+ * [This differs from "cascaded" masters, which expose the entire downstream
+ * bus entirely through the fsi device address range, and so have a smaller
+ * accessible address space.]
+ */
+struct fsi_master_hub {
+	struct fsi_master	master;
+	struct fsi_device	*upstream;
+	uint32_t		addr, size;	/* slave-relative addr of */
+						/* master address space */
+};
+
+#define to_fsi_master_hub(m) container_of(m, struct fsi_master_hub, master)
+
+static int hub_master_read(struct fsi_master *master, int link,
+			uint8_t id, uint32_t addr, void *val, size_t size)
+{
+	struct fsi_master_hub *hub = to_fsi_master_hub(master);
+
+	if (id != 0)
+		return -EINVAL;
+
+	addr += hub->addr + (link * FSI_HUB_LINK_SIZE);
+	return fsi_slave_read(hub->upstream->slave, addr, val, size);
+}
+
+static int hub_master_write(struct fsi_master *master, int link,
+			uint8_t id, uint32_t addr, const void *val, size_t size)
+{
+	struct fsi_master_hub *hub = to_fsi_master_hub(master);
+
+	if (id != 0)
+		return -EINVAL;
+
+	addr += hub->addr + (link * FSI_HUB_LINK_SIZE);
+	return fsi_slave_write(hub->upstream->slave, addr, val, size);
+}
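Both accessors above translate a (link, addr) pair into an address in the upstream slave's space: each link owns a fixed FSI_HUB_LINK_SIZE window starting at hub->addr. A minimal sketch of that translation (illustration only, not part of this patch); with the constants defined above, register 0x4 on link 2 targets slave address 0x180004:

static inline uint32_t hub_link_addr(const struct fsi_master_hub *hub,
				     int link, uint32_t addr)
{
	return hub->addr + link * FSI_HUB_LINK_SIZE + addr;
}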
+
+static int hub_master_break(struct fsi_master *master, int link)
+{
+	uint32_t addr;
+	__be32 cmd;
+
+	addr = 0x4;
+	cmd = cpu_to_be32(0xc0de0000);
+
+	return hub_master_write(master, link, 0, addr, &cmd, sizeof(cmd));
+}
+
+static int hub_master_link_enable(struct fsi_master *master, int link)
+{
+	struct fsi_master_hub *hub = to_fsi_master_hub(master);
+	int idx, bit;
+	__be32 reg;
+	int rc;
+
+	idx = link / 32;
+	bit = link % 32;
+
+	reg = cpu_to_be32(0x80000000 >> bit);
+
+	rc = fsi_device_write(hub->upstream, FSI_MSENP0 + (4 * idx), &reg, 4);
+
+	mdelay(FSI_LINK_ENABLE_SETUP_TIME);
+
+	fsi_device_read(hub->upstream, FSI_MENP0 + (4 * idx), &reg, 4);
+
+	return rc;
+}
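The enable above is a single set-bit write per link, MSB-first within each 32-link MSENP word. A small sketch of the index/mask computation it performs (hypothetical helper, not part of this patch); link 0 maps to bit 31 of FSI_MSENP0, and link 33 would map to bit 30 of the next word:

static inline void hub_link_enable_bit(int link, int *idx, __be32 *mask)
{
	*idx = link / 32;					/* which MSENPn word */
	*mask = cpu_to_be32(0x80000000u >> (link % 32));	/* MSB-first bit for this link */
}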
+
+static void hub_master_release(struct device *dev)
+{
+	struct fsi_master_hub *hub = to_fsi_master_hub(dev_to_fsi_master(dev));
+
+	kfree(hub);
+}
+
+/* mmode encoders */
+static inline u32 fsi_mmode_crs0(u32 x)
+{
+	return (x & FSI_MMODE_CRS0MASK) << FSI_MMODE_CRS0SHFT;
+}
+
+static inline u32 fsi_mmode_crs1(u32 x)
+{
+	return (x & FSI_MMODE_CRS1MASK) << FSI_MMODE_CRS1SHFT;
+}
+
+static int hub_master_init(struct fsi_master_hub *hub)
+{
+	struct fsi_device *dev = hub->upstream;
+	__be32 reg;
+	int rc;
+
+	reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
+			| FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
+	rc = fsi_device_write(dev, FSI_MRESP0, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	/* Initialize the MFSI (hub master) engine */
+	reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
+			| FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
+	rc = fsi_device_write(dev, FSI_MRESP0, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	reg = cpu_to_be32(FSI_MECTRL_EOAE | FSI_MECTRL_P8_AUTO_TERM);
+	rc = fsi_device_write(dev, FSI_MECTRL, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	reg = cpu_to_be32(FSI_MMODE_EIP | FSI_MMODE_ECRC | FSI_MMODE_EPC
+			| fsi_mmode_crs0(1) | fsi_mmode_crs1(1)
+			| FSI_MMODE_P8_TO_LSB);
+	rc = fsi_device_write(dev, FSI_MMODE, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	reg = cpu_to_be32(0xffff0000);
+	rc = fsi_device_write(dev, FSI_MDLYR, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	reg = cpu_to_be32(~0);
+	rc = fsi_device_write(dev, FSI_MSENP0, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	/* Leave enabled long enough for master logic to set up */
+	mdelay(FSI_LINK_ENABLE_SETUP_TIME);
+
+	rc = fsi_device_write(dev, FSI_MCENP0, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	rc = fsi_device_read(dev, FSI_MAEB, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK);
+	rc = fsi_device_write(dev, FSI_MRESP0, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	rc = fsi_device_read(dev, FSI_MLEVP0, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	/* Reset the master bridge */
+	reg = cpu_to_be32(FSI_MRESB_RST_GEN);
+	rc = fsi_device_write(dev, FSI_MRESB0, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	reg = cpu_to_be32(FSI_MRESB_RST_ERR);
+	return fsi_device_write(dev, FSI_MRESB0, &reg, sizeof(reg));
+}
+
+static int hub_master_probe(struct device *dev)
+{
+	struct fsi_device *fsi_dev = to_fsi_dev(dev);
+	struct fsi_master_hub *hub;
+	uint32_t reg, links;
+	__be32 __reg;
+	int rc;
+
+	rc = fsi_device_read(fsi_dev, FSI_MVER, &__reg, sizeof(__reg));
+	if (rc)
+		return rc;
+
+	reg = be32_to_cpu(__reg);
+	links = (reg >> 8) & 0xff;
+	dev_dbg(dev, "hub version %08x (%d links)\n", reg, links);
+
+	rc = fsi_slave_claim_range(fsi_dev->slave, FSI_HUB_LINK_OFFSET,
+			FSI_HUB_LINK_SIZE * links);
+	if (rc) {
+		dev_err(dev, "can't claim slave address range for links\n");
+		return rc;
+	}
+
+	hub = kzalloc(sizeof(*hub), GFP_KERNEL);
+	if (!hub) {
+		rc = -ENOMEM;
+		goto err_release;
+	}
+
+	hub->addr = FSI_HUB_LINK_OFFSET;
+	hub->size = FSI_HUB_LINK_SIZE * links;
+	hub->upstream = fsi_dev;
+
+	hub->master.dev.parent = dev;
+	hub->master.dev.release = hub_master_release;
+	hub->master.dev.of_node = of_node_get(dev_of_node(dev));
+
+	hub->master.n_links = links;
+	hub->master.read = hub_master_read;
+	hub->master.write = hub_master_write;
+	hub->master.send_break = hub_master_break;
+	hub->master.link_enable = hub_master_link_enable;
+
+	dev_set_drvdata(dev, hub);
+
+	hub_master_init(hub);
+
+	rc = fsi_master_register(&hub->master);
+	if (rc)
+		goto err_release;
+
+	/* At this point, fsi_master_register performs the device_initialize(),
+	 * and holds the sole reference on master.dev. This means the device
+	 * will be freed (via ->release) during any subsequent call to
+	 * fsi_master_unregister.  We add our own reference to it here, so we
+	 * can perform cleanup (in _remove()) without it being freed before
+	 * we're ready.
+	 */
+	get_device(&hub->master.dev);
+	return 0;
+
+err_release:
+	fsi_slave_release_range(fsi_dev->slave, FSI_HUB_LINK_OFFSET,
+			FSI_HUB_LINK_SIZE * links);
+	return rc;
+}
+
+static int hub_master_remove(struct device *dev)
+{
+	struct fsi_master_hub *hub = dev_get_drvdata(dev);
+
+	fsi_master_unregister(&hub->master);
+	fsi_slave_release_range(hub->upstream->slave, hub->addr, hub->size);
+	of_node_put(hub->master.dev.of_node);
+
+	/*
+	 * master.dev will likely be ->release()ed after this, which free()s
+	 * the hub
+	 */
+	put_device(&hub->master.dev);
+
+	return 0;
+}
+
+static struct fsi_device_id hub_master_ids[] = {
+	{
+		.engine_type = FSI_ENGID_HUB_MASTER,
+		.version = FSI_VERSION_ANY,
+	},
+	{ 0 }
+};
+
+static struct fsi_driver hub_master_driver = {
+	.id_table = hub_master_ids,
+	.drv = {
+		.name = "fsi-master-hub",
+		.bus = &fsi_bus_type,
+		.probe = hub_master_probe,
+		.remove = hub_master_remove,
+	}
+};
+
+module_fsi_driver(hub_master_driver);
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.19/drivers/fsi/fsi-master.h b/src/kernel/linux/v4.19/drivers/fsi/fsi-master.h
new file mode 100644
index 0000000..040a7d4
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/fsi/fsi-master.h
@@ -0,0 +1,97 @@
+/*
+ * FSI master definitions. These comprise the core <--> master interface,
+ * to allow the core to interact with the (hardware-specific) masters.
+ *
+ * Copyright (C) IBM Corporation 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DRIVERS_FSI_MASTER_H
+#define DRIVERS_FSI_MASTER_H
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+/* Various protocol delays */
+#define	FSI_ECHO_DELAY_CLOCKS	16	/* Number clocks for echo delay */
+#define	FSI_SEND_DELAY_CLOCKS	16	/* Number clocks for send delay */
+#define	FSI_PRE_BREAK_CLOCKS	50	/* Number clocks to prep for break */
+#define	FSI_BREAK_CLOCKS	256	/* Number of clocks to issue break */
+#define	FSI_POST_BREAK_CLOCKS	16000	/* Number clocks to set up cfam */
+#define	FSI_INIT_CLOCKS		5000	/* Clock out any old data */
+#define	FSI_MASTER_DPOLL_CLOCKS	50      /* < 21 will cause slave to hang */
+#define	FSI_MASTER_EPOLL_CLOCKS	50      /* Number of clocks for E_POLL retry */
+
+/* Various retry maximums */
+#define FSI_CRC_ERR_RETRIES	10
+#define	FSI_MASTER_MAX_BUSY	200
+#define	FSI_MASTER_MTOE_COUNT	1000
+
+/* Command encodings */
+#define	FSI_CMD_DPOLL		0x2
+#define	FSI_CMD_EPOLL		0x3
+#define	FSI_CMD_TERM		0x3f
+#define FSI_CMD_ABS_AR		0x4
+#define FSI_CMD_REL_AR		0x5
+#define FSI_CMD_SAME_AR		0x3	/* but only a 2-bit opcode... */
+
+/* Slave responses */
+#define	FSI_RESP_ACK		0	/* Success */
+#define	FSI_RESP_BUSY		1	/* Slave busy */
+#define	FSI_RESP_ERRA		2	/* Any (misc) Error */
+#define	FSI_RESP_ERRC		3	/* Slave reports master CRC error */
+
+/* Misc */
+#define	FSI_CRC_SIZE		4
+
+/* fsi-master definition and flags */
+#define FSI_MASTER_FLAG_SWCLOCK		0x1
+
+struct fsi_master {
+	struct device	dev;
+	int		idx;
+	int		n_links;
+	int		flags;
+	struct mutex	scan_lock;
+	int		(*read)(struct fsi_master *, int link, uint8_t id,
+				uint32_t addr, void *val, size_t size);
+	int		(*write)(struct fsi_master *, int link, uint8_t id,
+				uint32_t addr, const void *val, size_t size);
+	int		(*term)(struct fsi_master *, int link, uint8_t id);
+	int		(*send_break)(struct fsi_master *, int link);
+	int		(*link_enable)(struct fsi_master *, int link);
+	int		(*link_config)(struct fsi_master *, int link,
+				       u8 t_send_delay, u8 t_echo_delay);
+};
+
+#define dev_to_fsi_master(d) container_of(d, struct fsi_master, dev)
+
+/**
+ * fsi_master registration & lifetime: the fsi_master_register() and
+ * fsi_master_unregister() functions will take ownership of the master, and
+ * ->dev in particular. The registration path performs a get_device(), which
+ * takes the first reference on the device. Similarly, the unregistration path
+ * performs a put_device(), which may well drop the last reference.
+ *
+ * This means that master implementations *may* need to hold their own
+ * reference (via get_device()) on master->dev. In particular, if the device's
+ * ->release callback frees the fsi_master, then fsi_master_unregister will
+ * invoke this free if no other reference is held.
+ *
+ * The same applies for the error path of fsi_master_register; if the call
+ * fails, dev->release will have been invoked.
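+ *
+ * A typical pattern (an illustrative sketch only; the hub master driver in
+ * this patch follows it) is:
+ *
+ *	rc = fsi_master_register(&m->master);
+ *	if (rc)
+ *		return rc;
+ *	get_device(&m->master.dev);
+ *	...
+ *	fsi_master_unregister(&m->master);
+ *	put_device(&m->master.dev);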
+ */
+extern int fsi_master_register(struct fsi_master *master);
+extern void fsi_master_unregister(struct fsi_master *master);
+
+extern int fsi_master_rescan(struct fsi_master *master);
+
+#endif /* DRIVERS_FSI_MASTER_H */
diff --git a/src/kernel/linux/v4.19/drivers/fsi/fsi-sbefifo.c b/src/kernel/linux/v4.19/drivers/fsi/fsi-sbefifo.c
new file mode 100644
index 0000000..ae86134
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/fsi/fsi-sbefifo.c
@@ -0,0 +1,1066 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) IBM Corporation 2017
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fsi.h>
+#include <linux/fsi-sbefifo.h>
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+/*
+ * The SBEFIFO is a pipe-like FSI device for communicating with
+ * the self boot engine on POWER processors.
+ */
+
+#define DEVICE_NAME		"sbefifo"
+#define FSI_ENGID_SBE		0x22
+
+/*
+ * Register layout
+ */
+
+/* Register banks */
+#define SBEFIFO_UP		0x00		/* FSI -> Host */
+#define SBEFIFO_DOWN		0x40		/* Host -> FSI */
+
+/* Per-bank registers */
+#define SBEFIFO_FIFO		0x00		/* The FIFO itself */
+#define SBEFIFO_STS		0x04		/* Status register */
+#define   SBEFIFO_STS_PARITY_ERR	0x20000000
+#define   SBEFIFO_STS_RESET_REQ		0x02000000
+#define   SBEFIFO_STS_GOT_EOT		0x00800000
+#define   SBEFIFO_STS_MAX_XFER_LIMIT	0x00400000
+#define   SBEFIFO_STS_FULL		0x00200000
+#define   SBEFIFO_STS_EMPTY		0x00100000
+#define   SBEFIFO_STS_ECNT_MASK		0x000f0000
+#define   SBEFIFO_STS_ECNT_SHIFT	16
+#define   SBEFIFO_STS_VALID_MASK	0x0000ff00
+#define   SBEFIFO_STS_VALID_SHIFT	8
+#define   SBEFIFO_STS_EOT_MASK		0x000000ff
+#define   SBEFIFO_STS_EOT_SHIFT		0
+#define SBEFIFO_EOT_RAISE	0x08		/* (Up only) Set End Of Transfer */
+#define SBEFIFO_REQ_RESET	0x0C		/* (Up only) Reset Request */
+#define SBEFIFO_PERFORM_RESET	0x10		/* (Down only) Perform Reset */
+#define SBEFIFO_EOT_ACK		0x14		/* (Down only) Acknowledge EOT */
+#define SBEFIFO_DOWN_MAX	0x18		/* (Down only) Max transfer */
+
+/* CFAM GP Mailbox SelfBoot Message register */
+#define CFAM_GP_MBOX_SBM_ADDR	0x2824	/* Converted 0x2809 */
+
+#define CFAM_SBM_SBE_BOOTED		0x80000000
+#define CFAM_SBM_SBE_ASYNC_FFDC		0x40000000
+#define CFAM_SBM_SBE_STATE_MASK		0x00f00000
+#define CFAM_SBM_SBE_STATE_SHIFT	20
+
+enum sbe_state
+{
+	SBE_STATE_UNKNOWN = 0x0, // Unknown, initial state
+	SBE_STATE_IPLING  = 0x1, // IPL'ing - autonomous mode (transient)
+	SBE_STATE_ISTEP   = 0x2, // ISTEP - Running IPL by steps (transient)
+	SBE_STATE_MPIPL   = 0x3, // MPIPL
+	SBE_STATE_RUNTIME = 0x4, // SBE Runtime
+	SBE_STATE_DMT     = 0x5, // Dead Man Timer State (transient)
+	SBE_STATE_DUMP    = 0x6, // Dumping
+	SBE_STATE_FAILURE = 0x7, // Internal SBE failure
+	SBE_STATE_QUIESCE = 0x8, // Final state - needs SBE reset to get out
+};
+
+/* FIFO depth */
+#define SBEFIFO_FIFO_DEPTH		8
+
+/* Helpers */
+#define sbefifo_empty(sts)	((sts) & SBEFIFO_STS_EMPTY)
+#define sbefifo_full(sts)	((sts) & SBEFIFO_STS_FULL)
+#define sbefifo_parity_err(sts)	((sts) & SBEFIFO_STS_PARITY_ERR)
+#define sbefifo_populated(sts)	(((sts) & SBEFIFO_STS_ECNT_MASK) >> SBEFIFO_STS_ECNT_SHIFT)
+#define sbefifo_vacant(sts)	(SBEFIFO_FIFO_DEPTH - sbefifo_populated(sts))
+#define sbefifo_eot_set(sts)	(((sts) & SBEFIFO_STS_EOT_MASK) >> SBEFIFO_STS_EOT_SHIFT)
+
+/* Reset request timeout in ms */
+#define SBEFIFO_RESET_TIMEOUT		10000
+
+/* Timeouts for commands in ms */
+#define SBEFIFO_TIMEOUT_START_CMD	10000
+#define SBEFIFO_TIMEOUT_IN_CMD		1000
+#define SBEFIFO_TIMEOUT_START_RSP	10000
+#define SBEFIFO_TIMEOUT_IN_RSP		1000
+
+/* Other constants */
+#define SBEFIFO_MAX_USER_CMD_LEN	(0x100000 + PAGE_SIZE)
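+/*
+ * Writing this value as a single 32-bit big-endian word to the char device
+ * requests a FIFO reset instead of queueing a command (see
+ * sbefifo_user_write() below).
+ */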
+#define SBEFIFO_RESET_MAGIC		0x52534554 /* "RSET" */
+
+struct sbefifo {
+	uint32_t		magic;
+#define SBEFIFO_MAGIC		0x53424546 /* "SBEF" */
+	struct fsi_device	*fsi_dev;
+	struct device		dev;
+	struct cdev		cdev;
+	struct mutex		lock;
+	bool			broken;
+	bool			dead;
+	bool			async_ffdc;
+};
+
+struct sbefifo_user {
+	struct sbefifo		*sbefifo;
+	struct mutex		file_lock;
+	void			*cmd_page;
+	void			*pending_cmd;
+	size_t			pending_len;
+};
+
+static DEFINE_MUTEX(sbefifo_ffdc_mutex);
+
+
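+/*
+ * FFDC package layout as decoded below (one package per loop iteration):
+ *
+ *	word 0: 0xFFDC in the top 16 bits, data length in words in the
+ *		bottom 16 bits
+ *	word 1: command class and command that produced the FFDC
+ *	word 2: response code
+ *	then 'length' words of raw FFDC data
+ */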
+static void __sbefifo_dump_ffdc(struct device *dev, const __be32 *ffdc,
+				size_t ffdc_sz, bool internal)
+{
+	int pack = 0;
+#define FFDC_LSIZE	60
+	static char ffdc_line[FFDC_LSIZE];
+	char *p = ffdc_line;
+
+	while (ffdc_sz) {
+		u32 w0, w1, w2, i;
+		if (ffdc_sz < 3) {
+			dev_err(dev, "SBE invalid FFDC package size %zd\n", ffdc_sz);
+			return;
+		}
+		w0 = be32_to_cpu(*(ffdc++));
+		w1 = be32_to_cpu(*(ffdc++));
+		w2 = be32_to_cpu(*(ffdc++));
+		ffdc_sz -= 3;
+		if ((w0 >> 16) != 0xFFDC) {
+			dev_err(dev, "SBE invalid FFDC package signature %08x %08x %08x\n",
+				w0, w1, w2);
+			break;
+		}
+		w0 &= 0xffff;
+		if (w0 > ffdc_sz) {
+			dev_err(dev, "SBE FFDC package len %d words but only %zd remaining\n",
+				w0, ffdc_sz);
+			w0 = ffdc_sz;
+			break;
+		}
+		if (internal) {
+			dev_warn(dev, "+---- SBE FFDC package %d for async err -----+\n",
+				 pack++);
+		} else {
+			dev_warn(dev, "+---- SBE FFDC package %d for cmd %02x:%02x -----+\n",
+				 pack++, (w1 >> 8) & 0xff, w1 & 0xff);
+		}
+		dev_warn(dev, "| Response code: %08x                   |\n", w2);
+		dev_warn(dev, "|-------------------------------------------|\n");
+		for (i = 0; i < w0; i++) {
+			if ((i & 3) == 0) {
+				p = ffdc_line;
+				p += sprintf(p, "| %04x:", i << 4);
+			}
+			p += sprintf(p, " %08x", be32_to_cpu(*(ffdc++)));
+			ffdc_sz--;
+			if ((i & 3) == 3 || i == (w0 - 1)) {
+				while ((i & 3) < 3) {
+					p += sprintf(p, "         ");
+					i++;
+				}
+				dev_warn(dev, "%s |\n", ffdc_line);
+			}
+		}
+		dev_warn(dev, "+-------------------------------------------+\n");
+	}
+}
+
+static void sbefifo_dump_ffdc(struct device *dev, const __be32 *ffdc,
+			      size_t ffdc_sz, bool internal)
+{
+	mutex_lock(&sbefifo_ffdc_mutex);
+	__sbefifo_dump_ffdc(dev, ffdc, ffdc_sz, internal);
+	mutex_unlock(&sbefifo_ffdc_mutex);
+}
+
+int sbefifo_parse_status(struct device *dev, u16 cmd, __be32 *response,
+			 size_t resp_len, size_t *data_len)
+{
+	u32 dh, s0, s1;
+	size_t ffdc_sz;
+
+	if (resp_len < 3) {
+		pr_debug("sbefifo: cmd %04x, response too small: %zd\n",
+			 cmd, resp_len);
+		return -ENXIO;
+	}
+	dh = be32_to_cpu(response[resp_len - 1]);
+	if (dh > resp_len || dh < 3) {
+		dev_err(dev, "SBE cmd %02x:%02x status offset out of range: %d/%zd\n",
+			cmd >> 8, cmd & 0xff, dh, resp_len);
+		return -ENXIO;
+	}
+	s0 = be32_to_cpu(response[resp_len - dh]);
+	s1 = be32_to_cpu(response[resp_len - dh + 1]);
+	if (((s0 >> 16) != 0xC0DE) || ((s0 & 0xffff) != cmd)) {
+		dev_err(dev, "SBE cmd %02x:%02x, status signature invalid: 0x%08x 0x%08x\n",
+			cmd >> 8, cmd & 0xff, s0, s1);
+		return -ENXIO;
+	}
+	if (s1 != 0) {
+		ffdc_sz = dh - 3;
+		dev_warn(dev, "SBE error cmd %02x:%02x status=%04x:%04x\n",
+			 cmd >> 8, cmd & 0xff, s1 >> 16, s1 & 0xffff);
+		if (ffdc_sz)
+			sbefifo_dump_ffdc(dev, &response[resp_len - dh + 2],
+					  ffdc_sz, false);
+	}
+	if (data_len)
+		*data_len = resp_len - dh;
+
+	/*
+	 * The primary status doesn't have the top bit set, so it can't be
+	 * confused with Linux negative error codes; return the status word
+	 * whole.
+	 */
+	return s1;
+}
+EXPORT_SYMBOL_GPL(sbefifo_parse_status);
+
+static int sbefifo_regr(struct sbefifo *sbefifo, int reg, u32 *word)
+{
+	__be32 raw_word;
+	int rc;
+
+	rc = fsi_device_read(sbefifo->fsi_dev, reg, &raw_word,
+			     sizeof(raw_word));
+	if (rc)
+		return rc;
+
+	*word = be32_to_cpu(raw_word);
+
+	return 0;
+}
+
+static int sbefifo_regw(struct sbefifo *sbefifo, int reg, u32 word)
+{
+	__be32 raw_word = cpu_to_be32(word);
+
+	return fsi_device_write(sbefifo->fsi_dev, reg, &raw_word,
+				sizeof(raw_word));
+}
+
+static int sbefifo_check_sbe_state(struct sbefifo *sbefifo)
+{
+	__be32 raw_word;
+	u32 sbm;
+	int rc;
+
+	rc = fsi_slave_read(sbefifo->fsi_dev->slave, CFAM_GP_MBOX_SBM_ADDR,
+			    &raw_word, sizeof(raw_word));
+	if (rc)
+		return rc;
+	sbm = be32_to_cpu(raw_word);
+
+	/* Has the SBE booted at all? */
+	if (!(sbm & CFAM_SBM_SBE_BOOTED))
+		return -ESHUTDOWN;
+
+	/* Check its state */
+	switch ((sbm & CFAM_SBM_SBE_STATE_MASK) >> CFAM_SBM_SBE_STATE_SHIFT) {
+	case SBE_STATE_UNKNOWN:
+		return -ESHUTDOWN;
+	case SBE_STATE_IPLING:
+	case SBE_STATE_ISTEP:
+	case SBE_STATE_MPIPL:
+	case SBE_STATE_DMT:
+		return -EBUSY;
+	case SBE_STATE_RUNTIME:
+	case SBE_STATE_DUMP: /* Not sure about that one */
+		break;
+	case SBE_STATE_FAILURE:
+	case SBE_STATE_QUIESCE:
+		return -ESHUTDOWN;
+	}
+
+	/* Is there async FFDC available? Remember it */
+	if (sbm & CFAM_SBM_SBE_ASYNC_FFDC)
+		sbefifo->async_ffdc = true;
+
+	return 0;
+}
+
+/* Don't flip endianness of data to/from FIFO, just pass through. */
+static int sbefifo_down_read(struct sbefifo *sbefifo, __be32 *word)
+{
+	return fsi_device_read(sbefifo->fsi_dev, SBEFIFO_DOWN, word,
+			       sizeof(*word));
+}
+
+static int sbefifo_up_write(struct sbefifo *sbefifo, __be32 word)
+{
+	return fsi_device_write(sbefifo->fsi_dev, SBEFIFO_UP, &word,
+				sizeof(word));
+}
+
+static int sbefifo_request_reset(struct sbefifo *sbefifo)
+{
+	struct device *dev = &sbefifo->fsi_dev->dev;
+	u32 status, timeout;
+	int rc;
+
+	dev_dbg(dev, "Requesting FIFO reset\n");
+
+	/* Mark broken first, will be cleared if reset succeeds */
+	sbefifo->broken = true;
+
+	/* Send reset request */
+	rc = sbefifo_regw(sbefifo, SBEFIFO_UP | SBEFIFO_REQ_RESET, 1);
+	if (rc) {
+		dev_err(dev, "Sending reset request failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Wait for it to complete */
+	for (timeout = 0; timeout < SBEFIFO_RESET_TIMEOUT; timeout++) {
+		rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &status);
+		if (rc) {
+			dev_err(dev, "Failed to read UP fifo status during reset, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		if (!(status & SBEFIFO_STS_RESET_REQ)) {
+			dev_dbg(dev, "FIFO reset done\n");
+			sbefifo->broken = false;
+			return 0;
+		}
+
+		msleep(1);
+	}
+	dev_err(dev, "FIFO reset timed out\n");
+
+	return -ETIMEDOUT;
+}
+
+static int sbefifo_cleanup_hw(struct sbefifo *sbefifo)
+{
+	struct device *dev = &sbefifo->fsi_dev->dev;
+	u32 up_status, down_status;
+	bool need_reset = false;
+	int rc;
+
+	rc = sbefifo_check_sbe_state(sbefifo);
+	if (rc) {
+		dev_dbg(dev, "SBE state=%d\n", rc);
+		return rc;
+	}
+
+	/* If broken, we don't need to look at status, go straight to reset */
+	if (sbefifo->broken)
+		goto do_reset;
+
+	rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &up_status);
+	if (rc) {
+		dev_err(dev, "Cleanup: Reading UP status failed, rc=%d\n", rc);
+
+		/* Will try reset again on next attempt at using it */
+		sbefifo->broken = true;
+		return rc;
+	}
+
+	rc = sbefifo_regr(sbefifo, SBEFIFO_DOWN | SBEFIFO_STS, &down_status);
+	if (rc) {
+		dev_err(dev, "Cleanup: Reading DOWN status failed, rc=%d\n", rc);
+
+		/* Will try reset again on next attempt at using it */
+		sbefifo->broken = true;
+		return rc;
+	}
+
+	/* Does the FIFO already contain a reset request from the SBE? */
+	if (down_status & SBEFIFO_STS_RESET_REQ) {
+		dev_info(dev, "Cleanup: FIFO reset request set, resetting\n");
+		rc = sbefifo_regw(sbefifo, SBEFIFO_UP, SBEFIFO_PERFORM_RESET);
+		if (rc) {
+			sbefifo->broken = true;
+			dev_err(dev, "Cleanup: Reset reg write failed, rc=%d\n", rc);
+			return rc;
+		}
+		sbefifo->broken = false;
+		return 0;
+	}
+
+	/* Parity error on either FIFO? */
+	if ((up_status | down_status) & SBEFIFO_STS_PARITY_ERR)
+		need_reset = true;
+
+	/* Is either FIFO not empty? */
+	if (!((up_status & down_status) & SBEFIFO_STS_EMPTY))
+		need_reset = true;
+
+	if (!need_reset)
+		return 0;
+
+	dev_info(dev, "Cleanup: FIFO not clean (up=0x%08x down=0x%08x)\n",
+		 up_status, down_status);
+
+ do_reset:
+
+	/* Mark broken, will be cleared if/when reset succeeds */
+	return sbefifo_request_reset(sbefifo);
+}
+
+static int sbefifo_wait(struct sbefifo *sbefifo, bool up,
+			u32 *status, unsigned long timeout)
+{
+	struct device *dev = &sbefifo->fsi_dev->dev;
+	unsigned long end_time;
+	bool ready = false;
+	u32 addr, sts = 0;
+	int rc;
+
+	dev_vdbg(dev, "Wait on %s fifo...\n", up ? "up" : "down");
+
+	addr = (up ? SBEFIFO_UP : SBEFIFO_DOWN) | SBEFIFO_STS;
+
+	end_time = jiffies + timeout;
+	while (!time_after(jiffies, end_time)) {
+		cond_resched();
+		rc = sbefifo_regr(sbefifo, addr, &sts);
+		if (rc < 0) {
+			dev_err(dev, "FSI error %d reading status register\n", rc);
+			return rc;
+		}
+		if (!up && sbefifo_parity_err(sts)) {
+			dev_err(dev, "Parity error in DOWN FIFO\n");
+			return -ENXIO;
+		}
+		ready = !(up ? sbefifo_full(sts) : sbefifo_empty(sts));
+		if (ready)
+			break;
+	}
+	if (!ready) {
+		dev_err(dev, "%s FIFO timeout! status=%08x\n", up ? "UP" : "DOWN", sts);
+		return -ETIMEDOUT;
+	}
+	dev_vdbg(dev, "End of wait status: %08x\n", sts);
+
+	*status = sts;
+
+	return 0;
+}
+
+static int sbefifo_send_command(struct sbefifo *sbefifo,
+				const __be32 *command, size_t cmd_len)
+{
+	struct device *dev = &sbefifo->fsi_dev->dev;
+	size_t len, chunk, vacant = 0, remaining = cmd_len;
+	unsigned long timeout;
+	u32 status;
+	int rc;
+
+	dev_vdbg(dev, "sending command (%zd words, cmd=%04x)\n",
+		 cmd_len, be32_to_cpu(command[1]));
+
+	/* As long as there's something to send */
+	timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_CMD);
+	while (remaining) {
+		/* Wait for room in the FIFO */
+		rc = sbefifo_wait(sbefifo, true, &status, timeout);
+		if (rc < 0)
+			return rc;
+		timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_IN_CMD);
+
+		vacant = sbefifo_vacant(status);
+		len = chunk = min(vacant, remaining);
+
+		dev_vdbg(dev, "  status=%08x vacant=%zd chunk=%zd\n",
+			 status, vacant, chunk);
+
+		/* Write as much as we can */
+		while (len--) {
+			rc = sbefifo_up_write(sbefifo, *(command++));
+			if (rc) {
+				dev_err(dev, "FSI error %d writing UP FIFO\n", rc);
+				return rc;
+			}
+		}
+		remaining -= chunk;
+		vacant -= chunk;
+	}
+
+	/* If there's no room left, wait for some to write EOT */
+	if (!vacant) {
+		rc = sbefifo_wait(sbefifo, true, &status, timeout);
+		if (rc)
+			return rc;
+	}
+
+	/* Send an EOT */
+	rc = sbefifo_regw(sbefifo, SBEFIFO_UP | SBEFIFO_EOT_RAISE, 0);
+	if (rc)
+		dev_err(dev, "FSI error %d writing EOT\n", rc);
+	return rc;
+}
+
+static int sbefifo_read_response(struct sbefifo *sbefifo, struct iov_iter *response)
+{
+	struct device *dev = &sbefifo->fsi_dev->dev;
+	u32 status, eot_set;
+	unsigned long timeout;
+	bool overflow = false;
+	__be32 data;
+	size_t len;
+	int rc;
+
+	dev_vdbg(dev, "reading response, buflen = %zd\n", iov_iter_count(response));
+
+	timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_RSP);
+	for (;;) {
+		/* Grab FIFO status (this will handle parity errors) */
+		rc = sbefifo_wait(sbefifo, false, &status, timeout);
+		if (rc < 0)
+			return rc;
+		timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_IN_RSP);
+
+		/* Decode status */
+		len = sbefifo_populated(status);
+		eot_set = sbefifo_eot_set(status);
+
+		dev_vdbg(dev, "  chunk size %zd eot_set=0x%x\n", len, eot_set);
+
+		/* Go through the chunk */
+		while (len--) {
+			/* Read the data */
+			rc = sbefifo_down_read(sbefifo, &data);
+			if (rc < 0)
+				return rc;
+
+			/* Was it an EOT? */
+			if (eot_set & 0x80) {
+				/*
+				 * There should be nothing else in the FIFO;
+				 * if there is, mark the FIFO broken so it is
+				 * reset on next use, but don't fail the
+				 * command.
+				 */
+				if (len) {
+					dev_warn(dev, "FIFO read hit EOT with %zd words still queued\n",
+						 len);
+					sbefifo->broken = true;
+				}
+
+				/* We are done */
+				rc = sbefifo_regw(sbefifo,
+						  SBEFIFO_DOWN | SBEFIFO_EOT_ACK, 0);
+
+				/*
+				 * If that write fails, still complete the request but mark
+				 * the fifo as broken for a subsequent reset (not much else
+				 * we can do here).
+				 */
+				if (rc) {
+					dev_err(dev, "FSI error %d ack'ing EOT\n", rc);
+					sbefifo->broken = true;
+				}
+
+				/* Tell whether we overflowed */
+				return overflow ? -EOVERFLOW : 0;
+			}
+
+			/* Store it if there is room */
+			if (iov_iter_count(response) >= sizeof(__be32)) {
+				if (copy_to_iter(&data, sizeof(__be32), response) < sizeof(__be32))
+					return -EFAULT;
+			} else {
+				dev_vdbg(dev, "Response overflowed!\n");
+
+				overflow = true;
+			}
+
+			/* Next EOT bit */
+			eot_set <<= 1;
+		}
+	}
+	/* Shouldn't happen */
+	return -EIO;
+}
+
+static int sbefifo_do_command(struct sbefifo *sbefifo,
+			      const __be32 *command, size_t cmd_len,
+			      struct iov_iter *response)
+{
+	/* Try sending the command */
+	int rc = sbefifo_send_command(sbefifo, command, cmd_len);
+	if (rc)
+		return rc;
+
+	/* Now, get the response */
+	return sbefifo_read_response(sbefifo, response);
+}
+
+static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo)
+{
+	struct device *dev = &sbefifo->fsi_dev->dev;
+	struct iov_iter ffdc_iter;
+	struct kvec ffdc_iov;
+	__be32 *ffdc;
+	size_t ffdc_sz;
+	__be32 cmd[2];
+	int rc;
+
+	sbefifo->async_ffdc = false;
+	ffdc = vmalloc(SBEFIFO_MAX_FFDC_SIZE);
+	if (!ffdc) {
+		dev_err(dev, "Failed to allocate SBE FFDC buffer\n");
+		return;
+	}
+	ffdc_iov.iov_base = ffdc;
+	ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE;
+	iov_iter_kvec(&ffdc_iter, WRITE | ITER_KVEC, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
+	cmd[0] = cpu_to_be32(2);
+	cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC);
+	rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter);
+	if (rc != 0) {
+		dev_err(dev, "Error %d retrieving SBE FFDC\n", rc);
+		goto bail;
+	}
+	ffdc_sz = SBEFIFO_MAX_FFDC_SIZE - iov_iter_count(&ffdc_iter);
+	ffdc_sz /= sizeof(__be32);
+	rc = sbefifo_parse_status(dev, SBEFIFO_CMD_GET_SBE_FFDC, ffdc,
+				  ffdc_sz, &ffdc_sz);
+	if (rc != 0) {
+		dev_err(dev, "Error %d decoding SBE FFDC\n", rc);
+		goto bail;
+	}
+	if (ffdc_sz > 0)
+		sbefifo_dump_ffdc(dev, ffdc, ffdc_sz, true);
+ bail:
+	vfree(ffdc);
+
+}
+
+static int __sbefifo_submit(struct sbefifo *sbefifo,
+			    const __be32 *command, size_t cmd_len,
+			    struct iov_iter *response)
+{
+	struct device *dev = &sbefifo->fsi_dev->dev;
+	int rc;
+
+	if (sbefifo->dead)
+		return -ENODEV;
+
+	if (cmd_len < 2 || be32_to_cpu(command[0]) != cmd_len) {
+		dev_vdbg(dev, "Invalid command len %zd (header: %d)\n",
+			 cmd_len, be32_to_cpu(command[0]));
+		return -EINVAL;
+	}
+
+	/* First ensure the HW is in a clean state */
+	rc = sbefifo_cleanup_hw(sbefifo);
+	if (rc)
+		return rc;
+
+	/* Collect any pending async FFDC first */
+	if (sbefifo->async_ffdc)
+		sbefifo_collect_async_ffdc(sbefifo);
+
+	rc = sbefifo_do_command(sbefifo, command, cmd_len, response);
+	if (rc != 0 && rc != -EOVERFLOW)
+		goto fail;
+	return rc;
+ fail:
+	/*
+	 * On failure, attempt a reset. Ignore the result, it will mark
+	 * the fifo broken if the reset fails
+	 */
+	sbefifo_request_reset(sbefifo);
+
+	/* Return original error */
+	return rc;
+}
+
+/**
+ * sbefifo_submit() - Submit an SBE FIFO command and receive the response
+ * @dev: The sbefifo device
+ * @command: The raw command data
+ * @cmd_len: The command size (in 32-bit words)
+ * @response: The output response buffer
+ * @resp_len: In: Response buffer size, Out: Response size (both in 32-bit words)
+ *
+ * This performs the entire operation. If the response buffer overflows,
+ * -EOVERFLOW is returned.
+ */
+int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
+		   __be32 *response, size_t *resp_len)
+{
+	struct sbefifo *sbefifo;
+	struct iov_iter resp_iter;
+	struct kvec resp_iov;
+	size_t rbytes;
+	int rc;
+
+	if (!dev)
+		return -ENODEV;
+	sbefifo = dev_get_drvdata(dev);
+	if (!sbefifo)
+		return -ENODEV;
+	if (WARN_ON_ONCE(sbefifo->magic != SBEFIFO_MAGIC))
+		return -ENODEV;
+	if (!resp_len || !command || !response)
+		return -EINVAL;
+
+	/* Prepare iov iterator */
+	rbytes = (*resp_len) * sizeof(__be32);
+	resp_iov.iov_base = response;
+	resp_iov.iov_len = rbytes;
+	iov_iter_kvec(&resp_iter, WRITE | ITER_KVEC, &resp_iov, 1, rbytes);
+
+	/* Perform the command */
+	mutex_lock(&sbefifo->lock);
+	rc = __sbefifo_submit(sbefifo, command, cmd_len, &resp_iter);
+	mutex_unlock(&sbefifo->lock);
+
+	/* Extract the response length */
+	rbytes -= iov_iter_count(&resp_iter);
+	*resp_len = rbytes / sizeof(__be32);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(sbefifo_submit);
+
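+/*
+ * Typical in-kernel use (an illustrative sketch; the command and response
+ * buffers and their word counts are the caller's own):
+ *
+ *	size_t resp_len = resp_words, data_len;
+ *	int rc;
+ *
+ *	rc = sbefifo_submit(sbefifo_dev, cmd, cmd_words, resp, &resp_len);
+ *	if (rc)
+ *		return rc;
+ *	rc = sbefifo_parse_status(sbefifo_dev, cmd_code, resp, resp_len,
+ *				  &data_len);
+ */
+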
+/*
+ * Char device interface
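+ *
+ * Userspace writes one complete, length-prefixed command (a multiple of
+ * 4 bytes), then issues a read: the read submits the command and returns
+ * the response. A read with no command pending fails with -EAGAIN.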
+ */
+
+static void sbefifo_release_command(struct sbefifo_user *user)
+{
+	if (is_vmalloc_addr(user->pending_cmd))
+		vfree(user->pending_cmd);
+	user->pending_cmd = NULL;
+	user->pending_len = 0;
+}
+
+static int sbefifo_user_open(struct inode *inode, struct file *file)
+{
+	struct sbefifo *sbefifo = container_of(inode->i_cdev, struct sbefifo, cdev);
+	struct sbefifo_user *user;
+
+	user = kzalloc(sizeof(struct sbefifo_user), GFP_KERNEL);
+	if (!user)
+		return -ENOMEM;
+
+	file->private_data = user;
+	user->sbefifo = sbefifo;
+	user->cmd_page = (void *)__get_free_page(GFP_KERNEL);
+	if (!user->cmd_page) {
+		kfree(user);
+		return -ENOMEM;
+	}
+	mutex_init(&user->file_lock);
+
+	return 0;
+}
+
+static ssize_t sbefifo_user_read(struct file *file, char __user *buf,
+				 size_t len, loff_t *offset)
+{
+	struct sbefifo_user *user = file->private_data;
+	struct sbefifo *sbefifo;
+	struct iov_iter resp_iter;
+	struct iovec resp_iov;
+	size_t cmd_len;
+	int rc;
+
+	if (!user)
+		return -EINVAL;
+	sbefifo = user->sbefifo;
+	if (len & 3)
+		return -EINVAL;
+
+	mutex_lock(&user->file_lock);
+
+	/* Cronus relies on -EAGAIN after a short read */
+	if (user->pending_len == 0) {
+		rc = -EAGAIN;
+		goto bail;
+	}
+	if (user->pending_len < 8) {
+		rc = -EINVAL;
+		goto bail;
+	}
+	cmd_len = user->pending_len >> 2;
+
+	/* Prepare iov iterator */
+	resp_iov.iov_base = buf;
+	resp_iov.iov_len = len;
+	iov_iter_init(&resp_iter, WRITE, &resp_iov, 1, len);
+
+	/* Perform the command */
+	mutex_lock(&sbefifo->lock);
+	rc = __sbefifo_submit(sbefifo, user->pending_cmd, cmd_len, &resp_iter);
+	mutex_unlock(&sbefifo->lock);
+	if (rc < 0)
+		goto bail;
+
+	/* Extract the response length */
+	rc = len - iov_iter_count(&resp_iter);
+ bail:
+	sbefifo_release_command(user);
+	mutex_unlock(&user->file_lock);
+	return rc;
+}
+
+static ssize_t sbefifo_user_write(struct file *file, const char __user *buf,
+				  size_t len, loff_t *offset)
+{
+	struct sbefifo_user *user = file->private_data;
+	struct sbefifo *sbefifo;
+	int rc = len;
+
+	if (!user)
+		return -EINVAL;
+	sbefifo = user->sbefifo;
+	if (len > SBEFIFO_MAX_USER_CMD_LEN)
+		return -EINVAL;
+	if (len & 3)
+		return -EINVAL;
+
+	mutex_lock(&user->file_lock);
+
+	/* Can we use the pre-allocated buffer? If not, allocate */
+	if (len <= PAGE_SIZE)
+		user->pending_cmd = user->cmd_page;
+	else
+		user->pending_cmd = vmalloc(len);
+	if (!user->pending_cmd) {
+		rc = -ENOMEM;
+		goto bail;
+	}
+
+	/* Copy the command into the staging buffer */
+	if (copy_from_user(user->pending_cmd, buf, len)) {
+		rc = -EFAULT;
+		goto bail;
+	}
+
+	/* Check for the magic reset command */
+	if (len == 4 && be32_to_cpu(*(__be32 *)user->pending_cmd) ==
+	    SBEFIFO_RESET_MAGIC)  {
+
+		/* Clear out any pending command */
+		user->pending_len = 0;
+
+		/* Trigger reset request */
+		mutex_lock(&sbefifo->lock);
+		rc = sbefifo_request_reset(user->sbefifo);
+		mutex_unlock(&sbefifo->lock);
+		if (rc == 0)
+			rc = 4;
+		goto bail;
+	}
+
+	/* Update the staging buffer size */
+	user->pending_len = len;
+ bail:
+	if (!user->pending_len)
+		sbefifo_release_command(user);
+
+	mutex_unlock(&user->file_lock);
+
+	/* And that's it, we'll issue the command on a read */
+	return rc;
+}
+
+static int sbefifo_user_release(struct inode *inode, struct file *file)
+{
+	struct sbefifo_user *user = file->private_data;
+
+	if (!user)
+		return -EINVAL;
+
+	sbefifo_release_command(user);
+	free_page((unsigned long)user->cmd_page);
+	kfree(user);
+
+	return 0;
+}
+
+static const struct file_operations sbefifo_fops = {
+	.owner		= THIS_MODULE,
+	.open		= sbefifo_user_open,
+	.read		= sbefifo_user_read,
+	.write		= sbefifo_user_write,
+	.release	= sbefifo_user_release,
+};
+
+static void sbefifo_free(struct device *dev)
+{
+	struct sbefifo *sbefifo = container_of(dev, struct sbefifo, dev);
+
+	put_device(&sbefifo->fsi_dev->dev);
+	kfree(sbefifo);
+}
+
+/*
+ * Probe/remove
+ */
+
+static int sbefifo_probe(struct device *dev)
+{
+	struct fsi_device *fsi_dev = to_fsi_dev(dev);
+	struct sbefifo *sbefifo;
+	struct device_node *np;
+	struct platform_device *child;
+	char child_name[32];
+	int rc, didx, child_idx = 0;
+
+	dev_dbg(dev, "Found sbefifo device\n");
+
+	sbefifo = kzalloc(sizeof(*sbefifo), GFP_KERNEL);
+	if (!sbefifo)
+		return -ENOMEM;
+
+	/* Grab a reference to the device (parent of our cdev); we'll drop it later */
+	if (!get_device(dev)) {
+		kfree(sbefifo);
+		return -ENODEV;
+	}
+
+	sbefifo->magic = SBEFIFO_MAGIC;
+	sbefifo->fsi_dev = fsi_dev;
+	dev_set_drvdata(dev, sbefifo);
+	mutex_init(&sbefifo->lock);
+
+	/*
+	 * Try cleaning up the FIFO. If this fails, we still register the
+	 * driver and will try cleaning things up again on the next access.
+	 */
+	rc = sbefifo_cleanup_hw(sbefifo);
+	if (rc && rc != -ESHUTDOWN)
+		dev_err(dev, "Initial HW cleanup failed, will retry later\n");
+
+	/* Create chardev for userspace access */
+	sbefifo->dev.type = &fsi_cdev_type;
+	sbefifo->dev.parent = dev;
+	sbefifo->dev.release = sbefifo_free;
+	device_initialize(&sbefifo->dev);
+
+	/* Allocate a minor in the FSI space */
+	rc = fsi_get_new_minor(fsi_dev, fsi_dev_sbefifo, &sbefifo->dev.devt, &didx);
+	if (rc)
+		goto err;
+
+	dev_set_name(&sbefifo->dev, "sbefifo%d", didx);
+	cdev_init(&sbefifo->cdev, &sbefifo_fops);
+	rc = cdev_device_add(&sbefifo->cdev, &sbefifo->dev);
+	if (rc) {
+		dev_err(dev, "Error %d creating char device %s\n",
+			rc, dev_name(&sbefifo->dev));
+		goto err_free_minor;
+	}
+
+	/* Create platform devs for dts child nodes (occ, etc) */
+	for_each_available_child_of_node(dev->of_node, np) {
+		snprintf(child_name, sizeof(child_name), "%s-dev%d",
+			 dev_name(&sbefifo->dev), child_idx++);
+		child = of_platform_device_create(np, child_name, dev);
+		if (!child)
+			dev_warn(dev, "failed to create child %s dev\n",
+				 child_name);
+	}
+
+	return 0;
+ err_free_minor:
+	fsi_free_minor(sbefifo->dev.devt);
+ err:
+	put_device(&sbefifo->dev);
+	return rc;
+}
+
+static int sbefifo_unregister_child(struct device *dev, void *data)
+{
+	struct platform_device *child = to_platform_device(dev);
+
+	of_device_unregister(child);
+	if (dev->of_node)
+		of_node_clear_flag(dev->of_node, OF_POPULATED);
+
+	return 0;
+}
+
+static int sbefifo_remove(struct device *dev)
+{
+	struct sbefifo *sbefifo = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "Removing sbefifo device...\n");
+
+	mutex_lock(&sbefifo->lock);
+	sbefifo->dead = true;
+	mutex_unlock(&sbefifo->lock);
+
+	cdev_device_del(&sbefifo->cdev, &sbefifo->dev);
+	fsi_free_minor(sbefifo->dev.devt);
+	device_for_each_child(dev, NULL, sbefifo_unregister_child);
+	put_device(&sbefifo->dev);
+
+	return 0;
+}
+
+static struct fsi_device_id sbefifo_ids[] = {
+	{
+		.engine_type = FSI_ENGID_SBE,
+		.version = FSI_VERSION_ANY,
+	},
+	{ 0 }
+};
+
+static struct fsi_driver sbefifo_drv = {
+	.id_table = sbefifo_ids,
+	.drv = {
+		.name = DEVICE_NAME,
+		.bus = &fsi_bus_type,
+		.probe = sbefifo_probe,
+		.remove = sbefifo_remove,
+	}
+};
+
+static int sbefifo_init(void)
+{
+	return fsi_driver_register(&sbefifo_drv);
+}
+
+static void sbefifo_exit(void)
+{
+	fsi_driver_unregister(&sbefifo_drv);
+}
+
+module_init(sbefifo_init);
+module_exit(sbefifo_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Brad Bishop <bradleyb@fuzziesquirrel.com>");
+MODULE_AUTHOR("Eddie James <eajames@linux.vnet.ibm.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("Linux device interface to the POWER Self Boot Engine");
diff --git a/src/kernel/linux/v4.19/drivers/fsi/fsi-scom.c b/src/kernel/linux/v4.19/drivers/fsi/fsi-scom.c
new file mode 100644
index 0000000..fdc0e45
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/fsi/fsi-scom.c
@@ -0,0 +1,669 @@
+/*
+ * SCOM FSI Client device driver
+ *
+ * Copyright (C) IBM Corporation 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/fsi.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+
+#include <uapi/linux/fsi.h>
+
+#define FSI_ENGID_SCOM		0x5
+
+/* SCOM engine register set */
+#define SCOM_DATA0_REG		0x00
+#define SCOM_DATA1_REG		0x04
+#define SCOM_CMD_REG		0x08
+#define SCOM_FSI2PIB_RESET_REG	0x18
+#define SCOM_STATUS_REG		0x1C /* Read */
+#define SCOM_PIB_RESET_REG	0x1C /* Write */
+
+/* Command register */
+#define SCOM_WRITE_CMD		0x80000000
+#define SCOM_READ_CMD		0x00000000
+
+/* Status register bits */
+#define SCOM_STATUS_ERR_SUMMARY		0x80000000
+#define SCOM_STATUS_PROTECTION		0x01000000
+#define SCOM_STATUS_PARITY		0x04000000
+#define SCOM_STATUS_PIB_ABORT		0x00100000
+#define SCOM_STATUS_PIB_RESP_MASK	0x00007000
+#define SCOM_STATUS_PIB_RESP_SHIFT	12
+
+#define SCOM_STATUS_ANY_ERR		(SCOM_STATUS_PROTECTION | \
+					 SCOM_STATUS_PARITY |	  \
+					 SCOM_STATUS_PIB_ABORT | \
+					 SCOM_STATUS_PIB_RESP_MASK)
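+
+/*
+ * handle_fsi2pib_status() and handle_pib_status() below translate these
+ * interface bits and the PIB response code into errno values; -EBUSY makes
+ * get_scom()/put_scom() retry the access.
+ */
+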
+/* SCOM address encodings */
+#define XSCOM_ADDR_IND_FLAG		BIT_ULL(63)
+#define XSCOM_ADDR_INF_FORM1		BIT_ULL(60)
+
+/* Indirect SCOM address and data fields */
+#define XSCOM_ADDR_DIRECT_PART		0x7fffffffull
+#define XSCOM_ADDR_INDIRECT_PART	0x000fffff00000000ull
+#define XSCOM_DATA_IND_READ		BIT_ULL(63)
+#define XSCOM_DATA_IND_COMPLETE		BIT_ULL(31)
+#define XSCOM_DATA_IND_ERR_MASK		0x70000000ull
+#define XSCOM_DATA_IND_ERR_SHIFT	28
+#define XSCOM_DATA_IND_DATA		0x0000ffffull
+#define XSCOM_DATA_IND_FORM1_DATA	0x000fffffffffffffull
+#define XSCOM_ADDR_FORM1_LOW		0x000ffffffffull
+#define XSCOM_ADDR_FORM1_HI		0xfff00000000ull
+#define XSCOM_ADDR_FORM1_HI_SHIFT	20
+
+/* Retries */
+#define SCOM_MAX_RETRIES		100	/* Retries on busy */
+#define SCOM_MAX_IND_RETRIES		10	/* Retries indirect not ready */
+
+struct scom_device {
+	struct list_head link;
+	struct fsi_device *fsi_dev;
+	struct device dev;
+	struct cdev cdev;
+	struct mutex lock;
+	bool dead;
+};
+
+static int __put_scom(struct scom_device *scom_dev, uint64_t value,
+		      uint32_t addr, uint32_t *status)
+{
+	__be32 data, raw_status;
+	int rc;
+
+	data = cpu_to_be32((value >> 32) & 0xffffffff);
+	rc = fsi_device_write(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
+				sizeof(uint32_t));
+	if (rc)
+		return rc;
+
+	data = cpu_to_be32(value & 0xffffffff);
+	rc = fsi_device_write(scom_dev->fsi_dev, SCOM_DATA1_REG, &data,
+				sizeof(uint32_t));
+	if (rc)
+		return rc;
+
+	data = cpu_to_be32(SCOM_WRITE_CMD | addr);
+	rc = fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
+				sizeof(uint32_t));
+	if (rc)
+		return rc;
+	rc = fsi_device_read(scom_dev->fsi_dev, SCOM_STATUS_REG, &raw_status,
+			     sizeof(uint32_t));
+	if (rc)
+		return rc;
+	*status = be32_to_cpu(raw_status);
+
+	return 0;
+}
+
+static int __get_scom(struct scom_device *scom_dev, uint64_t *value,
+		      uint32_t addr, uint32_t *status)
+{
+	__be32 data, raw_status;
+	int rc;
+
+
+	*value = 0ULL;
+	data = cpu_to_be32(SCOM_READ_CMD | addr);
+	rc = fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
+				sizeof(uint32_t));
+	if (rc)
+		return rc;
+	rc = fsi_device_read(scom_dev->fsi_dev, SCOM_STATUS_REG, &raw_status,
+			     sizeof(uint32_t));
+	if (rc)
+		return rc;
+
+	/*
+	 * Read the data registers even on error, so we don't have
+	 * to interpret the status register here.
+	 */
+	rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
+				sizeof(uint32_t));
+	if (rc)
+		return rc;
+	*value |= (uint64_t)be32_to_cpu(data) << 32;
+	rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA1_REG, &data,
+				sizeof(uint32_t));
+	if (rc)
+		return rc;
+	*value |= be32_to_cpu(data);
+	*status = be32_to_cpu(raw_status);
+
+	return rc;
+}
+
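+/*
+ * Form-0 indirect SCOMs (a sketch of the sequence implemented below):
+ *
+ *  1. Do a direct SCOM write to the low (direct) part of the address, with
+ *     the data word carrying the indirect address bits plus, for a write,
+ *     the 16-bit data.
+ *  2. Read the register back: the result carries a completion bit, a 3-bit
+ *     PIB error field and, for a read, the 16-bit data.
+ *  3. Retry while the PIB reports "blocked", up to SCOM_MAX_IND_RETRIES.
+ */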
+static int put_indirect_scom_form0(struct scom_device *scom, uint64_t value,
+				   uint64_t addr, uint32_t *status)
+{
+	uint64_t ind_data, ind_addr;
+	int rc, retries, err = 0;
+
+	if (value & ~XSCOM_DATA_IND_DATA)
+		return -EINVAL;
+
+	ind_addr = addr & XSCOM_ADDR_DIRECT_PART;
+	ind_data = (addr & XSCOM_ADDR_INDIRECT_PART) | value;
+	rc = __put_scom(scom, ind_data, ind_addr, status);
+	if (rc || (*status & SCOM_STATUS_ANY_ERR))
+		return rc;
+
+	for (retries = 0; retries < SCOM_MAX_IND_RETRIES; retries++) {
+		rc = __get_scom(scom, &ind_data, addr, status);
+		if (rc || (*status & SCOM_STATUS_ANY_ERR))
+			return rc;
+
+		err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
+		*status = err << SCOM_STATUS_PIB_RESP_SHIFT;
+		if ((ind_data & XSCOM_DATA_IND_COMPLETE) || (err != SCOM_PIB_BLOCKED))
+			return 0;
+
+		msleep(1);
+	}
+	return rc;
+}
+
+static int put_indirect_scom_form1(struct scom_device *scom, uint64_t value,
+				   uint64_t addr, uint32_t *status)
+{
+	uint64_t ind_data, ind_addr;
+
+	if (value & ~XSCOM_DATA_IND_FORM1_DATA)
+		return -EINVAL;
+
+	ind_addr = addr & XSCOM_ADDR_FORM1_LOW;
+	ind_data = value | (addr & XSCOM_ADDR_FORM1_HI) << XSCOM_ADDR_FORM1_HI_SHIFT;
+	return __put_scom(scom, ind_data, ind_addr, status);
+}
+
+static int get_indirect_scom_form0(struct scom_device *scom, uint64_t *value,
+				   uint64_t addr, uint32_t *status)
+{
+	uint64_t ind_data, ind_addr;
+	int rc, retries, err = 0;
+
+	ind_addr = addr & XSCOM_ADDR_DIRECT_PART;
+	ind_data = (addr & XSCOM_ADDR_INDIRECT_PART) | XSCOM_DATA_IND_READ;
+	rc = __put_scom(scom, ind_data, ind_addr, status);
+	if (rc || (*status & SCOM_STATUS_ANY_ERR))
+		return rc;
+
+	for (retries = 0; retries < SCOM_MAX_IND_RETRIES; retries++) {
+		rc = __get_scom(scom, &ind_data, addr, status);
+		if (rc || (*status & SCOM_STATUS_ANY_ERR))
+			return rc;
+
+		err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
+		*status = err << SCOM_STATUS_PIB_RESP_SHIFT;
+		*value = ind_data & XSCOM_DATA_IND_DATA;
+
+		if ((ind_data & XSCOM_DATA_IND_COMPLETE) || (err != SCOM_PIB_BLOCKED))
+			return 0;
+
+		msleep(1);
+	}
+	return rc;
+}
+
+static int raw_put_scom(struct scom_device *scom, uint64_t value,
+			uint64_t addr, uint32_t *status)
+{
+	if (addr & XSCOM_ADDR_IND_FLAG) {
+		if (addr & XSCOM_ADDR_INF_FORM1)
+			return put_indirect_scom_form1(scom, value, addr, status);
+		else
+			return put_indirect_scom_form0(scom, value, addr, status);
+	} else
+		return __put_scom(scom, value, addr, status);
+}
+
+static int raw_get_scom(struct scom_device *scom, uint64_t *value,
+			uint64_t addr, uint32_t *status)
+{
+	if (addr & XSCOM_ADDR_IND_FLAG) {
+		if (addr & XSCOM_ADDR_INF_FORM1)
+			return -ENXIO;
+		return get_indirect_scom_form0(scom, value, addr, status);
+	} else
+		return __get_scom(scom, value, addr, status);
+}
+
+static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
+{
+	uint32_t dummy = -1;
+
+	if (status & SCOM_STATUS_PROTECTION)
+		return -EPERM;
+	if (status & SCOM_STATUS_PARITY) {
+		fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+				 sizeof(uint32_t));
+		return -EIO;
+	}
+	/* Return -EBUSY on PIB abort to force a retry */
+	if (status & SCOM_STATUS_PIB_ABORT)
+		return -EBUSY;
+	return 0;
+}
+
+static int handle_pib_status(struct scom_device *scom, uint8_t status)
+{
+	uint32_t dummy = -1;
+
+	if (status == SCOM_PIB_SUCCESS)
+		return 0;
+	if (status == SCOM_PIB_BLOCKED)
+		return -EBUSY;
+
+	/* Reset the bridge */
+	fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+			 sizeof(uint32_t));
+
+	switch (status) {
+	case SCOM_PIB_OFFLINE:
+		return -ENODEV;
+	case SCOM_PIB_BAD_ADDR:
+		return -ENXIO;
+	case SCOM_PIB_TIMEOUT:
+		return -ETIMEDOUT;
+	case SCOM_PIB_PARTIAL:
+	case SCOM_PIB_CLK_ERR:
+	case SCOM_PIB_PARITY_ERR:
+	default:
+		return -EIO;
+	}
+}
+
+static int put_scom(struct scom_device *scom, uint64_t value,
+		    uint64_t addr)
+{
+	uint32_t status, dummy = -1;
+	int rc, retries;
+
+	for (retries = 0; retries < SCOM_MAX_RETRIES; retries++) {
+		rc = raw_put_scom(scom, value, addr, &status);
+		if (rc) {
+			/* Try resetting the bridge if FSI fails */
+			if (rc != -ENODEV && retries == 0) {
+				fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG,
+						 &dummy, sizeof(uint32_t));
+				rc = -EBUSY;
+			} else
+				return rc;
+		} else
+			rc = handle_fsi2pib_status(scom, status);
+		if (rc && rc != -EBUSY)
+			break;
+		if (rc == 0) {
+			rc = handle_pib_status(scom,
+					       (status & SCOM_STATUS_PIB_RESP_MASK)
+					       >> SCOM_STATUS_PIB_RESP_SHIFT);
+			if (rc && rc != -EBUSY)
+				break;
+		}
+		if (rc == 0)
+			break;
+		msleep(1);
+	}
+	return rc;
+}
+
+static int get_scom(struct scom_device *scom, uint64_t *value,
+		    uint64_t addr)
+{
+	uint32_t status, dummy = -1;
+	int rc, retries;
+
+	for (retries = 0; retries < SCOM_MAX_RETRIES; retries++) {
+		rc = raw_get_scom(scom, value, addr, &status);
+		if (rc) {
+			/* Try resetting the bridge if FSI fails */
+			if (rc != -ENODEV && retries == 0) {
+				fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG,
+						 &dummy, sizeof(uint32_t));
+				rc = -EBUSY;
+			} else
+				return rc;
+		} else
+			rc = handle_fsi2pib_status(scom, status);
+		if (rc && rc != -EBUSY)
+			break;
+		if (rc == 0) {
+			rc = handle_pib_status(scom,
+					       (status & SCOM_STATUS_PIB_RESP_MASK)
+					       >> SCOM_STATUS_PIB_RESP_SHIFT);
+			if (rc && rc != -EBUSY)
+				break;
+		}
+		if (rc == 0)
+			break;
+		msleep(1);
+	}
+	return rc;
+}
+
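+/*
+ * Char device read/write interface: userspace seeks to the SCOM address
+ * (scom_llseek() below) and then reads or writes exactly 8 bytes of
+ * host-endian data. Raw access with explicit status reporting is available
+ * through the FSI_SCOM_* ioctls instead.
+ */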
+static ssize_t scom_read(struct file *filep, char __user *buf, size_t len,
+			 loff_t *offset)
+{
+	struct scom_device *scom = filep->private_data;
+	struct device *dev = &scom->fsi_dev->dev;
+	uint64_t val;
+	int rc;
+
+	if (len != sizeof(uint64_t))
+		return -EINVAL;
+
+	mutex_lock(&scom->lock);
+	if (scom->dead)
+		rc = -ENODEV;
+	else
+		rc = get_scom(scom, &val, *offset);
+	mutex_unlock(&scom->lock);
+	if (rc) {
+		dev_dbg(dev, "get_scom fail:%d\n", rc);
+		return rc;
+	}
+
+	rc = copy_to_user(buf, &val, len);
+	if (rc)
+		dev_dbg(dev, "copy to user failed:%d\n", rc);
+
+	return rc ? rc : len;
+}
+
+static ssize_t scom_write(struct file *filep, const char __user *buf,
+			  size_t len, loff_t *offset)
+{
+	int rc;
+	struct scom_device *scom = filep->private_data;
+	struct device *dev = &scom->fsi_dev->dev;
+	uint64_t val;
+
+	if (len != sizeof(uint64_t))
+		return -EINVAL;
+
+	rc = copy_from_user(&val, buf, len);
+	if (rc) {
+		dev_dbg(dev, "copy from user failed:%d\n", rc);
+		return -EINVAL;
+	}
+
+	mutex_lock(&scom->lock);
+	if (scom->dead)
+		rc = -ENODEV;
+	else
+		rc = put_scom(scom, val, *offset);
+	mutex_unlock(&scom->lock);
+	if (rc) {
+		dev_dbg(dev, "put_scom failed with:%d\n", rc);
+		return rc;
+	}
+
+	return len;
+}
+
+static loff_t scom_llseek(struct file *file, loff_t offset, int whence)
+{
+	switch (whence) {
+	case SEEK_CUR:
+		break;
+	case SEEK_SET:
+		file->f_pos = offset;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return offset;
+}
+
+static void raw_convert_status(struct scom_access *acc, uint32_t status)
+{
+	acc->pib_status = (status & SCOM_STATUS_PIB_RESP_MASK) >>
+		SCOM_STATUS_PIB_RESP_SHIFT;
+	acc->intf_errors = 0;
+
+	if (status & SCOM_STATUS_PROTECTION)
+		acc->intf_errors |= SCOM_INTF_ERR_PROTECTION;
+	else if (status & SCOM_STATUS_PARITY)
+		acc->intf_errors |= SCOM_INTF_ERR_PARITY;
+	else if (status & SCOM_STATUS_PIB_ABORT)
+		acc->intf_errors |= SCOM_INTF_ERR_ABORT;
+	else if (status & SCOM_STATUS_ERR_SUMMARY)
+		acc->intf_errors |= SCOM_INTF_ERR_UNKNOWN;
+}
+
+static int scom_raw_read(struct scom_device *scom, void __user *argp)
+{
+	struct scom_access acc;
+	uint32_t status;
+	int rc;
+
+	if (copy_from_user(&acc, argp, sizeof(struct scom_access)))
+		return -EFAULT;
+
+	rc = raw_get_scom(scom, &acc.data, acc.addr, &status);
+	if (rc)
+		return rc;
+	raw_convert_status(&acc, status);
+	if (copy_to_user(argp, &acc, sizeof(struct scom_access)))
+		return -EFAULT;
+	return 0;
+}
+
+static int scom_raw_write(struct scom_device *scom, void __user *argp)
+{
+	u64 prev_data, mask, data;
+	struct scom_access acc;
+	uint32_t status;
+	int rc;
+
+	if (copy_from_user(&acc, argp, sizeof(struct scom_access)))
+		return -EFAULT;
+
+	if (acc.mask) {
+		rc = raw_get_scom(scom, &prev_data, acc.addr, &status);
+		if (rc)
+			return rc;
+		if (status & SCOM_STATUS_ANY_ERR)
+			goto fail;
+		mask = acc.mask;
+	} else {
+		prev_data = mask = -1ull;
+	}
+	data = (prev_data & ~mask) | (acc.data & mask);
+	rc = raw_put_scom(scom, data, acc.addr, &status);
+	if (rc)
+		return rc;
+ fail:
+	raw_convert_status(&acc, status);
+	if (copy_to_user(argp, &acc, sizeof(struct scom_access)))
+		return -EFAULT;
+	return 0;
+}
+
+static int scom_reset(struct scom_device *scom, void __user *argp)
+{
+	uint32_t flags, dummy = -1;
+	int rc = 0;
+
+	if (get_user(flags, (__u32 __user *)argp))
+		return -EFAULT;
+	if (flags & SCOM_RESET_PIB)
+		rc = fsi_device_write(scom->fsi_dev, SCOM_PIB_RESET_REG, &dummy,
+				      sizeof(uint32_t));
+	if (!rc && (flags & (SCOM_RESET_PIB | SCOM_RESET_INTF)))
+		rc = fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+				      sizeof(uint32_t));
+	return rc;
+}
+
+static int scom_check(struct scom_device *scom, void __user *argp)
+{
+	/* Still need to find out how to get "protected" */
+	return put_user(SCOM_CHECK_SUPPORTED, (__u32 __user *)argp);
+}
+
+static long scom_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct scom_device *scom = file->private_data;
+	void __user *argp = (void __user *)arg;
+	int rc = -ENOTTY;
+
+	mutex_lock(&scom->lock);
+	if (scom->dead) {
+		mutex_unlock(&scom->lock);
+		return -ENODEV;
+	}
+	switch (cmd) {
+	case FSI_SCOM_CHECK:
+		rc = scom_check(scom, argp);
+		break;
+	case FSI_SCOM_READ:
+		rc = scom_raw_read(scom, argp);
+		break;
+	case FSI_SCOM_WRITE:
+		rc = scom_raw_write(scom, argp);
+		break;
+	case FSI_SCOM_RESET:
+		rc = scom_reset(scom, argp);
+		break;
+	}
+	mutex_unlock(&scom->lock);
+	return rc;
+}
+
+static int scom_open(struct inode *inode, struct file *file)
+{
+	struct scom_device *scom = container_of(inode->i_cdev, struct scom_device, cdev);
+
+	file->private_data = scom;
+
+	return 0;
+}
+
+static const struct file_operations scom_fops = {
+	.owner		= THIS_MODULE,
+	.open		= scom_open,
+	.llseek		= scom_llseek,
+	.read		= scom_read,
+	.write		= scom_write,
+	.unlocked_ioctl	= scom_ioctl,
+};
+
+static void scom_free(struct device *dev)
+{
+	struct scom_device *scom = container_of(dev, struct scom_device, dev);
+
+	put_device(&scom->fsi_dev->dev);
+	kfree(scom);
+}
+
+static int scom_probe(struct device *dev)
+{
+	struct fsi_device *fsi_dev = to_fsi_dev(dev);
+	struct scom_device *scom;
+	int rc, didx;
+
+	scom = kzalloc(sizeof(*scom), GFP_KERNEL);
+	if (!scom)
+		return -ENOMEM;
+	dev_set_drvdata(dev, scom);
+	mutex_init(&scom->lock);
+
+	/* Grab a reference to the device (parent of our cdev); we'll drop it later */
+	if (!get_device(dev)) {
+		kfree(scom);
+		return -ENODEV;
+	}
+	scom->fsi_dev = fsi_dev;
+
+	/* Create chardev for userspace access */
+	scom->dev.type = &fsi_cdev_type;
+	scom->dev.parent = dev;
+	scom->dev.release = scom_free;
+	device_initialize(&scom->dev);
+
+	/* Allocate a minor in the FSI space */
+	rc = fsi_get_new_minor(fsi_dev, fsi_dev_scom, &scom->dev.devt, &didx);
+	if (rc)
+		goto err;
+
+	dev_set_name(&scom->dev, "scom%d", didx);
+	cdev_init(&scom->cdev, &scom_fops);
+	rc = cdev_device_add(&scom->cdev, &scom->dev);
+	if (rc) {
+		dev_err(dev, "Error %d creating char device %s\n",
+			rc, dev_name(&scom->dev));
+		goto err_free_minor;
+	}
+
+	return 0;
+ err_free_minor:
+	fsi_free_minor(scom->dev.devt);
+ err:
+	put_device(&scom->dev);
+	return rc;
+}
+
+static int scom_remove(struct device *dev)
+{
+	struct scom_device *scom = dev_get_drvdata(dev);
+
+	mutex_lock(&scom->lock);
+	scom->dead = true;
+	mutex_unlock(&scom->lock);
+	cdev_device_del(&scom->cdev, &scom->dev);
+	fsi_free_minor(scom->dev.devt);
+	put_device(&scom->dev);
+
+	return 0;
+}
+
+static struct fsi_device_id scom_ids[] = {
+	{
+		.engine_type = FSI_ENGID_SCOM,
+		.version = FSI_VERSION_ANY,
+	},
+	{ 0 }
+};
+
+static struct fsi_driver scom_drv = {
+	.id_table = scom_ids,
+	.drv = {
+		.name = "scom",
+		.bus = &fsi_bus_type,
+		.probe = scom_probe,
+		.remove = scom_remove,
+	}
+};
+
+static int scom_init(void)
+{
+	return fsi_driver_register(&scom_drv);
+}
+
+static void scom_exit(void)
+{
+	fsi_driver_unregister(&scom_drv);
+}
+
+module_init(scom_init);
+module_exit(scom_exit);
+MODULE_LICENSE("GPL");