[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/drivers/soc/qcom/Kconfig b/src/kernel/linux/v4.14/drivers/soc/qcom/Kconfig
new file mode 100644
index 0000000..b00bccd
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/soc/qcom/Kconfig
@@ -0,0 +1,89 @@
+#
+# QCOM Soc drivers
+#
+menu "Qualcomm SoC drivers"
+
+config QCOM_GLINK_SSR
+	tristate "Qualcomm Glink SSR driver"
+	depends on RPMSG
+	depends on QCOM_RPROC_COMMON
+	help
+	  Say y here to enable GLINK SSR support. The GLINK SSR driver
+	  implements the SSR protocol for notifying the remote processor about
+	  neighboring subsystems going up or down.
+
+config QCOM_GSBI
+	tristate "QCOM General Serial Bus Interface"
+	depends on ARCH_QCOM
+	select MFD_SYSCON
+	help
+	  Say y here to enable GSBI support.  The GSBI provides control
+	  functions for connecting the underlying serial UART, SPI, and I2C
+	  devices to the output pins.
+
+config QCOM_MDT_LOADER
+	tristate
+	select QCOM_SCM
+
+config QCOM_PM
+	bool "Qualcomm Power Management"
+	depends on ARCH_QCOM && !ARM64
+	select ARM_CPU_SUSPEND
+	select QCOM_SCM
+	help
+	  QCOM platform specific power driver to manage cores and L2 low power
+	  modes. It interfaces with various system drivers to put the cores in
+	  low power modes.
+
+config QCOM_SMEM
+	tristate "Qualcomm Shared Memory Manager (SMEM)"
+	depends on ARCH_QCOM
+	depends on HWSPINLOCK
+	help
+	  Say y here to enable support for the Qualcomm Shared Memory Manager.
+	  The driver provides an interface to items in a heap shared among all
+	  processors in a Qualcomm platform.
+
+config QCOM_SMD_RPM
+	tristate "Qualcomm Resource Power Manager (RPM) over SMD"
+	depends on ARCH_QCOM
+	depends on RPMSG && OF
+	help
+	  If you say yes to this option, support will be included for the
+	  Resource Power Manager system found in the Qualcomm 8974 based
+	  devices.
+
+	  This is required to access many regulators, clocks and bus
+	  frequencies controlled by the RPM on these devices.
+
+	  Say M here if you want to include support for the Qualcomm RPM as a
+	  module. This will build a module called "qcom-smd-rpm".
+
+config QCOM_SMEM_STATE
+	bool
+
+config QCOM_SMP2P
+	tristate "Qualcomm Shared Memory Point to Point support"
+	depends on QCOM_SMEM
+	select QCOM_SMEM_STATE
+	help
+	  Say yes here to support the Qualcomm Shared Memory Point to Point
+	  protocol.
+
+config QCOM_SMSM
+	tristate "Qualcomm Shared Memory State Machine"
+	depends on QCOM_SMEM
+	select QCOM_SMEM_STATE
+	help
+	  Say yes here to support the Qualcomm Shared Memory State Machine.
+	  The state machine is represented by bits in shared memory.
+
+config QCOM_WCNSS_CTRL
+	tristate "Qualcomm WCNSS control driver"
+	depends on ARCH_QCOM
+	depends on RPMSG
+	help
+	  Client driver for the WCNSS_CTRL SMD channel, used to download NV
+	  firmware to a newly booted WCNSS chip.
+
+endmenu
diff --git a/src/kernel/linux/v4.14/drivers/soc/qcom/Makefile b/src/kernel/linux/v4.14/drivers/soc/qcom/Makefile
new file mode 100644
index 0000000..fab4466
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/soc/qcom/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_GLINK_SSR) +=	glink_ssr.o
+obj-$(CONFIG_QCOM_GSBI)	+=	qcom_gsbi.o
+obj-$(CONFIG_QCOM_MDT_LOADER)	+= mdt_loader.o
+obj-$(CONFIG_QCOM_PM)	+=	spm.o
+obj-$(CONFIG_QCOM_SMD_RPM)	+= smd-rpm.o
+obj-$(CONFIG_QCOM_SMEM) +=	smem.o
+obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
+obj-$(CONFIG_QCOM_SMP2P)	+= smp2p.o
+obj-$(CONFIG_QCOM_SMSM)	+= smsm.o
+obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
diff --git a/src/kernel/linux/v4.14/drivers/soc/qcom/glink_ssr.c b/src/kernel/linux/v4.14/drivers/soc/qcom/glink_ssr.c
new file mode 100644
index 0000000..19c7399
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/soc/qcom/glink_ssr.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/rpmsg.h>
+#include <linux/remoteproc/qcom_rproc.h>
+
+/**
+ * struct do_cleanup_msg - The data structure for an SSR do_cleanup message
+ * @version:     The G-Link SSR protocol version
+ * @command:     The G-Link SSR command - do_cleanup
+ * @seq_num:     Sequence number
+ * @name_len:    Length of the name of the subsystem being restarted
+ * @name:        G-Link edge name of the subsystem being restarted
+ */
+struct do_cleanup_msg {
+	__le32 version;
+	__le32 command;
+	__le32 seq_num;
+	__le32 name_len;
+	char name[32];
+};
+
+/**
+ * struct cleanup_done_msg - The data structure for an SSR cleanup_done message
+ * @version:     The G-Link SSR protocol version
+ * @response:    The G-Link SSR response to a do_cleanup command, cleanup_done
+ * @seq_num:     Sequence number
+ */
+struct cleanup_done_msg {
+	__le32 version;
+	__le32 response;
+	__le32 seq_num;
+};
+
+/*
+ * G-Link SSR protocol commands
+ */
+#define GLINK_SSR_DO_CLEANUP	0
+#define GLINK_SSR_CLEANUP_DONE	1
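+
+/*
+ * Illustrative message flow, as implemented by the notifier and callback
+ * below (a sketch only; "modem" is an example edge name):
+ *
+ *	local				remote
+ *	do_cleanup(seq_num = N,
+ *		   name = "modem")   --->
+ *				     <---  cleanup_done(seq_num = N)
+ *
+ * The seq_num of the cleanup_done response must match that of the
+ * outstanding do_cleanup request.
+ */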
+
+struct glink_ssr {
+	struct device *dev;
+	struct rpmsg_endpoint *ept;
+
+	struct notifier_block nb;
+
+	u32 seq_num;
+	struct completion completion;
+};
+
+static int qcom_glink_ssr_callback(struct rpmsg_device *rpdev,
+				   void *data, int len, void *priv, u32 addr)
+{
+	struct cleanup_done_msg *msg = data;
+	struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
+
+	if (len < sizeof(*msg)) {
+		dev_err(ssr->dev, "message too short\n");
+		return -EINVAL;
+	}
+
+	if (le32_to_cpu(msg->version) != 0)
+		return -EINVAL;
+
+	if (le32_to_cpu(msg->response) != GLINK_SSR_CLEANUP_DONE)
+		return 0;
+
+	if (le32_to_cpu(msg->seq_num) != ssr->seq_num) {
+		dev_err(ssr->dev, "invalid sequence number of response\n");
+		return -EINVAL;
+	}
+
+	complete(&ssr->completion);
+
+	return 0;
+}
+
+static int qcom_glink_ssr_notify(struct notifier_block *nb, unsigned long event,
+				 void *data)
+{
+	struct glink_ssr *ssr = container_of(nb, struct glink_ssr, nb);
+	struct do_cleanup_msg msg;
+	char *ssr_name = data;
+	int ret;
+
+	ssr->seq_num++;
+	reinit_completion(&ssr->completion);
+
+	memset(&msg, 0, sizeof(msg));
+	msg.command = cpu_to_le32(GLINK_SSR_DO_CLEANUP);
+	msg.seq_num = cpu_to_le32(ssr->seq_num);
+	msg.name_len = cpu_to_le32(strlen(ssr_name));
+	strlcpy(msg.name, ssr_name, sizeof(msg.name));
+
+	ret = rpmsg_send(ssr->ept, &msg, sizeof(msg));
+	if (ret < 0)
+		dev_err(ssr->dev, "failed to send cleanup message\n");
+
+	ret = wait_for_completion_timeout(&ssr->completion, HZ);
+	if (!ret)
+		dev_err(ssr->dev, "timeout waiting for cleanup done message\n");
+
+	return NOTIFY_DONE;
+}
+
+static int qcom_glink_ssr_probe(struct rpmsg_device *rpdev)
+{
+	struct glink_ssr *ssr;
+
+	ssr = devm_kzalloc(&rpdev->dev, sizeof(*ssr), GFP_KERNEL);
+	if (!ssr)
+		return -ENOMEM;
+
+	init_completion(&ssr->completion);
+
+	ssr->dev = &rpdev->dev;
+	ssr->ept = rpdev->ept;
+	ssr->nb.notifier_call = qcom_glink_ssr_notify;
+
+	dev_set_drvdata(&rpdev->dev, ssr);
+
+	return qcom_register_ssr_notifier(&ssr->nb);
+}
+
+static void qcom_glink_ssr_remove(struct rpmsg_device *rpdev)
+{
+	struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
+
+	qcom_unregister_ssr_notifier(&ssr->nb);
+}
+
+static const struct rpmsg_device_id qcom_glink_ssr_match[] = {
+	{ "glink_ssr" },
+	{}
+};
+
+static struct rpmsg_driver qcom_glink_ssr_driver = {
+	.probe = qcom_glink_ssr_probe,
+	.remove = qcom_glink_ssr_remove,
+	.callback = qcom_glink_ssr_callback,
+	.id_table = qcom_glink_ssr_match,
+	.drv = {
+		.name = "qcom_glink_ssr",
+	},
+};
+module_rpmsg_driver(qcom_glink_ssr_driver);
+
+MODULE_ALIAS("rpmsg:glink_ssr");
+MODULE_DESCRIPTION("Qualcomm GLINK SSR notifier");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.14/drivers/soc/qcom/mdt_loader.c b/src/kernel/linux/v4.14/drivers/soc/qcom/mdt_loader.c
new file mode 100644
index 0000000..08bd854
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/soc/qcom/mdt_loader.c
@@ -0,0 +1,203 @@
+/*
+ * Qualcomm Peripheral Image Loader
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2015 Sony Mobile Communications Inc
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/elf.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/qcom_scm.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/mdt_loader.h>
+
+static bool mdt_phdr_valid(const struct elf32_phdr *phdr)
+{
+	if (phdr->p_type != PT_LOAD)
+		return false;
+
+	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
+		return false;
+
+	if (!phdr->p_memsz)
+		return false;
+
+	return true;
+}
+
+/**
+ * qcom_mdt_get_size() - acquire size of the memory region needed to load mdt
+ * @fw:		firmware object for the mdt file
+ *
+ * Returns size of the loaded firmware blob, or -EINVAL on failure.
+ */
+ssize_t qcom_mdt_get_size(const struct firmware *fw)
+{
+	const struct elf32_phdr *phdrs;
+	const struct elf32_phdr *phdr;
+	const struct elf32_hdr *ehdr;
+	phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
+	phys_addr_t max_addr = 0;
+	int i;
+
+	ehdr = (struct elf32_hdr *)fw->data;
+	phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		phdr = &phdrs[i];
+
+		if (!mdt_phdr_valid(phdr))
+			continue;
+
+		if (phdr->p_paddr < min_addr)
+			min_addr = phdr->p_paddr;
+
+		if (phdr->p_paddr + phdr->p_memsz > max_addr)
+			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+	}
+
+	return min_addr < max_addr ? max_addr - min_addr : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_get_size);
+
+/**
+ * qcom_mdt_load() - load the firmware whose header is passed as @fw
+ * @dev:	device handle to associate resources with
+ * @fw:		firmware object for the mdt file
+ * @firmware:	name of the firmware, for construction of segment file names
+ * @pas_id:	PAS identifier
+ * @mem_region:	allocated memory region to load firmware into
+ * @mem_phys:	physical address of allocated memory region
+ * @mem_size:	size of the allocated memory region
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_load(struct device *dev, const struct firmware *fw,
+		  const char *firmware, int pas_id, void *mem_region,
+		  phys_addr_t mem_phys, size_t mem_size)
+{
+	const struct elf32_phdr *phdrs;
+	const struct elf32_phdr *phdr;
+	const struct elf32_hdr *ehdr;
+	const struct firmware *seg_fw;
+	phys_addr_t mem_reloc;
+	phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
+	phys_addr_t max_addr = 0;
+	size_t fw_name_len;
+	ssize_t offset;
+	char *fw_name;
+	bool relocate = false;
+	void *ptr;
+	int ret;
+	int i;
+
+	if (!fw || !mem_region || !mem_phys || !mem_size)
+		return -EINVAL;
+
+	ehdr = (struct elf32_hdr *)fw->data;
+	phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+	fw_name_len = strlen(firmware);
+	if (fw_name_len <= 4)
+		return -EINVAL;
+
+	fw_name = kstrdup(firmware, GFP_KERNEL);
+	if (!fw_name)
+		return -ENOMEM;
+
+	ret = qcom_scm_pas_init_image(pas_id, fw->data, fw->size);
+	if (ret) {
+		dev_err(dev, "invalid firmware metadata\n");
+		goto out;
+	}
+
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		phdr = &phdrs[i];
+
+		if (!mdt_phdr_valid(phdr))
+			continue;
+
+		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
+			relocate = true;
+
+		if (phdr->p_paddr < min_addr)
+			min_addr = phdr->p_paddr;
+
+		if (phdr->p_paddr + phdr->p_memsz > max_addr)
+			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+	}
+
+	if (relocate) {
+		ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr);
+		if (ret) {
+			dev_err(dev, "unable to setup relocation\n");
+			goto out;
+		}
+
+		/*
+		 * The image is relocatable, so offset each segment based on
+		 * the lowest segment address.
+		 */
+		mem_reloc = min_addr;
+	} else {
+		/*
+		 * Image is not relocatable, so offset each segment based on
+		 * the allocated physical chunk of memory.
+		 */
+		mem_reloc = mem_phys;
+	}
+
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		phdr = &phdrs[i];
+
+		if (!mdt_phdr_valid(phdr))
+			continue;
+
+		offset = phdr->p_paddr - mem_reloc;
+		if (offset < 0 || offset + phdr->p_memsz > mem_size) {
+			dev_err(dev, "segment outside memory range\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		ptr = mem_region + offset;
+
+		if (phdr->p_filesz) {
+			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
+			ret = request_firmware_into_buf(&seg_fw, fw_name, dev,
+							ptr, phdr->p_filesz);
+			if (ret) {
+				dev_err(dev, "failed to load %s\n", fw_name);
+				break;
+			}
+
+			release_firmware(seg_fw);
+		}
+
+		if (phdr->p_memsz > phdr->p_filesz)
+			memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
+	}
+
+out:
+	kfree(fw_name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_load);
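+
+/*
+ * Illustrative usage from a remoteproc-style caller (a sketch only; the
+ * firmware name, pas_id and memory region below are placeholder values):
+ *
+ *	const struct firmware *fw;
+ *	int ret;
+ *
+ *	ret = request_firmware(&fw, "modem.mdt", dev);
+ *	if (ret)
+ *		return ret;
+ *
+ *	if (qcom_mdt_get_size(fw) > mem_size)
+ *		goto out;
+ *
+ *	ret = qcom_mdt_load(dev, fw, "modem.mdt", pas_id,
+ *			    mem_region, mem_phys, mem_size);
+ *out:
+ *	release_firmware(fw);
+ *
+ * qcom_mdt_load() then fetches the individual segments as "modem.b00",
+ * "modem.b01", and so on, per the sprintf() above.
+ */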
+
+MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.14/drivers/soc/qcom/qcom_gsbi.c b/src/kernel/linux/v4.14/drivers/soc/qcom/qcom_gsbi.c
new file mode 100644
index 0000000..038abc3
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/soc/qcom/qcom_gsbi.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <dt-bindings/soc/qcom,gsbi.h>
+
+#define GSBI_CTRL_REG		0x0000
+#define GSBI_PROTOCOL_SHIFT	4
+#define MAX_GSBI		12
+
+#define TCSR_ADM_CRCI_BASE	0x70
+
+struct crci_config {
+	u32 num_rows;
+	const u32 (*array)[MAX_GSBI];
+};
+
+static const u32 crci_ipq8064[][MAX_GSBI] = {
+	{
+		0x000003, 0x00000c, 0x000030, 0x0000c0,
+		0x000300, 0x000c00, 0x003000, 0x00c000,
+		0x030000, 0x0c0000, 0x300000, 0xc00000
+	},
+	{
+		0x000003, 0x00000c, 0x000030, 0x0000c0,
+		0x000300, 0x000c00, 0x003000, 0x00c000,
+		0x030000, 0x0c0000, 0x300000, 0xc00000
+	},
+};
+
+static const struct crci_config config_ipq8064 = {
+	.num_rows = ARRAY_SIZE(crci_ipq8064),
+	.array = crci_ipq8064,
+};
+
+static const unsigned int crci_apq8064[][MAX_GSBI] = {
+	{
+		0x001800, 0x006000, 0x000030, 0x0000c0,
+		0x000300, 0x000400, 0x000000, 0x000000,
+		0x000000, 0x000000, 0x000000, 0x000000
+	},
+	{
+		0x000000, 0x000000, 0x000000, 0x000000,
+		0x000000, 0x000020, 0x0000c0, 0x000000,
+		0x000000, 0x000000, 0x000000, 0x000000
+	},
+};
+
+static const struct crci_config config_apq8064 = {
+	.num_rows = ARRAY_SIZE(crci_apq8064),
+	.array = crci_apq8064,
+};
+
+static const unsigned int crci_msm8960[][MAX_GSBI] = {
+	{
+		0x000003, 0x00000c, 0x000030, 0x0000c0,
+		0x000300, 0x000400, 0x000000, 0x000000,
+		0x000000, 0x000000, 0x000000, 0x000000
+	},
+	{
+		0x000000, 0x000000, 0x000000, 0x000000,
+		0x000000, 0x000020, 0x0000c0, 0x000300,
+		0x001800, 0x006000, 0x000000, 0x000000
+	},
+};
+
+static const struct crci_config config_msm8960 = {
+	.num_rows = ARRAY_SIZE(crci_msm8960),
+	.array = crci_msm8960,
+};
+
+static const unsigned int crci_msm8660[][MAX_GSBI] = {
+	{	/* ADM 0 - A */
+		0x000003, 0x00000c, 0x000030, 0x0000c0,
+		0x000300, 0x000c00, 0x003000, 0x00c000,
+		0x030000, 0x0c0000, 0x300000, 0xc00000
+	},
+	{	/* ADM 0 - B */
+		0x000003, 0x00000c, 0x000030, 0x0000c0,
+		0x000300, 0x000c00, 0x003000, 0x00c000,
+		0x030000, 0x0c0000, 0x300000, 0xc00000
+	},
+	{	/* ADM 1 - A */
+		0x000003, 0x00000c, 0x000030, 0x0000c0,
+		0x000300, 0x000c00, 0x003000, 0x00c000,
+		0x030000, 0x0c0000, 0x300000, 0xc00000
+	},
+	{	/* ADM 1 - B */
+		0x000003, 0x00000c, 0x000030, 0x0000c0,
+		0x000300, 0x000c00, 0x003000, 0x00c000,
+		0x030000, 0x0c0000, 0x300000, 0xc00000
+	},
+};
+
+static const struct crci_config config_msm8660 = {
+	.num_rows = ARRAY_SIZE(crci_msm8660),
+	.array = crci_msm8660,
+};
+
+struct gsbi_info {
+	struct clk *hclk;
+	u32 mode;
+	u32 crci;
+	struct regmap *tcsr;
+};
+
+static const struct of_device_id tcsr_dt_match[] = {
+	{ .compatible = "qcom,tcsr-ipq8064", .data = &config_ipq8064},
+	{ .compatible = "qcom,tcsr-apq8064", .data = &config_apq8064},
+	{ .compatible = "qcom,tcsr-msm8960", .data = &config_msm8960},
+	{ .compatible = "qcom,tcsr-msm8660", .data = &config_msm8660},
+	{ },
+};
+
+static int gsbi_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *tcsr_node;
+	const struct of_device_id *match;
+	struct resource *res;
+	void __iomem *base;
+	struct gsbi_info *gsbi;
+	int i, ret;
+	u32 mask, gsbi_num;
+	const struct crci_config *config = NULL;
+
+	gsbi = devm_kzalloc(&pdev->dev, sizeof(*gsbi), GFP_KERNEL);
+
+	if (!gsbi)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	/* get the tcsr node and setup the config and regmap */
+	gsbi->tcsr = syscon_regmap_lookup_by_phandle(node, "syscon-tcsr");
+
+	if (!IS_ERR(gsbi->tcsr)) {
+		tcsr_node = of_parse_phandle(node, "syscon-tcsr", 0);
+		if (tcsr_node) {
+			match = of_match_node(tcsr_dt_match, tcsr_node);
+			if (match)
+				config = match->data;
+			else
+				dev_warn(&pdev->dev, "no matching TCSR\n");
+
+			of_node_put(tcsr_node);
+		}
+	}
+
+	if (of_property_read_u32(node, "cell-index", &gsbi_num)) {
+		dev_err(&pdev->dev, "missing cell-index\n");
+		return -EINVAL;
+	}
+
+	if (gsbi_num < 1 || gsbi_num > MAX_GSBI) {
+		dev_err(&pdev->dev, "invalid cell-index\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_u32(node, "qcom,mode", &gsbi->mode)) {
+		dev_err(&pdev->dev, "missing mode configuration\n");
+		return -EINVAL;
+	}
+
+	/* not required, so default to 0 if not present */
+	of_property_read_u32(node, "qcom,crci", &gsbi->crci);
+
+	dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n",
+		 gsbi->mode, gsbi->crci);
+	gsbi->hclk = devm_clk_get(&pdev->dev, "iface");
+	if (IS_ERR(gsbi->hclk))
+		return PTR_ERR(gsbi->hclk);
+
+	ret = clk_prepare_enable(gsbi->hclk);
+	if (ret)
+		return ret;
+
+	writel_relaxed((gsbi->mode << GSBI_PROTOCOL_SHIFT) | gsbi->crci,
+				base + GSBI_CTRL_REG);
+
+	/*
+	 * Modify the TCSR to reflect the mode and ADM CRCI mux.
+	 * Each GSBI contains a pair of bits, one for RX and one for TX;
+	 * SPI mode requires both bits cleared, otherwise they are set.
+	 */
+	if (config) {
+		for (i = 0; i < config->num_rows; i++) {
+			mask = config->array[i][gsbi_num - 1];
+
+			if (gsbi->mode == GSBI_PROT_SPI)
+				regmap_update_bits(gsbi->tcsr,
+					TCSR_ADM_CRCI_BASE + 4 * i, mask, 0);
+			else
+				regmap_update_bits(gsbi->tcsr,
+					TCSR_ADM_CRCI_BASE + 4 * i, mask, mask);
+
+		}
+	}
+
+	/* make sure the gsbi control write is not reordered */
+	wmb();
+
+	platform_set_drvdata(pdev, gsbi);
+
+	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+	if (ret)
+		clk_disable_unprepare(gsbi->hclk);
+	return ret;
+}
+
+static int gsbi_remove(struct platform_device *pdev)
+{
+	struct gsbi_info *gsbi = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(gsbi->hclk);
+
+	return 0;
+}
+
+static const struct of_device_id gsbi_dt_match[] = {
+	{ .compatible = "qcom,gsbi-v1.0.0", },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(of, gsbi_dt_match);
+
+static struct platform_driver gsbi_driver = {
+	.driver = {
+		.name		= "gsbi",
+		.of_match_table	= gsbi_dt_match,
+	},
+	.probe = gsbi_probe,
+	.remove	= gsbi_remove,
+};
+
+module_platform_driver(gsbi_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM GSBI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.14/drivers/soc/qcom/smd-rpm.c b/src/kernel/linux/v4.14/drivers/soc/qcom/smd-rpm.c
new file mode 100644
index 0000000..c234675
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/soc/qcom/smd-rpm.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include <linux/rpmsg.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#define RPM_REQUEST_TIMEOUT     (5 * HZ)
+
+/**
+ * struct qcom_smd_rpm - state of the rpm device driver
+ * @rpm_channel:	reference to the smd channel
+ * @dev:		rpm device
+ * @ack:		completion for acks
+ * @lock:		mutual exclusion around the send/complete pair
+ * @ack_status:		result of the rpm request
+ */
+struct qcom_smd_rpm {
+	struct rpmsg_endpoint *rpm_channel;
+	struct device *dev;
+
+	struct completion ack;
+	struct mutex lock;
+	int ack_status;
+};
+
+/**
+ * struct qcom_rpm_header - header for all rpm requests and responses
+ * @service_type:	identifier of the service
+ * @length:		length of the payload
+ */
+struct qcom_rpm_header {
+	__le32 service_type;
+	__le32 length;
+};
+
+/**
+ * struct qcom_rpm_request - request message to the rpm
+ * @msg_id:	identifier of the outgoing message
+ * @flags:	active/sleep state flags
+ * @type:	resource type
+ * @id:		resource id
+ * @data_len:	length of the payload following this header
+ */
+struct qcom_rpm_request {
+	__le32 msg_id;
+	__le32 flags;
+	__le32 type;
+	__le32 id;
+	__le32 data_len;
+};
+
+/**
+ * struct qcom_rpm_message - response message from the rpm
+ * @msg_type:	indicator of the type of message
+ * @length:	the size of this message, including the message header
+ * @msg_id:	message id
+ * @message:	textual message from the rpm
+ *
+ * Multiple of these messages can be stacked in an rpm message.
+ */
+struct qcom_rpm_message {
+	__le32 msg_type;
+	__le32 length;
+	union {
+		__le32 msg_id;
+		u8 message[0];
+	};
+};
+
+#define RPM_SERVICE_TYPE_REQUEST	0x00716572 /* "req\0" */
+
+#define RPM_MSG_TYPE_ERR		0x00727265 /* "err\0" */
+#define RPM_MSG_TYPE_MSG_ID		0x2367736d /* "msg#" */
+
+/**
+ * qcom_rpm_smd_write - write @buf to @type:@id
+ * @rpm:	rpm handle
+ * @state:	active/sleep state flags
+ * @type:	resource type
+ * @id:		resource identifier
+ * @buf:	the data to be written
+ * @count:	number of bytes in @buf
+ */
+int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
+		       int state,
+		       u32 type, u32 id,
+		       void *buf,
+		       size_t count)
+{
+	static unsigned msg_id = 1;
+	int left;
+	int ret;
+	struct {
+		struct qcom_rpm_header hdr;
+		struct qcom_rpm_request req;
+		u8 payload[];
+	} *pkt;
+	size_t size = sizeof(*pkt) + count;
+
+	/* SMD packets to the RPM may not exceed 256 bytes */
+	if (WARN_ON(size >= 256))
+		return -EINVAL;
+
+	pkt = kmalloc(size, GFP_KERNEL);
+	if (!pkt)
+		return -ENOMEM;
+
+	mutex_lock(&rpm->lock);
+
+	pkt->hdr.service_type = cpu_to_le32(RPM_SERVICE_TYPE_REQUEST);
+	pkt->hdr.length = cpu_to_le32(sizeof(struct qcom_rpm_request) + count);
+
+	pkt->req.msg_id = cpu_to_le32(msg_id++);
+	pkt->req.flags = cpu_to_le32(state);
+	pkt->req.type = cpu_to_le32(type);
+	pkt->req.id = cpu_to_le32(id);
+	pkt->req.data_len = cpu_to_le32(count);
+	memcpy(pkt->payload, buf, count);
+
+	ret = rpmsg_send(rpm->rpm_channel, pkt, size);
+	if (ret)
+		goto out;
+
+	left = wait_for_completion_timeout(&rpm->ack, RPM_REQUEST_TIMEOUT);
+	if (!left)
+		ret = -ETIMEDOUT;
+	else
+		ret = rpm->ack_status;
+
+out:
+	kfree(pkt);
+	mutex_unlock(&rpm->lock);
+	return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_smd_write);
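+
+/*
+ * Illustrative call from an RPM resource client (a sketch; the key/value
+ * request layout and resource type/id are placeholders, not a real resource
+ * definition):
+ *
+ *	struct {
+ *		__le32 key;
+ *		__le32 nbytes;
+ *		__le32 value;
+ *	} req = { ... };
+ *
+ *	ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ *				 resource_type, resource_id,
+ *				 &req, sizeof(req));
+ *
+ * The call blocks until the RPM acks the request or RPM_REQUEST_TIMEOUT
+ * expires.
+ */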
+
+static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev,
+				 void *data,
+				 int count,
+				 void *priv,
+				 u32 addr)
+{
+	const struct qcom_rpm_header *hdr = data;
+	size_t hdr_length = le32_to_cpu(hdr->length);
+	const struct qcom_rpm_message *msg;
+	struct qcom_smd_rpm *rpm = dev_get_drvdata(&rpdev->dev);
+	const u8 *buf = data + sizeof(struct qcom_rpm_header);
+	const u8 *end = buf + hdr_length;
+	char msgbuf[32];
+	int status = 0;
+	u32 len, msg_length;
+
+	if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST ||
+	    hdr_length < sizeof(struct qcom_rpm_message)) {
+		dev_err(rpm->dev, "invalid request\n");
+		return 0;
+	}
+
+	while (buf < end) {
+		msg = (struct qcom_rpm_message *)buf;
+		msg_length = le32_to_cpu(msg->length);
+		switch (le32_to_cpu(msg->msg_type)) {
+		case RPM_MSG_TYPE_MSG_ID:
+			break;
+		case RPM_MSG_TYPE_ERR:
+			len = min_t(u32, ALIGN(msg_length, 4), sizeof(msgbuf));
+			memcpy_fromio(msgbuf, msg->message, len);
+			msgbuf[len - 1] = 0;
+
+			if (!strcmp(msgbuf, "resource does not exist"))
+				status = -ENXIO;
+			else
+				status = -EINVAL;
+			break;
+		}
+
+		buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg_length, 4);
+	}
+
+	rpm->ack_status = status;
+	complete(&rpm->ack);
+	return 0;
+}
+
+static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev)
+{
+	struct qcom_smd_rpm *rpm;
+
+	rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL);
+	if (!rpm)
+		return -ENOMEM;
+
+	mutex_init(&rpm->lock);
+	init_completion(&rpm->ack);
+
+	rpm->dev = &rpdev->dev;
+	rpm->rpm_channel = rpdev->ept;
+	dev_set_drvdata(&rpdev->dev, rpm);
+
+	return of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev);
+}
+
+static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
+{
+	of_platform_depopulate(&rpdev->dev);
+}
+
+static const struct of_device_id qcom_smd_rpm_of_match[] = {
+	{ .compatible = "qcom,rpm-apq8084" },
+	{ .compatible = "qcom,rpm-msm8916" },
+	{ .compatible = "qcom,rpm-msm8974" },
+	{ .compatible = "qcom,rpm-msm8996" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
+
+static struct rpmsg_driver qcom_smd_rpm_driver = {
+	.probe = qcom_smd_rpm_probe,
+	.remove = qcom_smd_rpm_remove,
+	.callback = qcom_smd_rpm_callback,
+	.drv  = {
+		.name  = "qcom_smd_rpm",
+		.of_match_table = qcom_smd_rpm_of_match,
+	},
+};
+
+static int __init qcom_smd_rpm_init(void)
+{
+	return register_rpmsg_driver(&qcom_smd_rpm_driver);
+}
+arch_initcall(qcom_smd_rpm_init);
+
+static void __exit qcom_smd_rpm_exit(void)
+{
+	unregister_rpmsg_driver(&qcom_smd_rpm_driver);
+}
+module_exit(qcom_smd_rpm_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm SMD backed RPM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.14/drivers/soc/qcom/smem.c b/src/kernel/linux/v4.14/drivers/soc/qcom/smem.c
new file mode 100644
index 0000000..89dd50f
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/soc/qcom/smem.c
@@ -0,0 +1,795 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+
+/*
+ * The Qualcomm shared memory system is an allocate-only heap structure that
+ * consists of one or more memory areas that can be accessed by the processors
+ * in the SoC.
+ *
+ * All systems contain a global heap, accessible by all processors in the SoC,
+ * with a table of contents data structure (@smem_header) at the beginning of
+ * the main shared memory block.
+ *
+ * The global header contains metadata for allocations as well as a fixed list
+ * of 512 entries (@smem_global_entry) that can be initialized to reference
+ * parts of the shared memory space.
+ *
+ *
+ * In addition to this global heap a set of "private" heaps can be set up at
+ * boot time with access restrictions so that only certain processor pairs can
+ * access the data.
+ *
+ * These partitions are referenced from an optional partition table
+ * (@smem_ptable), which is found 4kB from the end of the main smem region. The
+ * partition table entries (@smem_ptable_entry) list the involved processors
+ * (or hosts) and their location in the main shared memory region.
+ *
+ * Each partition starts with a header (@smem_partition_header) that identifies
+ * the partition and holds properties for the two internal memory regions. The
+ * two regions are cached and non-cached memory respectively. Each region
+ * contains a linked list of allocation headers (@smem_private_entry) followed
+ * by their data.
+ *
+ * Items in the non-cached region are allocated from the start of the partition
+ * while items in the cached region are allocated from the end. The free area
+ * is hence the region between the cached and non-cached offsets.
+ *
+ *
+ * To synchronize allocations in the shared memory heaps a remote spinlock must
+ * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
+ * platforms.
+ */
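+
+/*
+ * Rough layout of a private partition, as implied by the structures below
+ * (an illustrative sketch, not an authoritative memory map):
+ *
+ *	+----------------------------+  partition base
+ *	| smem_partition_header      |
+ *	+----------------------------+
+ *	| smem_private_entry + data  |  uncached items, allocated from the start
+ *	| ...                        |
+ *	+----------------------------+  <- offset_free_uncached
+ *	|         free space         |
+ *	+----------------------------+  <- offset_free_cached
+ *	| ...                        |
+ *	| smem_private_entry + data  |  cached items, allocated from the end
+ *	+----------------------------+  partition base + size
+ */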
+
+/*
+ * Item 3 of the global heap contains an array of versions for the various
+ * software components in the SoC. We verify that the boot loader version
+ * matches the expected version (SMEM_EXPECTED_VERSION) as a sanity check.
+ */
+#define SMEM_ITEM_VERSION	3
+#define  SMEM_MASTER_SBL_VERSION_INDEX	7
+#define  SMEM_EXPECTED_VERSION		11
+
+/*
+ * The first 8 items are only to be allocated by the boot loader while
+ * initializing the heap.
+ */
+#define SMEM_ITEM_LAST_FIXED	8
+
+/* Highest accepted item number, for both global and private heaps */
+#define SMEM_ITEM_COUNT		512
+
+/* Processor/host identifier for the application processor */
+#define SMEM_HOST_APPS		0
+
+/* Max number of processors/hosts in a system */
+#define SMEM_HOST_COUNT		9
+
+/**
+ * struct smem_proc_comm - proc_comm communication struct (legacy)
+ * @command:	current command to be executed
+ * @status:	status of the currently requested command
+ * @params:	parameters to the command
+ */
+struct smem_proc_comm {
+	__le32 command;
+	__le32 status;
+	__le32 params[2];
+};
+
+/**
+ * struct smem_global_entry - entry to reference smem items on the heap
+ * @allocated:	boolean to indicate if this entry is used
+ * @offset:	offset to the allocated space
+ * @size:	size of the allocated space, 8 byte aligned
+ * @aux_base:	base address for the memory region used by this unit, or 0 for
+ *		the default region. bits 0,1 are reserved
+ */
+struct smem_global_entry {
+	__le32 allocated;
+	__le32 offset;
+	__le32 size;
+	__le32 aux_base; /* bits 1:0 reserved */
+};
+#define AUX_BASE_MASK		0xfffffffc
+
+/**
+ * struct smem_header - header found in beginning of primary smem region
+ * @proc_comm:		proc_comm communication interface (legacy)
+ * @version:		array of versions for the various subsystems
+ * @initialized:	boolean to indicate that smem is initialized
+ * @free_offset:	index of the first unallocated byte in smem
+ * @available:		number of bytes available for allocation
+ * @reserved:		reserved field, must be 0
+ * @toc:	array of references to items
+ */
+struct smem_header {
+	struct smem_proc_comm proc_comm[4];
+	__le32 version[32];
+	__le32 initialized;
+	__le32 free_offset;
+	__le32 available;
+	__le32 reserved;
+	struct smem_global_entry toc[SMEM_ITEM_COUNT];
+};
+
+/**
+ * struct smem_ptable_entry - one entry in the @smem_ptable list
+ * @offset:	offset, within the main shared memory region, of the partition
+ * @size:	size of the partition
+ * @flags:	flags for the partition (currently unused)
+ * @host0:	first processor/host with access to this partition
+ * @host1:	second processor/host with access to this partition
+ * @reserved:	reserved entries for later use
+ */
+struct smem_ptable_entry {
+	__le32 offset;
+	__le32 size;
+	__le32 flags;
+	__le16 host0;
+	__le16 host1;
+	__le32 reserved[8];
+};
+
+/**
+ * struct smem_ptable - partition table for the private partitions
+ * @magic:	magic number, must be SMEM_PTABLE_MAGIC
+ * @version:	version of the partition table
+ * @num_entries: number of partitions in the table
+ * @reserved:	for now reserved entries
+ * @entry:	list of @smem_ptable_entry for the @num_entries partitions
+ */
+struct smem_ptable {
+	u8 magic[4];
+	__le32 version;
+	__le32 num_entries;
+	__le32 reserved[5];
+	struct smem_ptable_entry entry[];
+};
+
+static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
+
+/**
+ * struct smem_partition_header - header of the partitions
+ * @magic:	magic number, must be SMEM_PART_MAGIC
+ * @host0:	first processor/host with access to this partition
+ * @host1:	second processor/host with access to this partition
+ * @size:	size of the partition
+ * @offset_free_uncached: offset to the first free byte of uncached memory in
+ *		this partition
+ * @offset_free_cached: offset to the first free byte of cached memory in this
+ *		partition
+ * @reserved:	for now reserved entries
+ */
+struct smem_partition_header {
+	u8 magic[4];
+	__le16 host0;
+	__le16 host1;
+	__le32 size;
+	__le32 offset_free_uncached;
+	__le32 offset_free_cached;
+	__le32 reserved[3];
+};
+
+static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };
+
+/**
+ * struct smem_private_entry - header of each item in the private partition
+ * @canary:	magic number, must be SMEM_PRIVATE_CANARY
+ * @item:	identifying number of the smem item
+ * @size:	size of the data, including padding bytes
+ * @padding_data: number of bytes of padding of data
+ * @padding_hdr: number of bytes of padding between the header and the data
+ * @reserved:	for now reserved entry
+ */
+struct smem_private_entry {
+	u16 canary; /* bytes are the same so no swapping needed */
+	__le16 item;
+	__le32 size; /* includes padding bytes */
+	__le16 padding_data;
+	__le16 padding_hdr;
+	__le32 reserved;
+};
+#define SMEM_PRIVATE_CANARY	0xa5a5
+
+/**
+ * struct smem_region - representation of a chunk of memory used for smem
+ * @aux_base:	identifier of aux_mem base
+ * @virt_base:	virtual base address of memory with this aux_mem identifier
+ * @size:	size of the memory region
+ */
+struct smem_region {
+	u32 aux_base;
+	void __iomem *virt_base;
+	size_t size;
+};
+
+/**
+ * struct qcom_smem - device data for the smem device
+ * @dev:	device pointer
+ * @hwlock:	reference to a hwspinlock
+ * @partitions:	list of pointers to partitions affecting the current
+ *		processor/host
+ * @num_regions: number of @regions
+ * @regions:	list of the memory regions defining the shared memory
+ */
+struct qcom_smem {
+	struct device *dev;
+
+	struct hwspinlock *hwlock;
+
+	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+
+	unsigned num_regions;
+	struct smem_region regions[0];
+};
+
+static struct smem_private_entry *
+phdr_to_last_private_entry(struct smem_partition_header *phdr)
+{
+	void *p = phdr;
+
+	return p + le32_to_cpu(phdr->offset_free_uncached);
+}
+
+static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
+{
+	void *p = phdr;
+
+	return p + le32_to_cpu(phdr->offset_free_cached);
+}
+
+static struct smem_private_entry *
+phdr_to_first_private_entry(struct smem_partition_header *phdr)
+{
+	void *p = phdr;
+
+	return p + sizeof(*phdr);
+}
+
+static struct smem_private_entry *
+private_entry_next(struct smem_private_entry *e)
+{
+	void *p = e;
+
+	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
+	       le32_to_cpu(e->size);
+}
+
+static void *entry_to_item(struct smem_private_entry *e)
+{
+	void *p = e;
+
+	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
+}
+
+/* Pointer to the one and only smem handle */
+static struct qcom_smem *__smem;
+
+/* Timeout (ms) for the trylock of remote spinlocks */
+#define HWSPINLOCK_TIMEOUT	1000
+
+static int qcom_smem_alloc_private(struct qcom_smem *smem,
+				   unsigned host,
+				   unsigned item,
+				   size_t size)
+{
+	struct smem_partition_header *phdr;
+	struct smem_private_entry *hdr, *end;
+	size_t alloc_size;
+	void *cached;
+
+	phdr = smem->partitions[host];
+	hdr = phdr_to_first_private_entry(phdr);
+	end = phdr_to_last_private_entry(phdr);
+	cached = phdr_to_first_cached_entry(phdr);
+
+	while (hdr < end) {
+		if (hdr->canary != SMEM_PRIVATE_CANARY) {
+			dev_err(smem->dev,
+				"Found invalid canary in host %d partition\n",
+				host);
+			return -EINVAL;
+		}
+
+		if (le16_to_cpu(hdr->item) == item)
+			return -EEXIST;
+
+		hdr = private_entry_next(hdr);
+	}
+
+	/* Check that we don't grow into the cached region */
+	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
+	if ((void *)hdr + alloc_size >= cached) {
+		dev_err(smem->dev, "Out of memory\n");
+		return -ENOSPC;
+	}
+
+	hdr->canary = SMEM_PRIVATE_CANARY;
+	hdr->item = cpu_to_le16(item);
+	hdr->size = cpu_to_le32(ALIGN(size, 8));
+	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
+	hdr->padding_hdr = 0;
+
+	/*
+	 * Ensure the header is written before we advance the free offset, so
+	 * that remote processors that do not take the remote spinlock still
+	 * get a consistent view of the linked list.
+	 */
+	wmb();
+	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
+
+	return 0;
+}
+
+static int qcom_smem_alloc_global(struct qcom_smem *smem,
+				  unsigned item,
+				  size_t size)
+{
+	struct smem_header *header;
+	struct smem_global_entry *entry;
+
+	if (WARN_ON(item >= SMEM_ITEM_COUNT))
+		return -EINVAL;
+
+	header = smem->regions[0].virt_base;
+	entry = &header->toc[item];
+	if (entry->allocated)
+		return -EEXIST;
+
+	size = ALIGN(size, 8);
+	if (WARN_ON(size > le32_to_cpu(header->available)))
+		return -ENOMEM;
+
+	entry->offset = header->free_offset;
+	entry->size = cpu_to_le32(size);
+
+	/*
+	 * Ensure the header is consistent before we mark the item allocated,
+	 * so that remote processors will get a consistent view of the item
+	 * even though they do not take the spinlock on read.
+	 */
+	wmb();
+	entry->allocated = cpu_to_le32(1);
+
+	le32_add_cpu(&header->free_offset, size);
+	le32_add_cpu(&header->available, -size);
+
+	return 0;
+}
+
+/**
+ * qcom_smem_alloc() - allocate space for a smem item
+ * @host:	remote processor id, or -1
+ * @item:	smem item handle
+ * @size:	number of bytes to be allocated
+ *
+ * Allocate space for a given smem item of size @size, given that the item is
+ * not yet allocated.
+ */
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
+{
+	unsigned long flags;
+	int ret;
+
+	if (!__smem)
+		return -EPROBE_DEFER;
+
+	if (item < SMEM_ITEM_LAST_FIXED) {
+		dev_err(__smem->dev,
+			"Rejecting allocation of static entry %d\n", item);
+		return -EINVAL;
+	}
+
+	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+					  HWSPINLOCK_TIMEOUT,
+					  &flags);
+	if (ret)
+		return ret;
+
+	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
+		ret = qcom_smem_alloc_private(__smem, host, item, size);
+	else
+		ret = qcom_smem_alloc_global(__smem, item, size);
+
+	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_smem_alloc);
+
+static void *qcom_smem_get_global(struct qcom_smem *smem,
+				  unsigned item,
+				  size_t *size)
+{
+	struct smem_header *header;
+	struct smem_region *area;
+	struct smem_global_entry *entry;
+	u32 aux_base;
+	unsigned i;
+
+	if (WARN_ON(item >= SMEM_ITEM_COUNT))
+		return ERR_PTR(-EINVAL);
+
+	header = smem->regions[0].virt_base;
+	entry = &header->toc[item];
+	if (!entry->allocated)
+		return ERR_PTR(-ENXIO);
+
+	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
+
+	for (i = 0; i < smem->num_regions; i++) {
+		area = &smem->regions[i];
+
+		if (area->aux_base == aux_base || !aux_base) {
+			if (size != NULL)
+				*size = le32_to_cpu(entry->size);
+			return area->virt_base + le32_to_cpu(entry->offset);
+		}
+	}
+
+	return ERR_PTR(-ENOENT);
+}
+
+static void *qcom_smem_get_private(struct qcom_smem *smem,
+				   unsigned host,
+				   unsigned item,
+				   size_t *size)
+{
+	struct smem_partition_header *phdr;
+	struct smem_private_entry *e, *end;
+
+	phdr = smem->partitions[host];
+	e = phdr_to_first_private_entry(phdr);
+	end = phdr_to_last_private_entry(phdr);
+
+	while (e < end) {
+		if (e->canary != SMEM_PRIVATE_CANARY) {
+			dev_err(smem->dev,
+				"Found invalid canary in host %d partition\n",
+				host);
+			return ERR_PTR(-EINVAL);
+		}
+
+		if (le16_to_cpu(e->item) == item) {
+			if (size != NULL)
+				*size = le32_to_cpu(e->size) -
+					le16_to_cpu(e->padding_data);
+
+			return entry_to_item(e);
+		}
+
+		e = private_entry_next(e);
+	}
+
+	return ERR_PTR(-ENOENT);
+}
+
+/**
+ * qcom_smem_get() - resolve ptr and size of a smem item
+ * @host:	the remote processor, or -1
+ * @item:	smem item handle
+ * @size:	pointer to be filled out with size of the item
+ *
+ * Looks up smem item and returns pointer to it. Size of smem
+ * item is returned in @size.
+ */
+void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
+{
+	unsigned long flags;
+	int ret;
+	void *ptr = ERR_PTR(-EPROBE_DEFER);
+
+	if (!__smem)
+		return ptr;
+
+	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+					  HWSPINLOCK_TIMEOUT,
+					  &flags);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
+		ptr = qcom_smem_get_private(__smem, host, item, size);
+	else
+		ptr = qcom_smem_get_global(__smem, item, size);
+
+	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+	return ptr;
+}
+EXPORT_SYMBOL(qcom_smem_get);
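+
+/*
+ * Illustrative client usage (a sketch; MY_SMEM_ITEM and the struct are
+ * placeholders, not a defined SMEM item):
+ *
+ *	struct my_shared_data *data;
+ *	size_t size;
+ *	int ret;
+ *
+ *	ret = qcom_smem_alloc(remote_host, MY_SMEM_ITEM, sizeof(*data));
+ *	if (ret < 0 && ret != -EEXIST)
+ *		return ret;
+ *
+ *	data = qcom_smem_get(remote_host, MY_SMEM_ITEM, &size);
+ *	if (IS_ERR(data))
+ *		return PTR_ERR(data);
+ */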
+
+/**
+ * qcom_smem_get_free_space() - retrieve amount of free space in a partition
+ * @host:	the remote processor identifying a partition, or -1
+ *
+ * To be used by smem clients as a quick way to determine if any new
+ * allocations have been made.
+ */
+int qcom_smem_get_free_space(unsigned host)
+{
+	struct smem_partition_header *phdr;
+	struct smem_header *header;
+	unsigned ret;
+
+	if (!__smem)
+		return -EPROBE_DEFER;
+
+	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+		phdr = __smem->partitions[host];
+		ret = le32_to_cpu(phdr->offset_free_cached) -
+		      le32_to_cpu(phdr->offset_free_uncached);
+	} else {
+		header = __smem->regions[0].virt_base;
+		ret = le32_to_cpu(header->available);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_smem_get_free_space);
+
+static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
+{
+	__le32 *versions;
+	size_t size;
+
+	versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size);
+	if (IS_ERR(versions)) {
+		dev_err(smem->dev, "Unable to read the version item\n");
+		return -ENOENT;
+	}
+
+	if (size < sizeof(unsigned) * SMEM_MASTER_SBL_VERSION_INDEX) {
+		dev_err(smem->dev, "Version item is too small\n");
+		return -EINVAL;
+	}
+
+	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
+}
+
+static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
+					  unsigned local_host)
+{
+	struct smem_partition_header *header;
+	struct smem_ptable_entry *entry;
+	struct smem_ptable *ptable;
+	unsigned remote_host;
+	u32 version, host0, host1;
+	int i;
+
+	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
+	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
+		return 0;
+
+	version = le32_to_cpu(ptable->version);
+	if (version != 1) {
+		dev_err(smem->dev,
+			"Unsupported partition header version %d\n", version);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
+		entry = &ptable->entry[i];
+		host0 = le16_to_cpu(entry->host0);
+		host1 = le16_to_cpu(entry->host1);
+
+		if (host0 != local_host && host1 != local_host)
+			continue;
+
+		if (!le32_to_cpu(entry->offset))
+			continue;
+
+		if (!le32_to_cpu(entry->size))
+			continue;
+
+		if (host0 == local_host)
+			remote_host = host1;
+		else
+			remote_host = host0;
+
+		if (remote_host >= SMEM_HOST_COUNT) {
+			dev_err(smem->dev,
+				"Invalid remote host %d\n",
+				remote_host);
+			return -EINVAL;
+		}
+
+		if (smem->partitions[remote_host]) {
+			dev_err(smem->dev,
+				"Already found a partition for host %d\n",
+				remote_host);
+			return -EINVAL;
+		}
+
+		header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+		host0 = le16_to_cpu(header->host0);
+		host1 = le16_to_cpu(header->host1);
+
+		if (memcmp(header->magic, SMEM_PART_MAGIC,
+			    sizeof(header->magic))) {
+			dev_err(smem->dev,
+				"Partition %d has invalid magic\n", i);
+			return -EINVAL;
+		}
+
+		if (host0 != local_host && host1 != local_host) {
+			dev_err(smem->dev,
+				"Partition %d hosts are invalid\n", i);
+			return -EINVAL;
+		}
+
+		if (host0 != remote_host && host1 != remote_host) {
+			dev_err(smem->dev,
+				"Partition %d hosts are invalid\n", i);
+			return -EINVAL;
+		}
+
+		if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
+			dev_err(smem->dev,
+				"Partition %d has invalid size\n", i);
+			return -EINVAL;
+		}
+
+		if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
+			dev_err(smem->dev,
+				"Partition %d has invalid free pointer\n", i);
+			return -EINVAL;
+		}
+
+		smem->partitions[remote_host] = header;
+	}
+
+	return 0;
+}
+
+static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
+				const char *name, int i)
+{
+	struct device_node *np;
+	struct resource r;
+	int ret;
+
+	np = of_parse_phandle(dev->of_node, name, 0);
+	if (!np) {
+		dev_err(dev, "No %s specified\n", name);
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(np, 0, &r);
+	of_node_put(np);
+	if (ret)
+		return ret;
+
+	smem->regions[i].aux_base = (u32)r.start;
+	smem->regions[i].size = resource_size(&r);
+	smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, resource_size(&r));
+	if (!smem->regions[i].virt_base)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int qcom_smem_probe(struct platform_device *pdev)
+{
+	struct smem_header *header;
+	struct qcom_smem *smem;
+	size_t array_size;
+	int num_regions;
+	int hwlock_id;
+	u32 version;
+	int ret;
+
+	num_regions = 1;
+	if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
+		num_regions++;
+
+	array_size = num_regions * sizeof(struct smem_region);
+	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
+	if (!smem)
+		return -ENOMEM;
+
+	smem->dev = &pdev->dev;
+	smem->num_regions = num_regions;
+
+	ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
+	if (ret)
+		return ret;
+
+	if (num_regions > 1) {
+		ret = qcom_smem_map_memory(smem, &pdev->dev,
+					   "qcom,rpm-msg-ram", 1);
+		if (ret)
+			return ret;
+	}
+
+	header = smem->regions[0].virt_base;
+	if (le32_to_cpu(header->initialized) != 1 ||
+	    le32_to_cpu(header->reserved)) {
+		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
+		return -EINVAL;
+	}
+
+	version = qcom_smem_get_sbl_version(smem);
+	if (version >> 16 != SMEM_EXPECTED_VERSION) {
+		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
+		return -EINVAL;
+	}
+
+	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
+	if (ret < 0)
+		return ret;
+
+	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
+	if (hwlock_id < 0) {
+		if (hwlock_id != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+		return hwlock_id;
+	}
+
+	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
+	if (!smem->hwlock)
+		return -ENXIO;
+
+	__smem = smem;
+
+	return 0;
+}
+
+static int qcom_smem_remove(struct platform_device *pdev)
+{
+	hwspin_lock_free(__smem->hwlock);
+	__smem = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id qcom_smem_of_match[] = {
+	{ .compatible = "qcom,smem" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
+
+static struct platform_driver qcom_smem_driver = {
+	.probe = qcom_smem_probe,
+	.remove = qcom_smem_remove,
+	.driver  = {
+		.name = "qcom-smem",
+		.of_match_table = qcom_smem_of_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init qcom_smem_init(void)
+{
+	return platform_driver_register(&qcom_smem_driver);
+}
+arch_initcall(qcom_smem_init);
+
+static void __exit qcom_smem_exit(void)
+{
+	platform_driver_unregister(&qcom_smem_driver);
+}
+module_exit(qcom_smem_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.14/drivers/soc/qcom/smem_state.c b/src/kernel/linux/v4.14/drivers/soc/qcom/smem_state.c
new file mode 100644
index 0000000..d5437ca
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/soc/qcom/smem_state.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem_state.h>
+
+static LIST_HEAD(smem_states);
+static DEFINE_MUTEX(list_lock);
+
+/**
+ * struct qcom_smem_state - state context
+ * @refcount:	refcount for the state
+ * @orphan:	boolean indicator that this state has been unregistered
+ * @list:	entry in smem_states list
+ * @of_node:	of_node to use for matching the state in DT
+ * @priv:	implementation private data
+ * @ops:	ops for the state
+ */
+struct qcom_smem_state {
+	struct kref refcount;
+	bool orphan;
+
+	struct list_head list;
+	struct device_node *of_node;
+
+	void *priv;
+
+	struct qcom_smem_state_ops ops;
+};
+
+/**
+ * qcom_smem_state_update_bits() - update the masked bits in state with value
+ * @state:	state handle acquired by calling qcom_smem_state_get()
+ * @mask:	bit mask for the change
+ * @value:	new value for the masked bits
+ *
+ * Returns 0 on success, otherwise negative errno.
+ */
+int qcom_smem_state_update_bits(struct qcom_smem_state *state,
+				u32 mask,
+				u32 value)
+{
+	if (state->orphan)
+		return -ENXIO;
+
+	if (!state->ops.update_bits)
+		return -ENOTSUPP;
+
+	return state->ops.update_bits(state->priv, mask, value);
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_update_bits);
+
+static struct qcom_smem_state *of_node_to_state(struct device_node *np)
+{
+	struct qcom_smem_state *state;
+
+	mutex_lock(&list_lock);
+
+	list_for_each_entry(state, &smem_states, list) {
+		if (state->of_node == np) {
+			kref_get(&state->refcount);
+			goto unlock;
+		}
+	}
+	state = ERR_PTR(-EPROBE_DEFER);
+
+unlock:
+	mutex_unlock(&list_lock);
+
+	return state;
+}
+
+/**
+ * qcom_smem_state_get() - acquire handle to a state
+ * @dev:	client device pointer
+ * @con_id:	name of the state to lookup
+ * @bit:	flags from the state reference, indicating which bit is affected
+ *
+ * Returns handle to the state, or ERR_PTR(). qcom_smem_state_put() must be
+ * called to release the returned state handle.
+ */
+struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
+					    const char *con_id,
+					    unsigned *bit)
+{
+	struct qcom_smem_state *state;
+	struct of_phandle_args args;
+	int index = 0;
+	int ret;
+
+	if (con_id) {
+		index = of_property_match_string(dev->of_node,
+						 "qcom,smem-state-names",
+						 con_id);
+		if (index < 0) {
+			dev_err(dev, "missing qcom,smem-state-names\n");
+			return ERR_PTR(index);
+		}
+	}
+
+	ret = of_parse_phandle_with_args(dev->of_node,
+					 "qcom,smem-states",
+					 "#qcom,smem-state-cells",
+					 index,
+					 &args);
+	if (ret) {
+		dev_err(dev, "failed to parse qcom,smem-states property\n");
+		return ERR_PTR(ret);
+	}
+
+	if (args.args_count != 1) {
+		dev_err(dev, "invalid #qcom,smem-state-cells\n");
+		state = ERR_PTR(-EINVAL);
+		goto put;
+	}
+
+	state = of_node_to_state(args.np);
+	if (IS_ERR(state))
+		goto put;
+
+	*bit = args.args[0];
+
+put:
+	of_node_put(args.np);
+	return state;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_get);
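+
+/*
+ * Illustrative devicetree usage of the binding consumed by
+ * qcom_smem_state_get() above (a sketch; the node and state names are made
+ * up):
+ *
+ *	wcnss {
+ *		qcom,smem-states = <&wcnss_smp2p_out 0>;
+ *		qcom,smem-state-names = "stop";
+ *	};
+ *
+ * where the referenced provider declares #qcom,smem-state-cells = <1> and
+ * the single cell selects the bit returned through @bit.
+ */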
+
+static void qcom_smem_state_release(struct kref *ref)
+{
+	struct qcom_smem_state *state = container_of(ref, struct qcom_smem_state, refcount);
+
+	list_del(&state->list);
+	kfree(state);
+}
+
+/**
+ * qcom_smem_state_put() - release state handle
+ * @state:	state handle to be released
+ */
+void qcom_smem_state_put(struct qcom_smem_state *state)
+{
+	mutex_lock(&list_lock);
+	kref_put(&state->refcount, qcom_smem_state_release);
+	mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_put);
+
+/**
+ * qcom_smem_state_register() - register a new state
+ * @of_node:	of_node used for matching client lookups
+ * @ops:	implementation ops
+ * @priv:	implementation specific private data
+ */
+struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node,
+						 const struct qcom_smem_state_ops *ops,
+						 void *priv)
+{
+	struct qcom_smem_state *state;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return ERR_PTR(-ENOMEM);
+
+	kref_init(&state->refcount);
+
+	state->of_node = of_node;
+	state->ops = *ops;
+	state->priv = priv;
+
+	mutex_lock(&list_lock);
+	list_add(&state->list, &smem_states);
+	mutex_unlock(&list_lock);
+
+	return state;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_register);
+
+/**
+ * qcom_smem_state_unregister() - unregister a registered state
+ * @state:	state handle to be unregistered
+ */
+void qcom_smem_state_unregister(struct qcom_smem_state *state)
+{
+	state->orphan = true;
+	qcom_smem_state_put(state);
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_unregister);
diff --git a/src/kernel/linux/v4.14/drivers/soc/qcom/smp2p.c b/src/kernel/linux/v4.14/drivers/soc/qcom/smp2p.c
new file mode 100644
index 0000000..f51fb2e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/soc/qcom/smp2p.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/spinlock.h>
+
+/*
+ * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
+ * of a single 32-bit value between two processors.  Each value has a single
+ * writer (the local side) and a single reader (the remote side). Values are
+ * uniquely identified in the system by the directed edge (local processor ID
+ * to remote processor ID) and a string identifier.
+ *
+ * Each processor is responsible for creating the outgoing SMEM items and each
+ * item is writable by the local processor and readable by the remote
+ * processor.  By using two separate SMEM items that are single-reader and
+ * single-writer, SMP2P does not require any remote locking mechanisms.
+ *
+ * The driver uses the Linux interrupt framework to expose a virtual interrupt
+ * controller for each inbound entry, and registers a smem state handle for
+ * each outbound entry.
+ */
+
+#define SMP2P_MAX_ENTRY 16
+#define SMP2P_MAX_ENTRY_NAME 16
+
+#define SMP2P_FEATURE_SSR_ACK 0x1
+
+#define SMP2P_MAGIC 0x504d5324
+
+/**
+ * struct smp2p_smem_item - in memory communication structure
+ * @magic:		magic number
+ * @version:		version - must be 1
+ * @features:		features flag - currently unused
+ * @local_pid:		processor id of sending end
+ * @remote_pid:		processor id of receiving end
+ * @total_entries:	number of entries - always SMP2P_MAX_ENTRY
+ * @valid_entries:	number of allocated entries
+ * @flags:		currently unused
+ * @entries:		individual communication entries
+ *     @name:		name of the entry
+ *     @value:		content of the entry
+ */
+struct smp2p_smem_item {
+	u32 magic;
+	u8 version;
+	unsigned features:24;
+	u16 local_pid;
+	u16 remote_pid;
+	u16 total_entries;
+	u16 valid_entries;
+	u32 flags;
+
+	struct {
+		u8 name[SMP2P_MAX_ENTRY_NAME];
+		u32 value;
+	} entries[SMP2P_MAX_ENTRY];
+} __packed;
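+
+/*
+ * Packed as above, the header is 20 bytes (magic 4, version 1, features 3,
+ * four u16 fields 8, flags 4) and each of the 16 entries is 20 bytes, so a
+ * complete smp2p item occupies 340 bytes of shared memory.
+ */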
+
+/**
+ * struct smp2p_entry - driver context matching one entry
+ * @node:	list entry to keep track of allocated entries
+ * @smp2p:	reference to the device driver context
+ * @name:	name of the entry, to match against smp2p_smem_item
+ * @value:	pointer to smp2p_smem_item entry value
+ * @last_value:	last handled value
+ * @domain:	irq_domain for inbound entries
+ * @irq_enabled:bitmap to track enabled irq bits
+ * @irq_rising:	bitmap to mark irq bits for rising detection
+ * @irq_falling:bitmap to mark irq bits for falling detection
+ * @state:	smem state handle
+ * @lock:	spinlock to protect read-modify-write of the value
+ */
+struct smp2p_entry {
+	struct list_head node;
+	struct qcom_smp2p *smp2p;
+
+	const char *name;
+	u32 *value;
+	u32 last_value;
+
+	struct irq_domain *domain;
+	DECLARE_BITMAP(irq_enabled, 32);
+	DECLARE_BITMAP(irq_rising, 32);
+	DECLARE_BITMAP(irq_falling, 32);
+
+	struct qcom_smem_state *state;
+
+	spinlock_t lock;
+};
+
+#define SMP2P_INBOUND	0
+#define SMP2P_OUTBOUND	1
+
+/**
+ * struct qcom_smp2p - device driver context
+ * @dev:	device driver handle
+ * @in:		pointer to the inbound smem item
+ * @smem_items:	ids of the two smem items
+ * @valid_entries: already scanned inbound entries
+ * @local_pid:	processor id of the inbound edge
+ * @remote_pid:	processor id of the outbound edge
+ * @ipc_regmap:	regmap for the outbound ipc
+ * @ipc_offset:	offset within the regmap
+ * @ipc_bit:	bit in regmap@offset to kick to signal remote processor
+ * @inbound:	list of inbound entries
+ * @outbound:	list of outbound entries
+ */
+struct qcom_smp2p {
+	struct device *dev;
+
+	struct smp2p_smem_item *in;
+	struct smp2p_smem_item *out;
+
+	unsigned smem_items[SMP2P_OUTBOUND + 1];
+
+	unsigned valid_entries;
+
+	unsigned local_pid;
+	unsigned remote_pid;
+
+	struct regmap *ipc_regmap;
+	int ipc_offset;
+	int ipc_bit;
+
+	struct list_head inbound;
+	struct list_head outbound;
+};
+
+static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
+{
+	/* Make sure any updated data is written before the kick */
+	wmb();
+	regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
+}
+
+/**
+ * qcom_smp2p_intr() - interrupt handler for incoming notifications
+ * @irq:	unused
+ * @data:	smp2p driver context
+ *
+ * Handle notifications from the remote side to handle newly allocated entries
+ * or any changes to the state bits of existing entries.
+ */
+static irqreturn_t qcom_smp2p_intr(int irq, void *data)
+{
+	struct smp2p_smem_item *in;
+	struct smp2p_entry *entry;
+	struct qcom_smp2p *smp2p = data;
+	unsigned smem_id = smp2p->smem_items[SMP2P_INBOUND];
+	unsigned pid = smp2p->remote_pid;
+	size_t size;
+	int irq_pin;
+	u32 status;
+	char buf[SMP2P_MAX_ENTRY_NAME];
+	u32 val;
+	int i;
+
+	in = smp2p->in;
+
+	/* Acquire smem item, if not already found */
+	if (!in) {
+		in = qcom_smem_get(pid, smem_id, &size);
+		if (IS_ERR(in)) {
+			dev_err(smp2p->dev,
+				"Unable to acquire remote smp2p item\n");
+			return IRQ_HANDLED;
+		}
+
+		smp2p->in = in;
+	}
+
+	/* Match newly created entries */
+	for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
+		list_for_each_entry(entry, &smp2p->inbound, node) {
+			memcpy(buf, in->entries[i].name, sizeof(buf));
+			if (!strcmp(buf, entry->name)) {
+				entry->value = &in->entries[i].value;
+				break;
+			}
+		}
+	}
+	smp2p->valid_entries = i;
+
+	/* Fire interrupts based on any value changes */
+	list_for_each_entry(entry, &smp2p->inbound, node) {
+		/* Ignore entries not yet allocated by the remote side */
+		if (!entry->value)
+			continue;
+
+		val = readl(entry->value);
+
+		status = val ^ entry->last_value;
+		entry->last_value = val;
+
+		/* No changes to this entry? */
+		if (!status)
+			continue;
+
+		for_each_set_bit(i, entry->irq_enabled, 32) {
+			if (!(status & BIT(i)))
+				continue;
+
+			if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
+			    (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
+				irq_pin = irq_find_mapping(entry->domain, i);
+				handle_nested_irq(irq_pin);
+			}
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void smp2p_mask_irq(struct irq_data *irqd)
+{
+	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+	irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+	clear_bit(irq, entry->irq_enabled);
+}
+
+static void smp2p_unmask_irq(struct irq_data *irqd)
+{
+	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+	irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+	set_bit(irq, entry->irq_enabled);
+}
+
+static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
+{
+	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+	irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+	if (!(type & IRQ_TYPE_EDGE_BOTH))
+		return -EINVAL;
+
+	if (type & IRQ_TYPE_EDGE_RISING)
+		set_bit(irq, entry->irq_rising);
+	else
+		clear_bit(irq, entry->irq_rising);
+
+	if (type & IRQ_TYPE_EDGE_FALLING)
+		set_bit(irq, entry->irq_falling);
+	else
+		clear_bit(irq, entry->irq_falling);
+
+	return 0;
+}
+
+static struct irq_chip smp2p_irq_chip = {
+	.name           = "smp2p",
+	.irq_mask       = smp2p_mask_irq,
+	.irq_unmask     = smp2p_unmask_irq,
+	.irq_set_type	= smp2p_set_irq_type,
+};
+
+static int smp2p_irq_map(struct irq_domain *d,
+			 unsigned int irq,
+			 irq_hw_number_t hw)
+{
+	struct smp2p_entry *entry = d->host_data;
+
+	irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
+	irq_set_chip_data(irq, entry);
+	irq_set_nested_thread(irq, 1);
+	irq_set_noprobe(irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops smp2p_irq_ops = {
+	.map = smp2p_irq_map,
+	.xlate = irq_domain_xlate_twocell,
+};
+
+static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
+				    struct smp2p_entry *entry,
+				    struct device_node *node)
+{
+	entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
+	if (!entry->domain) {
+		dev_err(smp2p->dev, "failed to add irq_domain\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int smp2p_update_bits(void *data, u32 mask, u32 value)
+{
+	struct smp2p_entry *entry = data;
+	u32 orig;
+	u32 val;
+
+	spin_lock(&entry->lock);
+	val = orig = readl(entry->value);
+	val &= ~mask;
+	val |= value;
+	writel(val, entry->value);
+	spin_unlock(&entry->lock);
+
+	if (val != orig)
+		qcom_smp2p_kick(entry->smp2p);
+
+	return 0;
+}
+
+static const struct qcom_smem_state_ops smp2p_state_ops = {
+	.update_bits = smp2p_update_bits,
+};
+
+static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
+				     struct smp2p_entry *entry,
+				     struct device_node *node)
+{
+	struct smp2p_smem_item *out = smp2p->out;
+	char buf[SMP2P_MAX_ENTRY_NAME] = {};
+
+	/* Allocate an entry from the smem item */
+	strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
+	memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);
+
+	/* Make the logical entry reference the physical value */
+	entry->value = &out->entries[out->valid_entries].value;
+
+	out->valid_entries++;
+
+	entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
+	if (IS_ERR(entry->state)) {
+		dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
+		return PTR_ERR(entry->state);
+	}
+
+	return 0;
+}
+
+static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
+{
+	struct smp2p_smem_item *out;
+	unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
+	unsigned pid = smp2p->remote_pid;
+	int ret;
+
+	ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
+	if (ret < 0 && ret != -EEXIST) {
+		if (ret != -EPROBE_DEFER)
+			dev_err(smp2p->dev,
+				"unable to allocate local smp2p item\n");
+		return ret;
+	}
+
+	out = qcom_smem_get(pid, smem_id, NULL);
+	if (IS_ERR(out)) {
+		dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
+		return PTR_ERR(out);
+	}
+
+	memset(out, 0, sizeof(*out));
+	out->magic = SMP2P_MAGIC;
+	out->local_pid = smp2p->local_pid;
+	out->remote_pid = smp2p->remote_pid;
+	out->total_entries = SMP2P_MAX_ENTRY;
+	out->valid_entries = 0;
+
+	/*
+	 * Make sure the rest of the header is written before we validate the
+	 * item by writing a valid version number.
+	 */
+	wmb();
+	out->version = 1;
+
+	qcom_smp2p_kick(smp2p);
+
+	smp2p->out = out;
+
+	return 0;
+}
+
+static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
+{
+	struct device_node *syscon;
+	struct device *dev = smp2p->dev;
+	const char *key;
+	int ret;
+
+	syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
+	if (!syscon) {
+		dev_err(dev, "no qcom,ipc node\n");
+		return -ENODEV;
+	}
+
+	smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
+	if (IS_ERR(smp2p->ipc_regmap))
+		return PTR_ERR(smp2p->ipc_regmap);
+
+	key = "qcom,ipc";
+	ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
+	if (ret < 0) {
+		dev_err(dev, "no offset in %s\n", key);
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
+	if (ret < 0) {
+		dev_err(dev, "no bit in %s\n", key);
+		return -EINVAL;
+	}
+
+	return 0;
+}
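+
+/*
+ * For illustration only, a hypothetical smp2p node (all values invented; the
+ * first qcom,smem cell is the inbound item id, the second the outbound one):
+ *
+ *	smp2p-example {
+ *		compatible = "qcom,smp2p";
+ *		qcom,smem = <443>, <429>;
+ *		interrupts = <GIC_SPI 158 IRQ_TYPE_EDGE_RISING>;
+ *		qcom,ipc = <&apcs 8 14>;
+ *		qcom,local-pid = <0>;
+ *		qcom,remote-pid = <4>;
+ *
+ *		master-kernel {
+ *			qcom,entry-name = "master-kernel";
+ *			#qcom,smem-state-cells = <1>;
+ *		};
+ *
+ *		slave-kernel {
+ *			qcom,entry-name = "slave-kernel";
+ *			interrupt-controller;
+ *			#interrupt-cells = <2>;
+ *		};
+ *	};
+ */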
+
+static int qcom_smp2p_probe(struct platform_device *pdev)
+{
+	struct smp2p_entry *entry;
+	struct device_node *node;
+	struct qcom_smp2p *smp2p;
+	const char *key;
+	int irq;
+	int ret;
+
+	smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
+	if (!smp2p)
+		return -ENOMEM;
+
+	smp2p->dev = &pdev->dev;
+	INIT_LIST_HEAD(&smp2p->inbound);
+	INIT_LIST_HEAD(&smp2p->outbound);
+
+	platform_set_drvdata(pdev, smp2p);
+
+	ret = smp2p_parse_ipc(smp2p);
+	if (ret)
+		return ret;
+
+	key = "qcom,smem";
+	ret = of_property_read_u32_array(pdev->dev.of_node, key,
+					 smp2p->smem_items, 2);
+	if (ret)
+		return ret;
+
+	key = "qcom,local-pid";
+	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to read %s\n", key);
+		return -EINVAL;
+	}
+
+	key = "qcom,remote-pid";
+	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to read %s\n", key);
+		return -EINVAL;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "unable to acquire smp2p interrupt\n");
+		return irq;
+	}
+
+	ret = qcom_smp2p_alloc_outbound_item(smp2p);
+	if (ret < 0)
+		return ret;
+
+	for_each_available_child_of_node(pdev->dev.of_node, node) {
+		entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			ret = -ENOMEM;
+			goto unwind_interfaces;
+		}
+
+		entry->smp2p = smp2p;
+		spin_lock_init(&entry->lock);
+
+		ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
+		if (ret < 0)
+			goto unwind_interfaces;
+
+		if (of_property_read_bool(node, "interrupt-controller")) {
+			ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
+			if (ret < 0)
+				goto unwind_interfaces;
+
+			list_add(&entry->node, &smp2p->inbound);
+		} else {
+			ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
+			if (ret < 0)
+				goto unwind_interfaces;
+
+			list_add(&entry->node, &smp2p->outbound);
+		}
+	}
+
+	/* Kick the outgoing edge after allocating entries */
+	qcom_smp2p_kick(smp2p);
+
+	ret = devm_request_threaded_irq(&pdev->dev, irq,
+					NULL, qcom_smp2p_intr,
+					IRQF_ONESHOT,
+					"smp2p", (void *)smp2p);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request interrupt\n");
+		goto unwind_interfaces;
+	}
+
+	return 0;
+
+unwind_interfaces:
+	list_for_each_entry(entry, &smp2p->inbound, node)
+		irq_domain_remove(entry->domain);
+
+	list_for_each_entry(entry, &smp2p->outbound, node)
+		qcom_smem_state_unregister(entry->state);
+
+	smp2p->out->valid_entries = 0;
+
+	return ret;
+}
+
+static int qcom_smp2p_remove(struct platform_device *pdev)
+{
+	struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
+	struct smp2p_entry *entry;
+
+	list_for_each_entry(entry, &smp2p->inbound, node)
+		irq_domain_remove(entry->domain);
+
+	list_for_each_entry(entry, &smp2p->outbound, node)
+		qcom_smem_state_unregister(entry->state);
+
+	smp2p->out->valid_entries = 0;
+
+	return 0;
+}
+
+static const struct of_device_id qcom_smp2p_of_match[] = {
+	{ .compatible = "qcom,smp2p" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);
+
+static struct platform_driver qcom_smp2p_driver = {
+	.probe = qcom_smp2p_probe,
+	.remove = qcom_smp2p_remove,
+	.driver  = {
+		.name  = "qcom_smp2p",
+		.of_match_table = qcom_smp2p_of_match,
+	},
+};
+module_platform_driver(qcom_smp2p_driver);
+
+MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.14/drivers/soc/qcom/smsm.c b/src/kernel/linux/v4.14/drivers/soc/qcom/smsm.c
new file mode 100644
index 0000000..50214b6
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/soc/qcom/smsm.c
@@ -0,0 +1,629 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+/*
+ * This driver implements the Qualcomm Shared Memory State Machine, a mechanism
+ * for communicating single bit state information to remote processors.
+ *
+ * The implementation is based on two sections of shared memory; the first
+ * holding the state bits and the second holding a matrix of subscription bits.
+ *
+ * The state bits are structured in entries of 32 bits, each belonging to one
+ * system in the SoC. The entry belonging to the local system is considered
+ * read-write, while the rest should be considered read-only.
+ *
+ * The subscription matrix consists of N bitmaps per entry, denoting interest
+ * in updates of the entry for each of the N hosts. Upon updating a state bit
+ * each host's subscription bitmap should be queried and the remote system
+ * should be interrupted if they request so.
+ *
+ * The subscription matrix is laid out in entry-major order:
+ * entry0: [host0 ... hostN]
+ *	.
+ *	.
+ * entryM: [host0 ... hostN]
+ *
+ * A third, optional, shared memory region might contain information regarding
+ * the number of entries in the state bitmap as well as number of columns in
+ * the subscription matrix.
+ */
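+
+/*
+ * Concretely: with N hosts, the subscription bitmap for (entry e, host h) is
+ * word e * N + h of the matrix, which is the indexing used by the probe code
+ * below (intr_mask + id * num_hosts, plus the host column).
+ */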
+
+/*
+ * Shared memory identifiers, used to acquire handles to respective memory
+ * region.
+ */
+#define SMEM_SMSM_SHARED_STATE		85
+#define SMEM_SMSM_CPU_INTR_MASK		333
+#define SMEM_SMSM_SIZE_INFO		419
+
+/*
+ * Default sizes, in case SMEM_SMSM_SIZE_INFO is not found.
+ */
+#define SMSM_DEFAULT_NUM_ENTRIES	8
+#define SMSM_DEFAULT_NUM_HOSTS		3
+
+struct smsm_entry;
+struct smsm_host;
+
+/**
+ * struct qcom_smsm - smsm driver context
+ * @dev:	smsm device pointer
+ * @local_host:	column in the subscription matrix representing this system
+ * @num_hosts:	number of columns in the subscription matrix
+ * @num_entries: number of entries in the state map and rows in the subscription
+ *		matrix
+ * @local_state: pointer to the local processor's state bits
+ * @subscription: pointer to local processor's row in subscription matrix
+ * @state:	smem state handle
+ * @lock:	spinlock for read-modify-write of the outgoing state
+ * @entries:	context for each of the entries
+ * @hosts:	context for each of the hosts
+ */
+struct qcom_smsm {
+	struct device *dev;
+
+	u32 local_host;
+
+	u32 num_hosts;
+	u32 num_entries;
+
+	u32 *local_state;
+	u32 *subscription;
+	struct qcom_smem_state *state;
+
+	spinlock_t lock;
+
+	struct smsm_entry *entries;
+	struct smsm_host *hosts;
+};
+
+/**
+ * struct smsm_entry - per remote processor entry context
+ * @smsm:	back-reference to driver context
+ * @domain:	IRQ domain for this entry, if representing a remote system
+ * @irq_enabled: bitmap of state bits with IRQs enabled
+ * @irq_rising:	bitmap tracking if rising bits should be propagated
+ * @irq_falling: bitmap tracking if falling bits should be propagated
+ * @last_value:	snapshot of state bits last time the interrupts were propagated
+ * @remote_state: pointer to this entry's state bits
+ * @subscription: pointer to a row in the subscription matrix representing this
+ *		entry
+ */
+struct smsm_entry {
+	struct qcom_smsm *smsm;
+
+	struct irq_domain *domain;
+	DECLARE_BITMAP(irq_enabled, 32);
+	DECLARE_BITMAP(irq_rising, 32);
+	DECLARE_BITMAP(irq_falling, 32);
+	u32 last_value;
+
+	u32 *remote_state;
+	u32 *subscription;
+};
+
+/**
+ * struct smsm_host - representation of a remote host
+ * @ipc_regmap:	regmap for outgoing interrupt
+ * @ipc_offset:	offset in @ipc_regmap for outgoing interrupt
+ * @ipc_bit:	bit in @ipc_regmap + @ipc_offset for outgoing interrupt
+ */
+struct smsm_host {
+	struct regmap *ipc_regmap;
+	int ipc_offset;
+	int ipc_bit;
+};
+
+/**
+ * smsm_update_bits() - change bit in outgoing entry and inform subscribers
+ * @data:	smsm context pointer
+ * @offset:	bit in the entry
+ * @value:	new value
+ *
+ * Used to set and clear the bits in the outgoing/local entry and inform
+ * subscribers about the change.
+ */
+static int smsm_update_bits(void *data, u32 mask, u32 value)
+{
+	struct qcom_smsm *smsm = data;
+	struct smsm_host *hostp;
+	unsigned long flags;
+	u32 changes;
+	u32 host;
+	u32 orig;
+	u32 val;
+
+	spin_lock_irqsave(&smsm->lock, flags);
+
+	/* Update the entry */
+	val = orig = readl(smsm->local_state);
+	val &= ~mask;
+	val |= value;
+
+	/* Don't signal if we didn't change the value */
+	changes = val ^ orig;
+	if (!changes) {
+		spin_unlock_irqrestore(&smsm->lock, flags);
+		goto done;
+	}
+
+	/* Write out the new value */
+	writel(val, smsm->local_state);
+	spin_unlock_irqrestore(&smsm->lock, flags);
+
+	/* Make sure the value update is ordered before any kicks */
+	wmb();
+
+	/* Iterate over all hosts to check which of them wants a kick */
+	for (host = 0; host < smsm->num_hosts; host++) {
+		hostp = &smsm->hosts[host];
+
+		val = readl(smsm->subscription + host);
+		if (val & changes && hostp->ipc_regmap) {
+			regmap_write(hostp->ipc_regmap,
+				     hostp->ipc_offset,
+				     BIT(hostp->ipc_bit));
+		}
+	}
+
+done:
+	return 0;
+}
+
+static const struct qcom_smem_state_ops smsm_state_ops = {
+	.update_bits = smsm_update_bits,
+};
+
+/**
+ * smsm_intr() - cascading IRQ handler for SMSM
+ * @irq:	unused
+ * @data:	entry related to this IRQ
+ *
+ * This function cascades an incoming interrupt from a remote system, based on
+ * the state bits and configuration.
+ */
+static irqreturn_t smsm_intr(int irq, void *data)
+{
+	struct smsm_entry *entry = data;
+	unsigned i;
+	int irq_pin;
+	u32 changed;
+	u32 val;
+
+	val = readl(entry->remote_state);
+	changed = val ^ entry->last_value;
+	entry->last_value = val;
+
+	for_each_set_bit(i, entry->irq_enabled, 32) {
+		if (!(changed & BIT(i)))
+			continue;
+
+		if (val & BIT(i)) {
+			if (test_bit(i, entry->irq_rising)) {
+				irq_pin = irq_find_mapping(entry->domain, i);
+				handle_nested_irq(irq_pin);
+			}
+		} else {
+			if (test_bit(i, entry->irq_falling)) {
+				irq_pin = irq_find_mapping(entry->domain, i);
+				handle_nested_irq(irq_pin);
+			}
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * smsm_mask_irq() - un-subscribe from cascades of IRQs of a certain status bit
+ * @irqd:	IRQ handle to be masked
+ *
+ * This un-subscribes the local CPU from interrupts upon changes to the defined
+ * status bit. The bit is also cleared from cascading.
+ */
+static void smsm_mask_irq(struct irq_data *irqd)
+{
+	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+	irq_hw_number_t irq = irqd_to_hwirq(irqd);
+	struct qcom_smsm *smsm = entry->smsm;
+	u32 val;
+
+	if (entry->subscription) {
+		val = readl(entry->subscription + smsm->local_host);
+		val &= ~BIT(irq);
+		writel(val, entry->subscription + smsm->local_host);
+	}
+
+	clear_bit(irq, entry->irq_enabled);
+}
+
+/**
+ * smsm_unmask_irq() - subscribe to cascades of IRQs of a certain status bit
+ * @irqd:	IRQ handle to be unmasked
+ *
+ * This subscribes the local CPU to interrupts upon changes to the defined
+ * status bit. The bit is also marked for cascading.
+ */
+static void smsm_unmask_irq(struct irq_data *irqd)
+{
+	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+	irq_hw_number_t irq = irqd_to_hwirq(irqd);
+	struct qcom_smsm *smsm = entry->smsm;
+	u32 val;
+
+	set_bit(irq, entry->irq_enabled);
+
+	if (entry->subscription) {
+		val = readl(entry->subscription + smsm->local_host);
+		val |= BIT(irq);
+		writel(val, entry->subscription + smsm->local_host);
+	}
+}
+
+/**
+ * smsm_set_irq_type() - updates the requested IRQ type for the cascading
+ * @irqd:	consumer interrupt handle
+ * @type:	requested flags
+ */
+static int smsm_set_irq_type(struct irq_data *irqd, unsigned int type)
+{
+	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+	irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+	if (!(type & IRQ_TYPE_EDGE_BOTH))
+		return -EINVAL;
+
+	if (type & IRQ_TYPE_EDGE_RISING)
+		set_bit(irq, entry->irq_rising);
+	else
+		clear_bit(irq, entry->irq_rising);
+
+	if (type & IRQ_TYPE_EDGE_FALLING)
+		set_bit(irq, entry->irq_falling);
+	else
+		clear_bit(irq, entry->irq_falling);
+
+	return 0;
+}
+
+static struct irq_chip smsm_irq_chip = {
+	.name           = "smsm",
+	.irq_mask       = smsm_mask_irq,
+	.irq_unmask     = smsm_unmask_irq,
+	.irq_set_type	= smsm_set_irq_type,
+};
+
+/**
+ * smsm_irq_map() - sets up a mapping for a cascaded IRQ
+ * @d:		IRQ domain representing an entry
+ * @irq:	IRQ to set up
+ * @hw:		unused
+ */
+static int smsm_irq_map(struct irq_domain *d,
+			unsigned int irq,
+			irq_hw_number_t hw)
+{
+	struct smsm_entry *entry = d->host_data;
+
+	irq_set_chip_and_handler(irq, &smsm_irq_chip, handle_level_irq);
+	irq_set_chip_data(irq, entry);
+	irq_set_nested_thread(irq, 1);
+
+	return 0;
+}
+
+static const struct irq_domain_ops smsm_irq_ops = {
+	.map = smsm_irq_map,
+	.xlate = irq_domain_xlate_twocell,
+};
+
+/**
+ * smsm_parse_ipc() - parses a qcom,ipc-%d device tree property
+ * @smsm:	smsm driver context
+ * @host_id:	index of the remote host to be resolved
+ *
+ * Parses device tree to acquire the information needed for sending the
+ * outgoing interrupts to a remote host - identified by @host_id.
+ */
+static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id)
+{
+	struct device_node *syscon;
+	struct device_node *node = smsm->dev->of_node;
+	struct smsm_host *host = &smsm->hosts[host_id];
+	char key[16];
+	int ret;
+
+	snprintf(key, sizeof(key), "qcom,ipc-%d", host_id);
+	syscon = of_parse_phandle(node, key, 0);
+	if (!syscon)
+		return 0;
+
+	host->ipc_regmap = syscon_node_to_regmap(syscon);
+	if (IS_ERR(host->ipc_regmap))
+		return PTR_ERR(host->ipc_regmap);
+
+	ret = of_property_read_u32_index(node, key, 1, &host->ipc_offset);
+	if (ret < 0) {
+		dev_err(smsm->dev, "no offset in %s\n", key);
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32_index(node, key, 2, &host->ipc_bit);
+	if (ret < 0) {
+		dev_err(smsm->dev, "no bit in %s\n", key);
+		return -EINVAL;
+	}
+
+	return 0;
+}
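+
+/*
+ * For illustration only, a hypothetical smsm node might carry (values
+ * invented):
+ *
+ *	qcom,local-host = <0>;
+ *	qcom,ipc-3 = <&apcs 8 19>;
+ *
+ * meaning host 3 is kicked by writing BIT(19) at offset 8 into the apcs
+ * regmap, as done in smsm_update_bits().
+ */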
+
+/**
+ * smsm_inbound_entry() - parse DT and set up an entry representing a remote system
+ * @smsm:	smsm driver context
+ * @entry:	entry context to be set up
+ * @node:	dt node containing the entry's properties
+ */
+static int smsm_inbound_entry(struct qcom_smsm *smsm,
+			      struct smsm_entry *entry,
+			      struct device_node *node)
+{
+	int ret;
+	int irq;
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(smsm->dev, "failed to parse smsm interrupt\n");
+		return -EINVAL;
+	}
+
+	ret = devm_request_threaded_irq(smsm->dev, irq,
+					NULL, smsm_intr,
+					IRQF_ONESHOT,
+					"smsm", (void *)entry);
+	if (ret) {
+		dev_err(smsm->dev, "failed to request interrupt\n");
+		return ret;
+	}
+
+	entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry);
+	if (!entry->domain) {
+		dev_err(smsm->dev, "failed to add irq_domain\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * smsm_get_size_info() - parse the optional memory segment for sizes
+ * @smsm:	smsm driver context
+ *
+ * Attempt to acquire the number of hosts and entries from the optional shared
+ * memory location. Not being able to find this segment should indicate that
+ * we're on an older system where these values were hard coded to
+ * SMSM_DEFAULT_NUM_ENTRIES and SMSM_DEFAULT_NUM_HOSTS.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int smsm_get_size_info(struct qcom_smsm *smsm)
+{
+	size_t size;
+	struct {
+		u32 num_hosts;
+		u32 num_entries;
+		u32 reserved0;
+		u32 reserved1;
+	} *info;
+
+	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SIZE_INFO, &size);
+	if (IS_ERR(info) && PTR_ERR(info) != -ENOENT) {
+		if (PTR_ERR(info) != -EPROBE_DEFER)
+			dev_err(smsm->dev, "unable to retrieve smsm size info\n");
+		return PTR_ERR(info);
+	} else if (IS_ERR(info) || size != sizeof(*info)) {
+		dev_warn(smsm->dev, "no smsm size info, using defaults\n");
+		smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES;
+		smsm->num_hosts = SMSM_DEFAULT_NUM_HOSTS;
+		return 0;
+	}
+
+	smsm->num_entries = info->num_entries;
+	smsm->num_hosts = info->num_hosts;
+
+	dev_dbg(smsm->dev,
+		"found custom size of smsm: %d entries %d hosts\n",
+		smsm->num_entries, smsm->num_hosts);
+
+	return 0;
+}
+
+static int qcom_smsm_probe(struct platform_device *pdev)
+{
+	struct device_node *local_node;
+	struct device_node *node;
+	struct smsm_entry *entry;
+	struct qcom_smsm *smsm;
+	u32 *intr_mask;
+	size_t size;
+	u32 *states;
+	u32 id;
+	int ret;
+
+	smsm = devm_kzalloc(&pdev->dev, sizeof(*smsm), GFP_KERNEL);
+	if (!smsm)
+		return -ENOMEM;
+	smsm->dev = &pdev->dev;
+	spin_lock_init(&smsm->lock);
+
+	ret = smsm_get_size_info(smsm);
+	if (ret)
+		return ret;
+
+	smsm->entries = devm_kcalloc(&pdev->dev,
+				     smsm->num_entries,
+				     sizeof(struct smsm_entry),
+				     GFP_KERNEL);
+	if (!smsm->entries)
+		return -ENOMEM;
+
+	smsm->hosts = devm_kcalloc(&pdev->dev,
+				   smsm->num_hosts,
+				   sizeof(struct smsm_host),
+				   GFP_KERNEL);
+	if (!smsm->hosts)
+		return -ENOMEM;
+
+	for_each_child_of_node(pdev->dev.of_node, local_node) {
+		if (of_find_property(local_node, "#qcom,smem-state-cells", NULL))
+			break;
+	}
+	if (!local_node) {
+		dev_err(&pdev->dev, "no state entry\n");
+		return -EINVAL;
+	}
+
+	of_property_read_u32(pdev->dev.of_node,
+			     "qcom,local-host",
+			     &smsm->local_host);
+
+	/* Parse the host properties */
+	for (id = 0; id < smsm->num_hosts; id++) {
+		ret = smsm_parse_ipc(smsm, id);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Acquire the main SMSM state vector */
+	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE,
+			      smsm->num_entries * sizeof(u32));
+	if (ret < 0 && ret != -EEXIST) {
+		dev_err(&pdev->dev, "unable to allocate shared state entry\n");
+		return ret;
+	}
+
+	states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
+	if (IS_ERR(states)) {
+		dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
+		return PTR_ERR(states);
+	}
+
+	/* Acquire the list of interrupt mask vectors */
+	size = smsm->num_entries * smsm->num_hosts * sizeof(u32);
+	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
+	if (ret < 0 && ret != -EEXIST) {
+		dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
+		return ret;
+	}
+
+	intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
+	if (IS_ERR(intr_mask)) {
+		dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
+		return PTR_ERR(intr_mask);
+	}
+
+	/* Setup the reference to the local state bits */
+	smsm->local_state = states + smsm->local_host;
+	smsm->subscription = intr_mask + smsm->local_host * smsm->num_hosts;
+
+	/* Register the outgoing state */
+	smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
+	if (IS_ERR(smsm->state)) {
+		dev_err(smsm->dev, "failed to register qcom_smem_state\n");
+		return PTR_ERR(smsm->state);
+	}
+
+	/* Register handlers for remote processor entries of interest. */
+	for_each_available_child_of_node(pdev->dev.of_node, node) {
+		if (!of_property_read_bool(node, "interrupt-controller"))
+			continue;
+
+		ret = of_property_read_u32(node, "reg", &id);
+		if (ret || id >= smsm->num_entries) {
+			dev_err(&pdev->dev, "invalid reg of entry\n");
+			if (!ret)
+				ret = -EINVAL;
+			goto unwind_interfaces;
+		}
+		entry = &smsm->entries[id];
+
+		entry->smsm = smsm;
+		entry->remote_state = states + id;
+
+		/* Set up subscription pointers and unsubscribe from any kicks */
+		entry->subscription = intr_mask + id * smsm->num_hosts;
+		writel(0, entry->subscription + smsm->local_host);
+
+		ret = smsm_inbound_entry(smsm, entry, node);
+		if (ret < 0)
+			goto unwind_interfaces;
+	}
+
+	platform_set_drvdata(pdev, smsm);
+
+	return 0;
+
+unwind_interfaces:
+	for (id = 0; id < smsm->num_entries; id++)
+		if (smsm->entries[id].domain)
+			irq_domain_remove(smsm->entries[id].domain);
+
+	qcom_smem_state_unregister(smsm->state);
+
+	return ret;
+}
+
+static int qcom_smsm_remove(struct platform_device *pdev)
+{
+	struct qcom_smsm *smsm = platform_get_drvdata(pdev);
+	unsigned id;
+
+	for (id = 0; id < smsm->num_entries; id++)
+		if (smsm->entries[id].domain)
+			irq_domain_remove(smsm->entries[id].domain);
+
+	qcom_smem_state_unregister(smsm->state);
+
+	return 0;
+}
+
+static const struct of_device_id qcom_smsm_of_match[] = {
+	{ .compatible = "qcom,smsm" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_smsm_of_match);
+
+static struct platform_driver qcom_smsm_driver = {
+	.probe = qcom_smsm_probe,
+	.remove = qcom_smsm_remove,
+	.driver  = {
+		.name  = "qcom-smsm",
+		.of_match_table = qcom_smsm_of_match,
+	},
+};
+module_platform_driver(qcom_smsm_driver);
+
+MODULE_DESCRIPTION("Qualcomm Shared Memory State Machine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.14/drivers/soc/qcom/spm.c b/src/kernel/linux/v4.14/drivers/soc/qcom/spm.c
new file mode 100644
index 0000000..f9d7a85
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/soc/qcom/spm.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2015, Linaro Ltd.
+ *
+ * SAW power controller driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/qcom_scm.h>
+
+#include <asm/cpuidle.h>
+#include <asm/proc-fns.h>
+#include <asm/suspend.h>
+
+#define MAX_PMIC_DATA		2
+#define MAX_SEQ_DATA		64
+#define SPM_CTL_INDEX		0x7f
+#define SPM_CTL_INDEX_SHIFT	4
+#define SPM_CTL_EN		BIT(0)
+
+enum pm_sleep_mode {
+	PM_SLEEP_MODE_STBY,
+	PM_SLEEP_MODE_RET,
+	PM_SLEEP_MODE_SPC,
+	PM_SLEEP_MODE_PC,
+	PM_SLEEP_MODE_NR,
+};
+
+enum spm_reg {
+	SPM_REG_CFG,
+	SPM_REG_SPM_CTL,
+	SPM_REG_DLY,
+	SPM_REG_PMIC_DLY,
+	SPM_REG_PMIC_DATA_0,
+	SPM_REG_PMIC_DATA_1,
+	SPM_REG_VCTL,
+	SPM_REG_SEQ_ENTRY,
+	SPM_REG_SPM_STS,
+	SPM_REG_PMIC_STS,
+	SPM_REG_NR,
+};
+
+struct spm_reg_data {
+	const u8 *reg_offset;
+	u32 spm_cfg;
+	u32 spm_dly;
+	u32 pmic_dly;
+	u32 pmic_data[MAX_PMIC_DATA];
+	u8 seq[MAX_SEQ_DATA];
+	u8 start_index[PM_SLEEP_MODE_NR];
+};
+
+struct spm_driver_data {
+	void __iomem *reg_base;
+	const struct spm_reg_data *reg_data;
+};
+
+static const u8 spm_reg_offset_v2_1[SPM_REG_NR] = {
+	[SPM_REG_CFG]		= 0x08,
+	[SPM_REG_SPM_CTL]	= 0x30,
+	[SPM_REG_DLY]		= 0x34,
+	[SPM_REG_SEQ_ENTRY]	= 0x80,
+};
+
+/* SPM register data for 8974, 8084 */
+static const struct spm_reg_data spm_reg_8974_8084_cpu  = {
+	.reg_offset = spm_reg_offset_v2_1,
+	.spm_cfg = 0x1,
+	.spm_dly = 0x3C102800,
+	.seq = { 0x03, 0x0B, 0x0F, 0x00, 0x20, 0x80, 0x10, 0xE8, 0x5B, 0x03,
+		0x3B, 0xE8, 0x5B, 0x82, 0x10, 0x0B, 0x30, 0x06, 0x26, 0x30,
+		0x0F },
+	.start_index[PM_SLEEP_MODE_STBY] = 0,
+	.start_index[PM_SLEEP_MODE_SPC] = 3,
+};
+
+static const u8 spm_reg_offset_v1_1[SPM_REG_NR] = {
+	[SPM_REG_CFG]		= 0x08,
+	[SPM_REG_SPM_CTL]	= 0x20,
+	[SPM_REG_PMIC_DLY]	= 0x24,
+	[SPM_REG_PMIC_DATA_0]	= 0x28,
+	[SPM_REG_PMIC_DATA_1]	= 0x2C,
+	[SPM_REG_SEQ_ENTRY]	= 0x80,
+};
+
+/* SPM register data for 8064 */
+static const struct spm_reg_data spm_reg_8064_cpu = {
+	.reg_offset = spm_reg_offset_v1_1,
+	.spm_cfg = 0x1F,
+	.pmic_dly = 0x02020004,
+	.pmic_data[0] = 0x0084009C,
+	.pmic_data[1] = 0x00A4001C,
+	.seq = { 0x03, 0x0F, 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01,
+		0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
+	.start_index[PM_SLEEP_MODE_STBY] = 0,
+	.start_index[PM_SLEEP_MODE_SPC] = 2,
+};
+
+static DEFINE_PER_CPU(struct spm_driver_data *, cpu_spm_drv);
+
+typedef int (*idle_fn)(void);
+static DEFINE_PER_CPU(idle_fn*, qcom_idle_ops);
+
+static inline void spm_register_write(struct spm_driver_data *drv,
+					enum spm_reg reg, u32 val)
+{
+	if (drv->reg_data->reg_offset[reg])
+		writel_relaxed(val, drv->reg_base +
+				drv->reg_data->reg_offset[reg]);
+}
+
+/* Ensure a guaranteed write before returning */
+static inline void spm_register_write_sync(struct spm_driver_data *drv,
+					enum spm_reg reg, u32 val)
+{
+	u32 ret;
+
+	if (!drv->reg_data->reg_offset[reg])
+		return;
+
+	do {
+		writel_relaxed(val, drv->reg_base +
+				drv->reg_data->reg_offset[reg]);
+		ret = readl_relaxed(drv->reg_base +
+				drv->reg_data->reg_offset[reg]);
+		if (ret == val)
+			break;
+		cpu_relax();
+	} while (1);
+}
+
+static inline u32 spm_register_read(struct spm_driver_data *drv,
+					enum spm_reg reg)
+{
+	return readl_relaxed(drv->reg_base + drv->reg_data->reg_offset[reg]);
+}
+
+static void spm_set_low_power_mode(struct spm_driver_data *drv,
+					enum pm_sleep_mode mode)
+{
+	u32 start_index;
+	u32 ctl_val;
+
+	start_index = drv->reg_data->start_index[mode];
+
+	ctl_val = spm_register_read(drv, SPM_REG_SPM_CTL);
+	ctl_val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT);
+	ctl_val |= start_index << SPM_CTL_INDEX_SHIFT;
+	ctl_val |= SPM_CTL_EN;
+	spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
+}
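+
+/*
+ * Worked example: a sequence starting at index 3 yields a SPM_CTL value of
+ * (3 << SPM_CTL_INDEX_SHIFT) | SPM_CTL_EN = 0x31, after the previous index
+ * bits have been masked out.
+ */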
+
+static int qcom_pm_collapse(unsigned long int unused)
+{
+	qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
+
+	/*
+	 * Returns here only if there was a pending interrupt and we did not
+	 * power down as a result.
+	 */
+	return -1;
+}
+
+static int qcom_cpu_spc(void)
+{
+	int ret;
+	struct spm_driver_data *drv = __this_cpu_read(cpu_spm_drv);
+
+	spm_set_low_power_mode(drv, PM_SLEEP_MODE_SPC);
+	ret = cpu_suspend(0, qcom_pm_collapse);
+	/*
+	 * ARM common code executes WFI without calling into our driver and
+	 * if the SPM mode is not reset, then we may accidentally power down the
+	 * cpu when we intended only to gate the cpu clock.
+	 * Ensure the state is set to standby before returning.
+	 */
+	spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
+
+	return ret;
+}
+
+static int qcom_idle_enter(unsigned long index)
+{
+	return __this_cpu_read(qcom_idle_ops)[index]();
+}
+
+static const struct of_device_id qcom_idle_state_match[] __initconst = {
+	{ .compatible = "qcom,idle-state-spc", .data = qcom_cpu_spc },
+	{ },
+};
+
+static int __init qcom_cpuidle_init(struct device_node *cpu_node, int cpu)
+{
+	const struct of_device_id *match_id;
+	struct device_node *state_node;
+	int i;
+	int state_count = 1;
+	idle_fn idle_fns[CPUIDLE_STATE_MAX];
+	idle_fn *fns;
+	cpumask_t mask;
+	bool use_scm_power_down = false;
+
+	for (i = 0; ; i++) {
+		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+		if (!state_node)
+			break;
+
+		if (!of_device_is_available(state_node))
+			continue;
+
+		if (i == CPUIDLE_STATE_MAX) {
+			pr_warn("%s: cpuidle states reached max possible\n",
+					__func__);
+			break;
+		}
+
+		match_id = of_match_node(qcom_idle_state_match, state_node);
+		if (!match_id)
+			return -ENODEV;
+
+		idle_fns[state_count] = match_id->data;
+
+		/* Check if any of the states allow power down */
+		if (match_id->data == qcom_cpu_spc)
+			use_scm_power_down = true;
+
+		state_count++;
+	}
+
+	if (state_count == 1)
+		goto check_spm;
+
+	fns = devm_kcalloc(get_cpu_device(cpu), state_count, sizeof(*fns),
+			GFP_KERNEL);
+	if (!fns)
+		return -ENOMEM;
+
+	for (i = 1; i < state_count; i++)
+		fns[i] = idle_fns[i];
+
+	if (use_scm_power_down) {
+		/* We have at least one power down mode */
+		cpumask_clear(&mask);
+		cpumask_set_cpu(cpu, &mask);
+		qcom_scm_set_warm_boot_addr(cpu_resume_arm, &mask);
+	}
+
+	per_cpu(qcom_idle_ops, cpu) = fns;
+
+	/*
+	 * SPM probe for the cpu should have happened by now, if the
+	 * SPM device does not exist, return -ENXIO to indicate that the
+	 * cpu does not support idle states.
+	 */
+check_spm:
+	return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO;
+}
+
+static const struct cpuidle_ops qcom_cpuidle_ops __initconst = {
+	.suspend = qcom_idle_enter,
+	.init = qcom_cpuidle_init,
+};
+
+CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v1, "qcom,kpss-acc-v1", &qcom_cpuidle_ops);
+CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v2, "qcom,kpss-acc-v2", &qcom_cpuidle_ops);
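+
+/*
+ * For illustration only, a hypothetical devicetree wiring for the SPC state
+ * (idle-state properties other than "compatible" elided):
+ *
+ *	cpu@0 {
+ *		enable-method = "qcom,kpss-acc-v2";
+ *		cpu-idle-states = <&spc>;
+ *	};
+ *
+ *	spc: spc {
+ *		compatible = "qcom,idle-state-spc";
+ *	};
+ */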
+
+static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
+		int *spm_cpu)
+{
+	struct spm_driver_data *drv = NULL;
+	struct device_node *cpu_node, *saw_node;
+	int cpu;
+	bool found = false;
+
+	for_each_possible_cpu(cpu) {
+		cpu_node = of_cpu_device_node_get(cpu);
+		if (!cpu_node)
+			continue;
+		saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+		found = (saw_node == pdev->dev.of_node);
+		of_node_put(saw_node);
+		of_node_put(cpu_node);
+		if (found)
+			break;
+	}
+
+	if (found) {
+		drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+		if (drv)
+			*spm_cpu = cpu;
+	}
+
+	return drv;
+}
+
+static const struct of_device_id spm_match_table[] = {
+	{ .compatible = "qcom,msm8974-saw2-v2.1-cpu",
+	  .data = &spm_reg_8974_8084_cpu },
+	{ .compatible = "qcom,apq8084-saw2-v2.1-cpu",
+	  .data = &spm_reg_8974_8084_cpu },
+	{ .compatible = "qcom,apq8064-saw2-v1.1-cpu",
+	  .data = &spm_reg_8064_cpu },
+	{ },
+};
+
+static int spm_dev_probe(struct platform_device *pdev)
+{
+	struct spm_driver_data *drv;
+	struct resource *res;
+	const struct of_device_id *match_id;
+	void __iomem *addr;
+	int cpu;
+
+	drv = spm_get_drv(pdev, &cpu);
+	if (!drv)
+		return -EINVAL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(drv->reg_base))
+		return PTR_ERR(drv->reg_base);
+
+	match_id = of_match_node(spm_match_table, pdev->dev.of_node);
+	if (!match_id)
+		return -ENODEV;
+
+	drv->reg_data = match_id->data;
+
+	/* Write the SPM sequences first.. */
+	addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY];
+	__iowrite32_copy(addr, drv->reg_data->seq,
+			ARRAY_SIZE(drv->reg_data->seq) / 4);
+
+	/*
+	 * ..and then the control registers.
+	 * On some SoC if the control registers are written first and if the
+	 * CPU was held in reset, the reset signal could trigger the SPM state
+	 * machine, before the sequences are completely written.
+	 */
+	spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
+	spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
+	spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
+	spm_register_write(drv, SPM_REG_PMIC_DATA_0,
+				drv->reg_data->pmic_data[0]);
+	spm_register_write(drv, SPM_REG_PMIC_DATA_1,
+				drv->reg_data->pmic_data[1]);
+
+	/* Set up Standby as the default low power mode */
+	spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
+
+	per_cpu(cpu_spm_drv, cpu) = drv;
+
+	return 0;
+}
+
+static struct platform_driver spm_driver = {
+	.probe = spm_dev_probe,
+	.driver = {
+		.name = "saw",
+		.of_match_table = spm_match_table,
+	},
+};
+
+builtin_platform_driver(spm_driver);
diff --git a/src/kernel/linux/v4.14/drivers/soc/qcom/wcnss_ctrl.c b/src/kernel/linux/v4.14/drivers/soc/qcom/wcnss_ctrl.c
new file mode 100644
index 0000000..373400d
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/soc/qcom/wcnss_ctrl.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2016, Linaro Ltd.
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/soc/qcom/wcnss_ctrl.h>
+
+#define WCNSS_REQUEST_TIMEOUT	(5 * HZ)
+#define WCNSS_CBC_TIMEOUT	(10 * HZ)
+
+#define WCNSS_ACK_DONE_BOOTING	1
+#define WCNSS_ACK_COLD_BOOTING	2
+
+#define NV_FRAGMENT_SIZE	3072
+#define NVBIN_FILE		"wlan/prima/WCNSS_qcom_wlan_nv.bin"
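+
+/*
+ * The nv blob is streamed in NV_FRAGMENT_SIZE chunks by wcnss_download_nv();
+ * a hypothetical 7000 byte image, for example, goes out as fragments of
+ * 3072, 3072 and 856 bytes, with "last" set only on the final request.
+ */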
+
+/**
+ * struct wcnss_ctrl - driver context
+ * @dev:	device handle
+ * @channel:	SMD channel handle
+ * @ack:	completion for outstanding requests
+ * @cbc:	completion for cbc complete indication
+ * @ack_status:	status of the outstanding request
+ * @probe_work: worker for uploading nv binary
+ */
+struct wcnss_ctrl {
+	struct device *dev;
+	struct rpmsg_endpoint *channel;
+
+	struct completion ack;
+	struct completion cbc;
+	int ack_status;
+
+	struct work_struct probe_work;
+};
+
+/* message types */
+enum {
+	WCNSS_VERSION_REQ = 0x01000000,
+	WCNSS_VERSION_RESP,
+	WCNSS_DOWNLOAD_NV_REQ,
+	WCNSS_DOWNLOAD_NV_RESP,
+	WCNSS_UPLOAD_CAL_REQ,
+	WCNSS_UPLOAD_CAL_RESP,
+	WCNSS_DOWNLOAD_CAL_REQ,
+	WCNSS_DOWNLOAD_CAL_RESP,
+	WCNSS_VBAT_LEVEL_IND,
+	WCNSS_BUILD_VERSION_REQ,
+	WCNSS_BUILD_VERSION_RESP,
+	WCNSS_PM_CONFIG_REQ,
+	WCNSS_CBC_COMPLETE_IND,
+};
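+
+/*
+ * The values are sequential from WCNSS_VERSION_REQ, so e.g.
+ * WCNSS_DOWNLOAD_NV_REQ is 0x01000002 and WCNSS_CBC_COMPLETE_IND is
+ * 0x0100000c on the wire.
+ */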
+
+/**
+ * struct wcnss_msg_hdr - common packet header for requests and responses
+ * @type:	packet message type
+ * @len:	total length of the packet, including this header
+ */
+struct wcnss_msg_hdr {
+	u32 type;
+	u32 len;
+} __packed;
+
+/**
+ * struct wcnss_version_resp - version request response
+ * @hdr:	common packet wcnss_msg_hdr header
+ */
+struct wcnss_version_resp {
+	struct wcnss_msg_hdr hdr;
+	u8 major;
+	u8 minor;
+	u8 version;
+	u8 revision;
+} __packed;
+
+/**
+ * struct wcnss_download_nv_req - firmware fragment request
+ * @hdr:	common packet wcnss_msg_hdr header
+ * @seq:	sequence number of this fragment
+ * @last:	boolean indicator of this being the last fragment of the binary
+ * @frag_size:	length of this fragment
+ * @fragment:	fragment data
+ */
+struct wcnss_download_nv_req {
+	struct wcnss_msg_hdr hdr;
+	u16 seq;
+	u16 last;
+	u32 frag_size;
+	u8 fragment[];
+} __packed;
+
+/**
+ * struct wcnss_download_nv_resp - firmware download response
+ * @hdr:	common packet wcnss_msg_hdr header
+ * @status:	boolean to indicate success of the download
+ */
+struct wcnss_download_nv_resp {
+	struct wcnss_msg_hdr hdr;
+	u8 status;
+} __packed;
+
+/**
+ * wcnss_ctrl_smd_callback() - handler for incoming SMD responses
+ * @rpdev:	rpmsg device handle
+ * @data:	pointer to the incoming data packet
+ * @count:	size of the incoming data packet
+ * @priv:	unused
+ * @addr:	unused
+ *
+ * Handles any incoming packets from the remote WCNSS_CTRL service.
+ */
+static int wcnss_ctrl_smd_callback(struct rpmsg_device *rpdev,
+				   void *data,
+				   int count,
+				   void *priv,
+				   u32 addr)
+{
+	struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev);
+	const struct wcnss_download_nv_resp *nvresp;
+	const struct wcnss_version_resp *version;
+	const struct wcnss_msg_hdr *hdr = data;
+
+	switch (hdr->type) {
+	case WCNSS_VERSION_RESP:
+		if (count != sizeof(*version)) {
+			dev_err(wcnss->dev,
+				"invalid size of version response\n");
+			break;
+		}
+
+		version = data;
+		dev_info(wcnss->dev, "WCNSS Version %d.%d %d.%d\n",
+			 version->major, version->minor,
+			 version->version, version->revision);
+
+		complete(&wcnss->ack);
+		break;
+	case WCNSS_DOWNLOAD_NV_RESP:
+		if (count != sizeof(*nvresp)) {
+			dev_err(wcnss->dev,
+				"invalid size of download response\n");
+			break;
+		}
+
+		nvresp = data;
+		wcnss->ack_status = nvresp->status;
+		complete(&wcnss->ack);
+		break;
+	case WCNSS_CBC_COMPLETE_IND:
+		dev_dbg(wcnss->dev, "cold boot complete\n");
+		complete(&wcnss->cbc);
+		break;
+	default:
+		dev_info(wcnss->dev, "unknown message type %d\n", hdr->type);
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * wcnss_request_version() - send a version request to WCNSS
+ * @wcnss:	wcnss ctrl driver context
+ */
+static int wcnss_request_version(struct wcnss_ctrl *wcnss)
+{
+	struct wcnss_msg_hdr msg;
+	int ret;
+
+	msg.type = WCNSS_VERSION_REQ;
+	msg.len = sizeof(msg);
+	ret = rpmsg_send(wcnss->channel, &msg, sizeof(msg));
+	if (ret < 0)
+		return ret;
+
+	ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_CBC_TIMEOUT);
+	if (!ret) {
+		dev_err(wcnss->dev, "timeout waiting for version response\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/**
+ * wcnss_download_nv() - send nv binary to WCNSS
+ * @wcnss:	wcnss_ctrl state handle
+ * @expect_cbc:	indicator to caller that a cbc event is expected
+ *
+ * Returns 0 on success. Negative errno on failure.
+ */
+static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
+{
+	struct wcnss_download_nv_req *req;
+	const struct firmware *fw;
+	const void *data;
+	ssize_t left;
+	int ret;
+
+	req = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	ret = request_firmware(&fw, NVBIN_FILE, wcnss->dev);
+	if (ret < 0) {
+		dev_err(wcnss->dev, "Failed to load nv file %s: %d\n",
+			NVBIN_FILE, ret);
+		goto free_req;
+	}
+
+	data = fw->data;
+	left = fw->size;
+
+	req->hdr.type = WCNSS_DOWNLOAD_NV_REQ;
+	req->hdr.len = sizeof(*req) + NV_FRAGMENT_SIZE;
+
+	req->last = 0;
+	req->frag_size = NV_FRAGMENT_SIZE;
+
+	req->seq = 0;
+	do {
+		if (left <= NV_FRAGMENT_SIZE) {
+			req->last = 1;
+			req->frag_size = left;
+			req->hdr.len = sizeof(*req) + left;
+		}
+
+		memcpy(req->fragment, data, req->frag_size);
+
+		ret = rpmsg_send(wcnss->channel, req, req->hdr.len);
+		if (ret < 0) {
+			dev_err(wcnss->dev, "failed to send smd packet\n");
+			goto release_fw;
+		}
+
+		/* Increment for next fragment */
+		req->seq++;
+
+		data += NV_FRAGMENT_SIZE;
+		left -= NV_FRAGMENT_SIZE;
+	} while (left > 0);
+
+	ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_REQUEST_TIMEOUT);
+	if (!ret) {
+		dev_err(wcnss->dev, "timeout waiting for nv upload ack\n");
+		ret = -ETIMEDOUT;
+	} else {
+		*expect_cbc = wcnss->ack_status == WCNSS_ACK_COLD_BOOTING;
+		ret = 0;
+	}
+
+release_fw:
+	release_firmware(fw);
+free_req:
+	kfree(req);
+
+	return ret;
+}
+
+/**
+ * qcom_wcnss_open_channel() - open additional SMD channel to WCNSS
+ * @wcnss:	wcnss handle, retrieved from drvdata
+ * @name:	SMD channel name
+ * @cb:		callback to handle incoming data on the channel
+ */
+struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv)
+{
+	struct rpmsg_channel_info chinfo;
+	struct wcnss_ctrl *_wcnss = wcnss;
+
+	strscpy(chinfo.name, name, sizeof(chinfo.name));
+	chinfo.src = RPMSG_ADDR_ANY;
+	chinfo.dst = RPMSG_ADDR_ANY;
+
+	return rpmsg_create_ept(_wcnss->channel->rpdev, cb, priv, chinfo);
+}
+EXPORT_SYMBOL(qcom_wcnss_open_channel);
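+
+/*
+ * A minimal, hypothetical consumer sketch; children created by
+ * of_platform_populate() below can reach the wcnss handle through their
+ * parent's drvdata ("WLAN_CTRL" and my_rx_cb are placeholders):
+ *
+ *	struct rpmsg_endpoint *ept;
+ *
+ *	ept = qcom_wcnss_open_channel(dev_get_drvdata(dev->parent),
+ *				      "WLAN_CTRL", my_rx_cb, priv);
+ */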
+
+static void wcnss_async_probe(struct work_struct *work)
+{
+	struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, probe_work);
+	bool expect_cbc;
+	int ret;
+
+	ret = wcnss_request_version(wcnss);
+	if (ret < 0)
+		return;
+
+	ret = wcnss_download_nv(wcnss, &expect_cbc);
+	if (ret < 0)
+		return;
+
+	/* Wait for pending cold boot completion if indicated by the nv downloader */
+	if (expect_cbc) {
+		ret = wait_for_completion_timeout(&wcnss->cbc, WCNSS_REQUEST_TIMEOUT);
+		if (!ret)
+			dev_err(wcnss->dev, "expected cold boot completion\n");
+	}
+
+	of_platform_populate(wcnss->dev->of_node, NULL, NULL, wcnss->dev);
+}
+
+static int wcnss_ctrl_probe(struct rpmsg_device *rpdev)
+{
+	struct wcnss_ctrl *wcnss;
+
+	wcnss = devm_kzalloc(&rpdev->dev, sizeof(*wcnss), GFP_KERNEL);
+	if (!wcnss)
+		return -ENOMEM;
+
+	wcnss->dev = &rpdev->dev;
+	wcnss->channel = rpdev->ept;
+
+	init_completion(&wcnss->ack);
+	init_completion(&wcnss->cbc);
+	INIT_WORK(&wcnss->probe_work, wcnss_async_probe);
+
+	dev_set_drvdata(&rpdev->dev, wcnss);
+
+	schedule_work(&wcnss->probe_work);
+
+	return 0;
+}
+
+static void wcnss_ctrl_remove(struct rpmsg_device *rpdev)
+{
+	struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev);
+
+	cancel_work_sync(&wcnss->probe_work);
+	of_platform_depopulate(&rpdev->dev);
+}
+
+static const struct of_device_id wcnss_ctrl_of_match[] = {
+	{ .compatible = "qcom,wcnss", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, wcnss_ctrl_of_match);
+
+static struct rpmsg_driver wcnss_ctrl_driver = {
+	.probe = wcnss_ctrl_probe,
+	.remove = wcnss_ctrl_remove,
+	.callback = wcnss_ctrl_smd_callback,
+	.drv  = {
+		.name  = "qcom_wcnss_ctrl",
+		.owner = THIS_MODULE,
+		.of_match_table = wcnss_ctrl_of_match,
+	},
+};
+
+module_rpmsg_driver(wcnss_ctrl_driver);
+
+MODULE_DESCRIPTION("Qualcomm WCNSS control driver");
+MODULE_LICENSE("GPL v2");