[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/drivers/bus/Kconfig b/src/kernel/linux/v4.14/drivers/bus/Kconfig
new file mode 100644
index 0000000..3e66f4c
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/Kconfig
@@ -0,0 +1,187 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Bus Devices
+#
+
+menu "Bus devices"
+
+config ARM_CCI
+	bool
+
+config ARM_CCI_PMU
+	bool
+	select ARM_CCI
+
+config ARM_CCI400_COMMON
+	bool
+	select ARM_CCI
+
+config ARM_CCI400_PMU
+	bool "ARM CCI400 PMU support"
+	depends on (ARM && CPU_V7) || ARM64
+	depends on PERF_EVENTS
+	select ARM_CCI400_COMMON
+	select ARM_CCI_PMU
+	help
+	  Support for PMU events monitoring on the ARM CCI-400 (cache coherent
+	  interconnect). CCI-400 supports counting events related to the
+	  connected slave/master interfaces.
+
+config ARM_CCI400_PORT_CTRL
+	bool
+	depends on ARM && OF && CPU_V7
+	select ARM_CCI400_COMMON
+	help
+	  Low level power management driver for CCI400 cache coherent
+	  interconnect for ARM platforms.
+
+config ARM_CCI5xx_PMU
+	bool "ARM CCI-500/CCI-550 PMU support"
+	depends on (ARM && CPU_V7) || ARM64
+	depends on PERF_EVENTS
+	select ARM_CCI_PMU
+	help
+	  Support for PMU events monitoring on the ARM CCI-500/CCI-550 cache
+	  coherent interconnects. Both of them provide 8 independent event counters,
+	  which can count events pertaining to the slave/master interfaces as well
+	  as the internal events to the CCI.
+
+	  If unsure, say Y.
+
+config ARM_CCN
+	tristate "ARM CCN driver support"
+	depends on ARM || ARM64
+	depends on PERF_EVENTS
+	help
+	  PMU (perf) driver supporting the ARM CCN (Cache Coherent Network)
+	  interconnect.
+
+config BRCMSTB_GISB_ARB
+	bool "Broadcom STB GISB bus arbiter"
+	depends on ARM || ARM64 || MIPS
+	default ARCH_BRCMSTB || BMIPS_GENERIC
+	help
+	  Driver for the Broadcom Set Top Box System-on-a-chip internal bus
+	  arbiter. This driver provides timeout and target abort error handling
+	  and internal bus master decoding.
+
+config IMX_WEIM
+	bool "Freescale EIM DRIVER"
+	depends on ARCH_MXC
+	help
+	  Driver for i.MX WEIM controller.
+	  The WEIM (Wireless External Interface Module) works like a bus.
+	  You can attach many different devices to it, such as NOR flash and
+	  OneNAND.
+
+config MIPS_CDMM
+	bool "MIPS Common Device Memory Map (CDMM) Driver"
+	depends on CPU_MIPSR2
+	help
+	  Driver needed for the MIPS Common Device Memory Map bus in MIPS
+	  cores. This bus is for per-CPU tightly coupled devices such as the
+	  Fast Debug Channel (FDC).
+
+	  For this to work, either your bootloader needs to enable the CDMM
+	  region at an unused physical address on the boot CPU, or else your
+	  platform code needs to implement mips_cdmm_phys_base() (see
+	  asm/cdmm.h).
+
+config MVEBU_MBUS
+	bool
+	depends on PLAT_ORION
+	help
+	  Driver needed for the MBus configuration on Marvell EBU SoCs
+	  (Kirkwood, Dove, Orion5x, MV78XX0 and Armada 370/XP).
+
+config OMAP_INTERCONNECT
+	tristate "OMAP INTERCONNECT DRIVER"
+	depends on ARCH_OMAP2PLUS
+	help
+	  Driver to enable OMAP interconnect error handling.
+
+config OMAP_OCP2SCP
+	tristate "OMAP OCP2SCP DRIVER"
+	depends on ARCH_OMAP2PLUS
+	help
+	  Driver to enable ocp2scp module which transforms ocp interface
+	  protocol to scp protocol. In OMAP4, USB PHY is connected via
+	  OCP2SCP and in OMAP5, both USB PHY and SATA PHY are connected via
+	  OCP2SCP.
+
+config QCOM_EBI2
+	bool "Qualcomm External Bus Interface 2 (EBI2)"
+	depends on HAS_IOMEM
+	depends on ARCH_QCOM || COMPILE_TEST
+	default ARCH_QCOM
+	help
+	  Say y here to enable support for the Qualcomm External Bus
+	  Interface 2, which can be used to connect things like NAND Flash,
+	  SRAM, ethernet adapters, FPGAs and LCD displays.
+
+config SIMPLE_PM_BUS
+	bool "Simple Power-Managed Bus Driver"
+	depends on OF && PM
+	help
+	  Driver for transparent busses that don't need a real driver, but
+	  where the bus controller is part of a PM domain, or under the control
+	  of a functional clock, and thus relies on runtime PM for managing
+	  this PM domain and/or clock.
+	  An example of such a bus controller is the Renesas Bus State
+	  Controller (BSC, sometimes called "LBSC within Bus Bridge", or
+	  "External Bus Interface") as found on several Renesas ARM SoCs.
+
+config SUNXI_RSB
+	tristate "Allwinner sunXi Reduced Serial Bus Driver"
+	default MACH_SUN8I || MACH_SUN9I || ARM64
+	depends on ARCH_SUNXI
+	select REGMAP
+	help
+	  Say y here to enable support for Allwinner's Reduced Serial Bus
+	  (RSB). This controller is responsible for communicating with
+	  various RSB-based devices, such as AXP223, AXP8XX PMICs, and
+	  AC100/AC200 ICs.
+
+config TEGRA_ACONNECT
+	tristate "Tegra ACONNECT Bus Driver"
+	depends on ARCH_TEGRA_210_SOC
+	depends on OF && PM
+	select PM_CLK
+	help
+	  Driver for the Tegra ACONNECT bus which is used to interface with
+	  the devices inside the Audio Processing Engine (APE) for Tegra210.
+
+config TEGRA_GMI
+	tristate "Tegra Generic Memory Interface bus driver"
+	depends on ARCH_TEGRA
+	help
+	  Driver for the Tegra Generic Memory Interface bus which can be used
+	  to attach devices such as NOR, UART, FPGA and more.
+
+config UNIPHIER_SYSTEM_BUS
+	tristate "UniPhier System Bus driver"
+	depends on ARCH_UNIPHIER && OF
+	default y
+	help
+	  Support for UniPhier System Bus, a simple external bus.  This is
+	  needed to use on-board devices connected to UniPhier SoCs.
+
+config VEXPRESS_CONFIG
+	bool "Versatile Express configuration bus"
+	default y if ARCH_VEXPRESS
+	depends on ARM || ARM64
+	depends on OF
+	select REGMAP
+	help
+	  Platform configuration infrastructure for the ARM Ltd.
+	  Versatile Express.
+
+config DA8XX_MSTPRI
+	bool "TI da8xx master peripheral priority driver"
+	depends on ARCH_DAVINCI_DA8XX
+	help
+	  Driver for Texas Instruments da8xx master peripheral priority
+	  configuration. It allows adjusting the priorities of all master
+	  peripherals.
+
+endmenu
diff --git a/src/kernel/linux/v4.14/drivers/bus/Makefile b/src/kernel/linux/v4.14/drivers/bus/Makefile
new file mode 100644
index 0000000..3ae96cf
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the bus drivers.
+#
+
+# Interconnect bus drivers for ARM platforms
+obj-$(CONFIG_ARM_CCI)		+= arm-cci.o
+obj-$(CONFIG_ARM_CCN)		+= arm-ccn.o
+
+obj-$(CONFIG_BRCMSTB_GISB_ARB)	+= brcmstb_gisb.o
+obj-$(CONFIG_IMX_WEIM)		+= imx-weim.o
+obj-$(CONFIG_MIPS_CDMM)		+= mips_cdmm.o
+obj-$(CONFIG_MVEBU_MBUS)	+= mvebu-mbus.o
+
+# Interconnect bus driver for OMAP SoCs.
+obj-$(CONFIG_OMAP_INTERCONNECT)	+= omap_l3_smx.o omap_l3_noc.o
+
+obj-$(CONFIG_OMAP_OCP2SCP)	+= omap-ocp2scp.o
+obj-$(CONFIG_QCOM_EBI2)		+= qcom-ebi2.o
+obj-$(CONFIG_SUNXI_RSB)		+= sunxi-rsb.o
+obj-$(CONFIG_SIMPLE_PM_BUS)	+= simple-pm-bus.o
+obj-$(CONFIG_TEGRA_ACONNECT)	+= tegra-aconnect.o
+obj-$(CONFIG_TEGRA_GMI)		+= tegra-gmi.o
+obj-$(CONFIG_UNIPHIER_SYSTEM_BUS)	+= uniphier-system-bus.o
+obj-$(CONFIG_VEXPRESS_CONFIG)	+= vexpress-config.o
+
+obj-$(CONFIG_DA8XX_MSTPRI)	+= da8xx-mstpri.o
diff --git a/src/kernel/linux/v4.14/drivers/bus/arm-cci.c b/src/kernel/linux/v4.14/drivers/bus/arm-cci.c
new file mode 100644
index 0000000..fc2da3a
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/arm-cci.c
@@ -0,0 +1,2322 @@
+/*
+ * CCI cache coherent interconnect driver
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/arm-cci.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+
+static void __iomem *cci_ctrl_base;
+static unsigned long cci_ctrl_phys;
+
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
+struct cci_nb_ports {
+	unsigned int nb_ace;
+	unsigned int nb_ace_lite;
+};
+
+static const struct cci_nb_ports cci400_ports = {
+	.nb_ace = 2,
+	.nb_ace_lite = 3
+};
+
+#define CCI400_PORTS_DATA	(&cci400_ports)
+#else
+#define CCI400_PORTS_DATA	(NULL)
+#endif
+
+static const struct of_device_id arm_cci_matches[] = {
+#ifdef CONFIG_ARM_CCI400_COMMON
+	{.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
+#endif
+#ifdef CONFIG_ARM_CCI5xx_PMU
+	{ .compatible = "arm,cci-500", },
+	{ .compatible = "arm,cci-550", },
+#endif
+	{},
+};
+
+#ifdef CONFIG_ARM_CCI_PMU
+
+#define DRIVER_NAME		"ARM-CCI"
+#define DRIVER_NAME_PMU		DRIVER_NAME " PMU"
+
+#define CCI_PMCR		0x0100
+#define CCI_PID2		0x0fe8
+
+#define CCI_PMCR_CEN		0x00000001
+#define CCI_PMCR_NCNT_MASK	0x0000f800
+#define CCI_PMCR_NCNT_SHIFT	11
+
+#define CCI_PID2_REV_MASK	0xf0
+#define CCI_PID2_REV_SHIFT	4
+
+#define CCI_PMU_EVT_SEL		0x000
+#define CCI_PMU_CNTR		0x004
+#define CCI_PMU_CNTR_CTRL	0x008
+#define CCI_PMU_OVRFLW		0x00c
+
+#define CCI_PMU_OVRFLW_FLAG	1
+
+#define CCI_PMU_CNTR_SIZE(model)	((model)->cntr_size)
+#define CCI_PMU_CNTR_BASE(model, idx)	((idx) * CCI_PMU_CNTR_SIZE(model))
+#define CCI_PMU_CNTR_MASK		((1ULL << 32) - 1)
+#define CCI_PMU_CNTR_LAST(cci_pmu)	(cci_pmu->num_cntrs - 1)
+
+#define CCI_PMU_MAX_HW_CNTRS(model) \
+	((model)->num_hw_cntrs + (model)->fixed_hw_cntrs)
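+
+/*
+ * Worked example (based on the model table below): the CCI-400 model
+ * declares one fixed cycle counter and four programmable counters, so
+ * CCI_PMU_MAX_HW_CNTRS() evaluates to 5 for it.
+ */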
+
+/* Types of interfaces that can generate events */
+enum {
+	CCI_IF_SLAVE,
+	CCI_IF_MASTER,
+#ifdef CONFIG_ARM_CCI5xx_PMU
+	CCI_IF_GLOBAL,
+#endif
+	CCI_IF_MAX,
+};
+
+struct event_range {
+	u32 min;
+	u32 max;
+};
+
+struct cci_pmu_hw_events {
+	struct perf_event **events;
+	unsigned long *used_mask;
+	raw_spinlock_t pmu_lock;
+};
+
+struct cci_pmu;
+/*
+ * struct cci_pmu_model:
+ * @fixed_hw_cntrs - Number of fixed event counters
+ * @num_hw_cntrs - Maximum number of programmable event counters
+ * @cntr_size - Size of an event counter mapping
+ */
+struct cci_pmu_model {
+	char *name;
+	u32 fixed_hw_cntrs;
+	u32 num_hw_cntrs;
+	u32 cntr_size;
+	struct attribute **format_attrs;
+	struct attribute **event_attrs;
+	struct event_range event_ranges[CCI_IF_MAX];
+	int (*validate_hw_event)(struct cci_pmu *, unsigned long);
+	int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
+	void (*write_counters)(struct cci_pmu *, unsigned long *);
+};
+
+static struct cci_pmu_model cci_pmu_models[];
+
+struct cci_pmu {
+	void __iomem *base;
+	struct pmu pmu;
+	int nr_irqs;
+	int *irqs;
+	unsigned long active_irqs;
+	const struct cci_pmu_model *model;
+	struct cci_pmu_hw_events hw_events;
+	struct platform_device *plat_device;
+	int num_cntrs;
+	atomic_t active_events;
+	struct mutex reserve_mutex;
+	struct hlist_node node;
+	cpumask_t cpus;
+};
+
+#define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))
+
+enum cci_models {
+#ifdef CONFIG_ARM_CCI400_PMU
+	CCI400_R0,
+	CCI400_R1,
+#endif
+#ifdef CONFIG_ARM_CCI5xx_PMU
+	CCI500_R0,
+	CCI550_R0,
+#endif
+	CCI_MODEL_MAX
+};
+
+static void pmu_write_counters(struct cci_pmu *cci_pmu,
+				 unsigned long *mask);
+static ssize_t cci_pmu_format_show(struct device *dev,
+			struct device_attribute *attr, char *buf);
+static ssize_t cci_pmu_event_show(struct device *dev,
+			struct device_attribute *attr, char *buf);
+
+#define CCI_EXT_ATTR_ENTRY(_name, _func, _config) 				\
+	&((struct dev_ext_attribute[]) {					\
+		{ __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config }	\
+	})[0].attr.attr
+
+#define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \
+	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config)
+#define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \
+	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config)
+
+/* CCI400 PMU Specific definitions */
+
+#ifdef CONFIG_ARM_CCI400_PMU
+
+/* Port ids */
+#define CCI400_PORT_S0		0
+#define CCI400_PORT_S1		1
+#define CCI400_PORT_S2		2
+#define CCI400_PORT_S3		3
+#define CCI400_PORT_S4		4
+#define CCI400_PORT_M0		5
+#define CCI400_PORT_M1		6
+#define CCI400_PORT_M2		7
+
+#define CCI400_R1_PX		5
+
+/*
+ * Instead of an event id to monitor CCI cycles, a dedicated counter is
+ * provided. Use 0xff to represent CCI cycles and hope that no future revisions
+ * make use of this event in hardware.
+ */
+enum cci400_perf_events {
+	CCI400_PMU_CYCLES = 0xff
+};
+
+#define CCI400_PMU_CYCLE_CNTR_IDX	0
+#define CCI400_PMU_CNTR0_IDX		1
+
+/*
+ * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
+ * ports and bits 4:0 are event codes. There are different event codes
+ * associated with each port type.
+ *
+ * Additionally, the range of events associated with the port types changed
+ * between Rev0 and Rev1.
+ *
+ * The constants below define the range of valid codes for each port type for
+ * the different revisions and are used to validate the event to be monitored.
+ */
+
+#define CCI400_PMU_EVENT_MASK		0xffUL
+#define CCI400_PMU_EVENT_SOURCE_SHIFT	5
+#define CCI400_PMU_EVENT_SOURCE_MASK	0x7
+#define CCI400_PMU_EVENT_CODE_SHIFT	0
+#define CCI400_PMU_EVENT_CODE_MASK	0x1f
+#define CCI400_PMU_EVENT_SOURCE(event) \
+	((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \
+			CCI400_PMU_EVENT_SOURCE_MASK)
+#define CCI400_PMU_EVENT_CODE(event) \
+	((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK)
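+
+/*
+ * Worked example (illustrative): a raw event id of 0x29 decodes as
+ * source = (0x29 >> 5) & 0x7 = 0x1 (slave interface S1) and
+ * code   = 0x29 & 0x1f = 0x09 (si_rrq_stall_tt_full on Rev0/Rev1).
+ */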
+
+#define CCI400_R0_SLAVE_PORT_MIN_EV	0x00
+#define CCI400_R0_SLAVE_PORT_MAX_EV	0x13
+#define CCI400_R0_MASTER_PORT_MIN_EV	0x14
+#define CCI400_R0_MASTER_PORT_MAX_EV	0x1a
+
+#define CCI400_R1_SLAVE_PORT_MIN_EV	0x00
+#define CCI400_R1_SLAVE_PORT_MAX_EV	0x14
+#define CCI400_R1_MASTER_PORT_MIN_EV	0x00
+#define CCI400_R1_MASTER_PORT_MAX_EV	0x11
+
+#define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \
+	CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \
+					(unsigned long)_config)
+
+static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
+			struct device_attribute *attr, char *buf);
+
+static struct attribute *cci400_pmu_format_attrs[] = {
+	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
+	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
+	NULL
+};
+
+static struct attribute *cci400_r0_pmu_event_attrs[] = {
+	/* Slave events */
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
+	/* Master events */
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A),
+	/* Special event for cycles counter */
+	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
+	NULL
+};
+
+static struct attribute *cci400_r1_pmu_event_attrs[] = {
+	/* Slave events */
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14),
+	/* Master events */
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11),
+	/* Special event for cycles counter */
+	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
+	NULL
+};
+
+static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct dev_ext_attribute *eattr = container_of(attr,
+				struct dev_ext_attribute, attr);
+	return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var);
+}
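+
+/*
+ * The "cycles" attribute above is what perf tooling consumes; e.g.,
+ * assuming the PMU registers under the name "CCI_400":
+ *
+ *	perf stat -a -e CCI_400/cycles/ sleep 1
+ *
+ * opens the event with config=0xff, as printed by the show routine.
+ */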
+
+static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
+				struct cci_pmu_hw_events *hw,
+				unsigned long cci_event)
+{
+	int idx;
+
+	/* cycles event idx is fixed */
+	if (cci_event == CCI400_PMU_CYCLES) {
+		if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask))
+			return -EAGAIN;
+
+		return CCI400_PMU_CYCLE_CNTR_IDX;
+	}
+
+	for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
+		if (!test_and_set_bit(idx, hw->used_mask))
+			return idx;
+
+	/* No counters available */
+	return -EAGAIN;
+}
+
+static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
+{
+	u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);
+	u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);
+	int if_type;
+
+	if (hw_event & ~CCI400_PMU_EVENT_MASK)
+		return -ENOENT;
+
+	if (hw_event == CCI400_PMU_CYCLES)
+		return hw_event;
+
+	switch (ev_source) {
+	case CCI400_PORT_S0:
+	case CCI400_PORT_S1:
+	case CCI400_PORT_S2:
+	case CCI400_PORT_S3:
+	case CCI400_PORT_S4:
+		/* Slave Interface */
+		if_type = CCI_IF_SLAVE;
+		break;
+	case CCI400_PORT_M0:
+	case CCI400_PORT_M1:
+	case CCI400_PORT_M2:
+		/* Master Interface */
+		if_type = CCI_IF_MASTER;
+		break;
+	default:
+		return -ENOENT;
+	}
+
+	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
+		ev_code <= cci_pmu->model->event_ranges[if_type].max)
+		return hw_event;
+
+	return -ENOENT;
+}
+
+static int probe_cci400_revision(void)
+{
+	int rev;
+
+	rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
+	rev >>= CCI_PID2_REV_SHIFT;
+
+	if (rev < CCI400_R1_PX)
+		return CCI400_R0;
+	else
+		return CCI400_R1;
+}
+
+static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
+{
+	if (platform_has_secure_cci_access())
+		return &cci_pmu_models[probe_cci400_revision()];
+	return NULL;
+}
+#else	/* !CONFIG_ARM_CCI400_PMU */
+static inline struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
+{
+	return NULL;
+}
+#endif	/* CONFIG_ARM_CCI400_PMU */
+
+#ifdef CONFIG_ARM_CCI5xx_PMU
+
+/*
+ * CCI5xx PMU event id is a 9-bit value made of two parts:
+ *	 bits [8:5] - Source for the event
+ *	 bits [4:0] - Event code (specific to the type of interface)
+ */
+
+/* Port ids */
+#define CCI5xx_PORT_S0			0x0
+#define CCI5xx_PORT_S1			0x1
+#define CCI5xx_PORT_S2			0x2
+#define CCI5xx_PORT_S3			0x3
+#define CCI5xx_PORT_S4			0x4
+#define CCI5xx_PORT_S5			0x5
+#define CCI5xx_PORT_S6			0x6
+
+#define CCI5xx_PORT_M0			0x8
+#define CCI5xx_PORT_M1			0x9
+#define CCI5xx_PORT_M2			0xa
+#define CCI5xx_PORT_M3			0xb
+#define CCI5xx_PORT_M4			0xc
+#define CCI5xx_PORT_M5			0xd
+#define CCI5xx_PORT_M6			0xe
+
+#define CCI5xx_PORT_GLOBAL		0xf
+
+#define CCI5xx_PMU_EVENT_MASK		0x1ffUL
+#define CCI5xx_PMU_EVENT_SOURCE_SHIFT	0x5
+#define CCI5xx_PMU_EVENT_SOURCE_MASK	0xf
+#define CCI5xx_PMU_EVENT_CODE_SHIFT	0x0
+#define CCI5xx_PMU_EVENT_CODE_MASK	0x1f
+
+#define CCI5xx_PMU_EVENT_SOURCE(event)	\
+	((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK)
+#define CCI5xx_PMU_EVENT_CODE(event)	\
+	((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK)
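+
+/*
+ * Worked example (illustrative): a raw event id of 0x1e3 decodes as
+ * source = (0x1e3 >> 5) & 0xf = 0xf (CCI5xx_PORT_GLOBAL) and
+ * code   = 0x1e3 & 0x1f = 0x3 (cci_snoop_access_filter_bank_6_7).
+ */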
+
+#define CCI5xx_SLAVE_PORT_MIN_EV	0x00
+#define CCI5xx_SLAVE_PORT_MAX_EV	0x1f
+#define CCI5xx_MASTER_PORT_MIN_EV	0x00
+#define CCI5xx_MASTER_PORT_MAX_EV	0x06
+#define CCI5xx_GLOBAL_PORT_MIN_EV	0x00
+#define CCI5xx_GLOBAL_PORT_MAX_EV	0x0f
+
+#define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
+	CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \
+					(unsigned long) _config)
+
+static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
+				struct device_attribute *attr, char *buf);
+
+static struct attribute *cci5xx_pmu_format_attrs[] = {
+	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
+	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
+	NULL,
+};
+
+static struct attribute *cci5xx_pmu_event_attrs[] = {
+	/* Slave events */
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E),
+	CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F),
+
+	/* Master events */
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5),
+	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6),
+
+	/* Global events */
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
+	NULL
+};
+
+static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct dev_ext_attribute *eattr = container_of(attr,
+					struct dev_ext_attribute, attr);
+	/* Global events have single fixed source code */
+	return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n",
+				(unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
+}
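+
+/*
+ * Global events carry a fixed source of 0xf, so a user never supplies
+ * one; e.g., assuming the PMU registers under the name "CCI_500":
+ *
+ *	perf stat -a -e CCI_500/source=0xf,event=0xb/ sleep 1
+ *
+ * counts the cci_wrq global event.
+ */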
+
+/*
+ * CCI500 provides 8 independent event counters that can count
+ * any of the events available.
+ * CCI500 PMU event source ids
+ *	0x0-0x6 - Slave interfaces
+ *	0x8-0xD - Master interfaces
+ *	0xf     - Global Events
+ *	0x7,0xe - Reserved
+ */
+static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
+					unsigned long hw_event)
+{
+	u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
+	u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
+	int if_type;
+
+	if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
+		return -ENOENT;
+
+	switch (ev_source) {
+	case CCI5xx_PORT_S0:
+	case CCI5xx_PORT_S1:
+	case CCI5xx_PORT_S2:
+	case CCI5xx_PORT_S3:
+	case CCI5xx_PORT_S4:
+	case CCI5xx_PORT_S5:
+	case CCI5xx_PORT_S6:
+		if_type = CCI_IF_SLAVE;
+		break;
+	case CCI5xx_PORT_M0:
+	case CCI5xx_PORT_M1:
+	case CCI5xx_PORT_M2:
+	case CCI5xx_PORT_M3:
+	case CCI5xx_PORT_M4:
+	case CCI5xx_PORT_M5:
+		if_type = CCI_IF_MASTER;
+		break;
+	case CCI5xx_PORT_GLOBAL:
+		if_type = CCI_IF_GLOBAL;
+		break;
+	default:
+		return -ENOENT;
+	}
+
+	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
+		ev_code <= cci_pmu->model->event_ranges[if_type].max)
+		return hw_event;
+
+	return -ENOENT;
+}
+
+/*
+ * CCI550 provides 8 independent event counters that can count
+ * any of the events available.
+ * CCI550 PMU event source ids
+ *	0x0-0x6 - Slave interfaces
+ *	0x8-0xe - Master interfaces
+ *	0xf     - Global Events
+ *	0x7	- Reserved
+ */
+static int cci550_validate_hw_event(struct cci_pmu *cci_pmu,
+					unsigned long hw_event)
+{
+	u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
+	u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
+	int if_type;
+
+	if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
+		return -ENOENT;
+
+	switch (ev_source) {
+	case CCI5xx_PORT_S0:
+	case CCI5xx_PORT_S1:
+	case CCI5xx_PORT_S2:
+	case CCI5xx_PORT_S3:
+	case CCI5xx_PORT_S4:
+	case CCI5xx_PORT_S5:
+	case CCI5xx_PORT_S6:
+		if_type = CCI_IF_SLAVE;
+		break;
+	case CCI5xx_PORT_M0:
+	case CCI5xx_PORT_M1:
+	case CCI5xx_PORT_M2:
+	case CCI5xx_PORT_M3:
+	case CCI5xx_PORT_M4:
+	case CCI5xx_PORT_M5:
+	case CCI5xx_PORT_M6:
+		if_type = CCI_IF_MASTER;
+		break;
+	case CCI5xx_PORT_GLOBAL:
+		if_type = CCI_IF_GLOBAL;
+		break;
+	default:
+		return -ENOENT;
+	}
+
+	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
+		ev_code <= cci_pmu->model->event_ranges[if_type].max)
+		return hw_event;
+
+	return -ENOENT;
+}
+
+#endif	/* CONFIG_ARM_CCI5xx_PMU */
+
+/*
+ * Program the CCI PMU counters which have PERF_HES_ARCH set
+ * with the event period and mark them ready before we enable
+ * the PMU.
+ */
+static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
+{
+	int i;
+	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
+
+	DECLARE_BITMAP(mask, cci_pmu->num_cntrs);
+
+	bitmap_zero(mask, cci_pmu->num_cntrs);
+	for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
+		struct perf_event *event = cci_hw->events[i];
+
+		if (WARN_ON(!event))
+			continue;
+
+		/* Leave the events which are not counting */
+		if (event->hw.state & PERF_HES_STOPPED)
+			continue;
+		if (event->hw.state & PERF_HES_ARCH) {
+			set_bit(i, mask);
+			event->hw.state &= ~PERF_HES_ARCH;
+		}
+	}
+
+	pmu_write_counters(cci_pmu, mask);
+}
+
+/* Should be called with cci_pmu->hw_events->pmu_lock held */
+static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu)
+{
+	u32 val;
+
+	/* Enable all the PMU counters. */
+	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
+	writel(val, cci_ctrl_base + CCI_PMCR);
+}
+
+/* Should be called with cci_pmu->hw_events->pmu_lock held */
+static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
+{
+	cci_pmu_sync_counters(cci_pmu);
+	__cci_pmu_enable_nosync(cci_pmu);
+}
+
+/* Should be called with cci_pmu->hw_events->pmu_lock held */
+static void __cci_pmu_disable(void)
+{
+	u32 val;
+
+	/* Disable all the PMU counters. */
+	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
+	writel(val, cci_ctrl_base + CCI_PMCR);
+}
+
+static ssize_t cci_pmu_format_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct dev_ext_attribute *eattr = container_of(attr,
+				struct dev_ext_attribute, attr);
+	return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
+}
+
+static ssize_t cci_pmu_event_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct dev_ext_attribute *eattr = container_of(attr,
+				struct dev_ext_attribute, attr);
+	/* source parameter is mandatory for normal PMU events */
+	return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n",
+					 (unsigned long)eattr->var);
+}
+
+static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
+{
+	return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
+}
+
+static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
+{
+	return readl_relaxed(cci_pmu->base +
+			     CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
+}
+
+static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
+			       int idx, unsigned int offset)
+{
+	writel_relaxed(value, cci_pmu->base +
+		       CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
+}
+
+static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
+{
+	pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
+{
+	pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static bool __maybe_unused
+pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
+{
+	return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
+}
+
+static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
+{
+	pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
+}
+
+/*
+ * For all counters on the CCI-PMU, disable any 'enabled' counters,
+ * saving the changed counters in the mask, so that we can restore
+ * it later using pmu_restore_counters. The mask is private to the
+ * caller. We cannot rely on the used_mask maintained by the CCI_PMU
+ * as it only tells us if the counter is assigned to perf_event or not.
+ * The state of the perf_event cannot be locked by the PMU layer, hence
+ * we check the individual counter status (which can be locked by
+ * cci_pmu->hw_events->pmu_lock).
+ *
+ * @mask should be initialised to empty by the caller.
+ */
+static void __maybe_unused
+pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+	int i;
+
+	for (i = 0; i < cci_pmu->num_cntrs; i++) {
+		if (pmu_counter_is_enabled(cci_pmu, i)) {
+			set_bit(i, mask);
+			pmu_disable_counter(cci_pmu, i);
+		}
+	}
+}
+
+/*
+ * Restore the status of the counters. Reversal of the pmu_save_counters().
+ * For each counter set in the mask, enable the counter back.
+ */
+static void __maybe_unused
+pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+	int i;
+
+	for_each_set_bit(i, mask, cci_pmu->num_cntrs)
+		pmu_enable_counter(cci_pmu, i);
+}
+
+/*
+ * Returns the number of programmable counters actually implemented
+ * by the cci
+ */
+static u32 pmu_get_max_counters(void)
+{
+	return (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
+		CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
+}
+
+static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	unsigned long cci_event = event->hw.config_base;
+	int idx;
+
+	if (cci_pmu->model->get_event_idx)
+		return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);
+
+	/* Generic code to find an unused idx from the mask */
+	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
+		if (!test_and_set_bit(idx, hw->used_mask))
+			return idx;
+
+	/* No counters available */
+	return -EAGAIN;
+}
+
+static int pmu_map_event(struct perf_event *event)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+
+	if (event->attr.type < PERF_TYPE_MAX ||
+			!cci_pmu->model->validate_hw_event)
+		return -ENOENT;
+
+	return	cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
+}
+
+static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
+{
+	int i;
+	struct platform_device *pmu_device = cci_pmu->plat_device;
+
+	if (unlikely(!pmu_device))
+		return -ENODEV;
+
+	if (cci_pmu->nr_irqs < 1) {
+		dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Register all available CCI PMU interrupts. In the interrupt handler
+	 * we iterate over the counters checking for interrupt source (the
+	 * overflowing counter) and clear it.
+	 *
+	 * This should allow handling of non-unique interrupt for the counters.
+	 */
+	for (i = 0; i < cci_pmu->nr_irqs; i++) {
+		int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
+				"arm-cci-pmu", cci_pmu);
+		if (err) {
+			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
+				cci_pmu->irqs[i]);
+			return err;
+		}
+
+		set_bit(i, &cci_pmu->active_irqs);
+	}
+
+	return 0;
+}
+
+static void pmu_free_irq(struct cci_pmu *cci_pmu)
+{
+	int i;
+
+	for (i = 0; i < cci_pmu->nr_irqs; i++) {
+		if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
+			continue;
+
+		free_irq(cci_pmu->irqs[i], cci_pmu);
+	}
+}
+
+static u32 pmu_read_counter(struct perf_event *event)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	struct hw_perf_event *hw_counter = &event->hw;
+	int idx = hw_counter->idx;
+	u32 value;
+
+	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+		return 0;
+	}
+	value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);
+
+	return value;
+}
+
+static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
+{
+	pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
+}
+
+static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+	int i;
+	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
+
+	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
+		struct perf_event *event = cci_hw->events[i];
+
+		if (WARN_ON(!event))
+			continue;
+		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
+	}
+}
+
+static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+	if (cci_pmu->model->write_counters)
+		cci_pmu->model->write_counters(cci_pmu, mask);
+	else
+		__pmu_write_counters(cci_pmu, mask);
+}
+
+#ifdef CONFIG_ARM_CCI5xx_PMU
+
+/*
+ * CCI-500/CCI-550 has advanced power saving policies, which could gate the
+ * clocks to the PMU counters, which makes the writes to them ineffective.
+ * The only way to write to those counters is when the global counters
+ * are enabled and the particular counter is enabled.
+ *
+ * So we do the following:
+ *
+ * 1) Disable all the PMU counters, saving their current state
+ * 2) Enable the global PMU profiling, now that all counters are
+ *    disabled.
+ *
+ * For each counter to be programmed, repeat steps 3-7:
+ *
+ * 3) Write an invalid event code to the event control register for the
+ *    counter, so that the counters are not modified.
+ * 4) Enable the counter control for the counter.
+ * 5) Set the counter value
+ * 6) Disable the counter
+ * 7) Restore the event in the target counter
+ *
+ * 8) Disable the global PMU.
+ * 9) Restore the status of the rest of the counters.
+ *
+ * We choose an event which for CCI-5xx is guaranteed not to count.
+ * We use the highest possible event code (0x1f) for the master interface 0.
+ */
+#define CCI5xx_INVALID_EVENT	((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \
+				 (CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT))
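+
+/*
+ * With the definitions above, CCI5xx_INVALID_EVENT evaluates to
+ * (0x8 << 5) | 0x1f = 0x11f: master interface M0 with event code 0x1f,
+ * which is above CCI5xx_MASTER_PORT_MAX_EV (0x06) and never counts.
+ */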
+static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+	int i;
+	DECLARE_BITMAP(saved_mask, cci_pmu->num_cntrs);
+
+	bitmap_zero(saved_mask, cci_pmu->num_cntrs);
+	pmu_save_counters(cci_pmu, saved_mask);
+
+	/*
+	 * Now that all the counters are disabled, we can safely turn the PMU on,
+	 * without syncing the status of the counters
+	 */
+	__cci_pmu_enable_nosync(cci_pmu);
+
+	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
+		struct perf_event *event = cci_pmu->hw_events.events[i];
+
+		if (WARN_ON(!event))
+			continue;
+
+		pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);
+		pmu_enable_counter(cci_pmu, i);
+		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
+		pmu_disable_counter(cci_pmu, i);
+		pmu_set_event(cci_pmu, i, event->hw.config_base);
+	}
+
+	__cci_pmu_disable();
+
+	pmu_restore_counters(cci_pmu, saved_mask);
+}
+
+#endif	/* CONFIG_ARM_CCI5xx_PMU */
+
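+/*
+ * Note on wrap-around (illustrative): masking the delta with the 32-bit
+ * CCI_PMU_CNTR_MASK keeps it correct across counter wraps; e.g.
+ * prev_raw_count = 0xfffffffe and new_raw_count = 0x1 give
+ * delta = (0x1 - 0xfffffffe) & CCI_PMU_CNTR_MASK = 0x3.
+ */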
+static u64 pmu_event_update(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	u64 delta, prev_raw_count, new_raw_count;
+
+	do {
+		prev_raw_count = local64_read(&hwc->prev_count);
+		new_raw_count = pmu_read_counter(event);
+	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+		 new_raw_count) != prev_raw_count);
+
+	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;
+
+	local64_add(delta, &event->count);
+
+	return new_raw_count;
+}
+
+static void pmu_read(struct perf_event *event)
+{
+	pmu_event_update(event);
+}
+
+static void pmu_event_set_period(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	/*
+	 * The CCI PMU counters have a period of 2^32. To account for the
+	 * possibility of extreme interrupt latency we program for a period of
+	 * half that. Hopefully we can handle the interrupt before another 2^31
+	 * events occur and the counter overtakes its previous value.
+	 */
+	u64 val = 1ULL << 31;
+	local64_set(&hwc->prev_count, val);
+
+	/*
+	 * CCI PMU uses PERF_HES_ARCH to keep track of the counters whose
+	 * values need to be synced with the s/w state before the PMU is
+	 * enabled.
+	 * Mark this counter for sync.
+	 */
+	hwc->state |= PERF_HES_ARCH;
+}
+
+static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
+{
+	unsigned long flags;
+	struct cci_pmu *cci_pmu = dev;
+	struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
+	int idx, handled = IRQ_NONE;
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Disable the PMU while we walk through the counters */
+	__cci_pmu_disable();
+	/*
+	 * Iterate over counters and update the corresponding perf events.
+	 * This should work regardless of whether we have per-counter overflow
+	 * interrupt or a combined overflow interrupt.
+	 */
+	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
+		struct perf_event *event = events->events[idx];
+
+		if (!event)
+			continue;
+
+		/* Did this counter overflow? */
+		if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
+		      CCI_PMU_OVRFLW_FLAG))
+			continue;
+
+		pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
+							CCI_PMU_OVRFLW);
+
+		pmu_event_update(event);
+		pmu_event_set_period(event);
+		handled = IRQ_HANDLED;
+	}
+
+	/* Enable the PMU and sync possibly overflowed counters */
+	__cci_pmu_enable_sync(cci_pmu);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
+{
+	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
+	if (ret) {
+		pmu_free_irq(cci_pmu);
+		return ret;
+	}
+	return 0;
+}
+
+static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
+{
+	pmu_free_irq(cci_pmu);
+}
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	atomic_t *active_events = &cci_pmu->active_events;
+	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
+
+	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
+		cci_pmu_put_hw(cci_pmu);
+		mutex_unlock(reserve_mutex);
+	}
+}
+
+static void cci_pmu_enable(struct pmu *pmu)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
+	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+	int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
+	unsigned long flags;
+
+	if (!enabled)
+		return;
+
+	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
+	__cci_pmu_enable_sync(cci_pmu);
+	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
+}
+
+static void cci_pmu_disable(struct pmu *pmu)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
+	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
+	__cci_pmu_disable();
+	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
+}
+
+/*
+ * Check if the idx represents a non-programmable counter.
+ * All the fixed event counters are mapped before the programmable
+ * counters.
+ */
+static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
+{
+	return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
+}
+
+static void cci_pmu_start(struct perf_event *event, int pmu_flags)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+	unsigned long flags;
+
+	/*
+	 * To handle interrupt latency, we always reprogram the period
+	 * regardless of PERF_EF_RELOAD.
+	 */
+	if (pmu_flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
+
+	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+		return;
+	}
+
+	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
+
+	/* Configure the counter unless you are counting a fixed event */
+	if (!pmu_fixed_hw_idx(cci_pmu, idx))
+		pmu_set_event(cci_pmu, idx, hwc->config_base);
+
+	pmu_event_set_period(event);
+	pmu_enable_counter(cci_pmu, idx);
+
+	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
+}
+
+static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	if (hwc->state & PERF_HES_STOPPED)
+		return;
+
+	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+		return;
+	}
+
+	/*
+	 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
+	 * cci_pmu_start()
+	 */
+	pmu_disable_counter(cci_pmu, idx);
+	pmu_event_update(event);
+	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int cci_pmu_add(struct perf_event *event, int flags)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+	int err = 0;
+
+	perf_pmu_disable(event->pmu);
+
+	/* If we don't have a space for the counter then finish early. */
+	idx = pmu_get_event_idx(hw_events, event);
+	if (idx < 0) {
+		err = idx;
+		goto out;
+	}
+
+	event->hw.idx = idx;
+	hw_events->events[idx] = event;
+
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		cci_pmu_start(event, PERF_EF_RELOAD);
+
+	/* Propagate our changes to the userspace mapping. */
+	perf_event_update_userpage(event);
+
+out:
+	perf_pmu_enable(event->pmu);
+	return err;
+}
+
+static void cci_pmu_del(struct perf_event *event, int flags)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	cci_pmu_stop(event, PERF_EF_UPDATE);
+	hw_events->events[idx] = NULL;
+	clear_bit(idx, hw_events->used_mask);
+
+	perf_event_update_userpage(event);
+}
+
+static int
+validate_event(struct pmu *cci_pmu,
+               struct cci_pmu_hw_events *hw_events,
+               struct perf_event *event)
+{
+	if (is_software_event(event))
+		return 1;
+
+	/*
+	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+	 * core perf code won't check that the pmu->ctx == leader->ctx
+	 * until after pmu->event_init(event).
+	 */
+	if (event->pmu != cci_pmu)
+		return 0;
+
+	if (event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
+		return 1;
+
+	return pmu_get_event_idx(hw_events, event) >= 0;
+}
+
+static int
+validate_group(struct perf_event *event)
+{
+	struct perf_event *sibling, *leader = event->group_leader;
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)];
+	struct cci_pmu_hw_events fake_pmu = {
+		/*
+		 * Initialise the fake PMU. We only need to populate the
+		 * used_mask for the purposes of validation.
+		 */
+		.used_mask = mask,
+	};
+	memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long));
+
+	if (!validate_event(event->pmu, &fake_pmu, leader))
+		return -EINVAL;
+
+	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+		if (!validate_event(event->pmu, &fake_pmu, sibling))
+			return -EINVAL;
+	}
+
+	if (!validate_event(event->pmu, &fake_pmu, event))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+__hw_perf_event_init(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int mapping;
+
+	mapping = pmu_map_event(event);
+
+	if (mapping < 0) {
+		pr_debug("event %x:%llx not supported\n", event->attr.type,
+			 event->attr.config);
+		return mapping;
+	}
+
+	/*
+	 * We don't assign an index until we actually place the event onto
+	 * hardware. Use -1 to signify that we haven't decided where to put it
+	 * yet.
+	 */
+	hwc->idx		= -1;
+	hwc->config_base	= 0;
+	hwc->config		= 0;
+	hwc->event_base		= 0;
+
+	/*
+	 * Store the event encoding into the config_base field.
+	 */
+	hwc->config_base	    |= (unsigned long)mapping;
+
+	/*
+	 * Limit the sample_period to half of the counter width. That way, the
+	 * new counter value is far less likely to overtake the previous one
+	 * unless you have some serious IRQ latency issues.
+	 */
+	hwc->sample_period  = CCI_PMU_CNTR_MASK >> 1;
+	hwc->last_period    = hwc->sample_period;
+	local64_set(&hwc->period_left, hwc->sample_period);
+
+	if (event->group_leader != event) {
+		if (validate_group(event) != 0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cci_pmu_event_init(struct perf_event *event)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	atomic_t *active_events = &cci_pmu->active_events;
+	int err = 0;
+	int cpu;
+
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	/* Shared by all CPUs, no meaningful state to sample */
+	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+		return -EOPNOTSUPP;
+
+	/* We have no filtering of any kind */
+	if (event->attr.exclude_user	||
+	    event->attr.exclude_kernel	||
+	    event->attr.exclude_hv	||
+	    event->attr.exclude_idle	||
+	    event->attr.exclude_host	||
+	    event->attr.exclude_guest)
+		return -EINVAL;
+
+	/*
+	 * Following the example set by other "uncore" PMUs, we accept any CPU
+	 * and rewrite its affinity dynamically rather than having perf core
+	 * handle cpu == -1 and pid == -1 for this case.
+	 *
+	 * The perf core will pin online CPUs for the duration of this call and
+	 * the event being installed into its context, so the PMU's CPU can't
+	 * change under our feet.
+	 */
+	cpu = cpumask_first(&cci_pmu->cpus);
+	if (event->cpu < 0 || cpu < 0)
+		return -EINVAL;
+	event->cpu = cpu;
+
+	event->destroy = hw_perf_event_destroy;
+	if (!atomic_inc_not_zero(active_events)) {
+		mutex_lock(&cci_pmu->reserve_mutex);
+		if (atomic_read(active_events) == 0)
+			err = cci_pmu_get_hw(cci_pmu);
+		if (!err)
+			atomic_inc(active_events);
+		mutex_unlock(&cci_pmu->reserve_mutex);
+	}
+	if (err)
+		return err;
+
+	err = __hw_perf_event_init(event);
+	if (err)
+		hw_perf_event_destroy(event);
+
+	return err;
+}
+
+static ssize_t pmu_cpumask_attr_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct pmu *pmu = dev_get_drvdata(dev);
+	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
+
+	int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
+			  cpumask_pr_args(&cci_pmu->cpus));
+	buf[n++] = '\n';
+	buf[n] = '\0';
+	return n;
+}
+
+static struct device_attribute pmu_cpumask_attr =
+	__ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL);
+
+static struct attribute *pmu_attrs[] = {
+	&pmu_cpumask_attr.attr,
+	NULL,
+};
+
+static struct attribute_group pmu_attr_group = {
+	.attrs = pmu_attrs,
+};
+
+static struct attribute_group pmu_format_attr_group = {
+	.name = "format",
+	.attrs = NULL,		/* Filled in cci_pmu_init() */
+};
+
+static struct attribute_group pmu_event_attr_group = {
+	.name = "events",
+	.attrs = NULL,		/* Filled in cci_pmu_init() */
+};
+
+static const struct attribute_group *pmu_attr_groups[] = {
+	&pmu_attr_group,
+	&pmu_format_attr_group,
+	&pmu_event_attr_group,
+	NULL
+};
+
+static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
+{
+	const struct cci_pmu_model *model = cci_pmu->model;
+	char *name = model->name;
+	u32 num_cntrs;
+
+	pmu_event_attr_group.attrs = model->event_attrs;
+	pmu_format_attr_group.attrs = model->format_attrs;
+
+	cci_pmu->pmu = (struct pmu) {
+		.name		= cci_pmu->model->name,
+		.task_ctx_nr	= perf_invalid_context,
+		.pmu_enable	= cci_pmu_enable,
+		.pmu_disable	= cci_pmu_disable,
+		.event_init	= cci_pmu_event_init,
+		.add		= cci_pmu_add,
+		.del		= cci_pmu_del,
+		.start		= cci_pmu_start,
+		.stop		= cci_pmu_stop,
+		.read		= pmu_read,
+		.attr_groups	= pmu_attr_groups,
+	};
+
+	cci_pmu->plat_device = pdev;
+	num_cntrs = pmu_get_max_counters();
+	if (num_cntrs > cci_pmu->model->num_hw_cntrs) {
+		dev_warn(&pdev->dev,
+			"PMU implements more counters (%d) than supported by"
+			" the model (%d), truncated.\n",
+			num_cntrs, cci_pmu->model->num_hw_cntrs);
+		num_cntrs = cci_pmu->model->num_hw_cntrs;
+	}
+	cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;
+
+	return perf_pmu_register(&cci_pmu->pmu, name, -1);
+}
+
+static int cci_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct cci_pmu *cci_pmu = hlist_entry_safe(node, struct cci_pmu, node);
+	unsigned int target;
+
+	if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
+		return 0;
+	target = cpumask_any_but(cpu_online_mask, cpu);
+	if (target >= nr_cpu_ids)
+		return 0;
+	/*
+	 * TODO: migrate context once core races on event->ctx have
+	 * been fixed.
+	 */
+	cpumask_set_cpu(target, &cci_pmu->cpus);
+	return 0;
+}
+
+static struct cci_pmu_model cci_pmu_models[] = {
+#ifdef CONFIG_ARM_CCI400_PMU
+	[CCI400_R0] = {
+		.name = "CCI_400",
+		.fixed_hw_cntrs = 1,	/* Cycle counter */
+		.num_hw_cntrs = 4,
+		.cntr_size = SZ_4K,
+		.format_attrs = cci400_pmu_format_attrs,
+		.event_attrs = cci400_r0_pmu_event_attrs,
+		.event_ranges = {
+			[CCI_IF_SLAVE] = {
+				CCI400_R0_SLAVE_PORT_MIN_EV,
+				CCI400_R0_SLAVE_PORT_MAX_EV,
+			},
+			[CCI_IF_MASTER] = {
+				CCI400_R0_MASTER_PORT_MIN_EV,
+				CCI400_R0_MASTER_PORT_MAX_EV,
+			},
+		},
+		.validate_hw_event = cci400_validate_hw_event,
+		.get_event_idx = cci400_get_event_idx,
+	},
+	[CCI400_R1] = {
+		.name = "CCI_400_r1",
+		.fixed_hw_cntrs = 1,	/* Cycle counter */
+		.num_hw_cntrs = 4,
+		.cntr_size = SZ_4K,
+		.format_attrs = cci400_pmu_format_attrs,
+		.event_attrs = cci400_r1_pmu_event_attrs,
+		.event_ranges = {
+			[CCI_IF_SLAVE] = {
+				CCI400_R1_SLAVE_PORT_MIN_EV,
+				CCI400_R1_SLAVE_PORT_MAX_EV,
+			},
+			[CCI_IF_MASTER] = {
+				CCI400_R1_MASTER_PORT_MIN_EV,
+				CCI400_R1_MASTER_PORT_MAX_EV,
+			},
+		},
+		.validate_hw_event = cci400_validate_hw_event,
+		.get_event_idx = cci400_get_event_idx,
+	},
+#endif
+#ifdef CONFIG_ARM_CCI5xx_PMU
+	[CCI500_R0] = {
+		.name = "CCI_500",
+		.fixed_hw_cntrs = 0,
+		.num_hw_cntrs = 8,
+		.cntr_size = SZ_64K,
+		.format_attrs = cci5xx_pmu_format_attrs,
+		.event_attrs = cci5xx_pmu_event_attrs,
+		.event_ranges = {
+			[CCI_IF_SLAVE] = {
+				CCI5xx_SLAVE_PORT_MIN_EV,
+				CCI5xx_SLAVE_PORT_MAX_EV,
+			},
+			[CCI_IF_MASTER] = {
+				CCI5xx_MASTER_PORT_MIN_EV,
+				CCI5xx_MASTER_PORT_MAX_EV,
+			},
+			[CCI_IF_GLOBAL] = {
+				CCI5xx_GLOBAL_PORT_MIN_EV,
+				CCI5xx_GLOBAL_PORT_MAX_EV,
+			},
+		},
+		.validate_hw_event = cci500_validate_hw_event,
+		.write_counters	= cci5xx_pmu_write_counters,
+	},
+	[CCI550_R0] = {
+		.name = "CCI_550",
+		.fixed_hw_cntrs = 0,
+		.num_hw_cntrs = 8,
+		.cntr_size = SZ_64K,
+		.format_attrs = cci5xx_pmu_format_attrs,
+		.event_attrs = cci5xx_pmu_event_attrs,
+		.event_ranges = {
+			[CCI_IF_SLAVE] = {
+				CCI5xx_SLAVE_PORT_MIN_EV,
+				CCI5xx_SLAVE_PORT_MAX_EV,
+			},
+			[CCI_IF_MASTER] = {
+				CCI5xx_MASTER_PORT_MIN_EV,
+				CCI5xx_MASTER_PORT_MAX_EV,
+			},
+			[CCI_IF_GLOBAL] = {
+				CCI5xx_GLOBAL_PORT_MIN_EV,
+				CCI5xx_GLOBAL_PORT_MAX_EV,
+			},
+		},
+		.validate_hw_event = cci550_validate_hw_event,
+		.write_counters	= cci5xx_pmu_write_counters,
+	},
+#endif
+};
+
+static const struct of_device_id arm_cci_pmu_matches[] = {
+#ifdef CONFIG_ARM_CCI400_PMU
+	{
+		.compatible = "arm,cci-400-pmu",
+		.data	= NULL,
+	},
+	{
+		.compatible = "arm,cci-400-pmu,r0",
+		.data	= &cci_pmu_models[CCI400_R0],
+	},
+	{
+		.compatible = "arm,cci-400-pmu,r1",
+		.data	= &cci_pmu_models[CCI400_R1],
+	},
+#endif
+#ifdef CONFIG_ARM_CCI5xx_PMU
+	{
+		.compatible = "arm,cci-500-pmu,r0",
+		.data = &cci_pmu_models[CCI500_R0],
+	},
+	{
+		.compatible = "arm,cci-550-pmu,r0",
+		.data = &cci_pmu_models[CCI550_R0],
+	},
+#endif
+	{},
+};
+
+static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev)
+{
+	const struct of_device_id *match = of_match_node(arm_cci_pmu_matches,
+							pdev->dev.of_node);
+	if (!match)
+		return NULL;
+	if (match->data)
+		return match->data;
+
+	dev_warn(&pdev->dev, "DEPRECATED compatible property, "
+			 "requires secure access to CCI registers\n");
+	return probe_cci_model(pdev);
+}
+
+static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
+{
+	int i;
+
+	for (i = 0; i < nr_irqs; i++)
+		if (irq == irqs[i])
+			return true;
+
+	return false;
+}
+
+static struct cci_pmu *cci_pmu_alloc(struct platform_device *pdev)
+{
+	struct cci_pmu *cci_pmu;
+	const struct cci_pmu_model *model;
+
+	/*
+	 * All allocations are devm_* hence we don't have to free
+	 * them explicitly on an error, as it would end up in driver
+	 * detach.
+	 */
+	model = get_cci_model(pdev);
+	if (!model) {
+		dev_warn(&pdev->dev, "CCI PMU version not supported\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*cci_pmu), GFP_KERNEL);
+	if (!cci_pmu)
+		return ERR_PTR(-ENOMEM);
+
+	cci_pmu->model = model;
+	cci_pmu->irqs = devm_kcalloc(&pdev->dev, CCI_PMU_MAX_HW_CNTRS(model),
+					sizeof(*cci_pmu->irqs), GFP_KERNEL);
+	if (!cci_pmu->irqs)
+		return ERR_PTR(-ENOMEM);
+	cci_pmu->hw_events.events = devm_kcalloc(&pdev->dev,
+					     CCI_PMU_MAX_HW_CNTRS(model),
+					     sizeof(*cci_pmu->hw_events.events),
+					     GFP_KERNEL);
+	if (!cci_pmu->hw_events.events)
+		return ERR_PTR(-ENOMEM);
+	cci_pmu->hw_events.used_mask = devm_kcalloc(&pdev->dev,
+						BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)),
+						sizeof(*cci_pmu->hw_events.used_mask),
+						GFP_KERNEL);
+	if (!cci_pmu->hw_events.used_mask)
+		return ERR_PTR(-ENOMEM);
+
+	return cci_pmu;
+}
+
+
+static int cci_pmu_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct cci_pmu *cci_pmu;
+	int i, ret, irq;
+
+	cci_pmu = cci_pmu_alloc(pdev);
+	if (IS_ERR(cci_pmu))
+		return PTR_ERR(cci_pmu);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	cci_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(cci_pmu->base))
+		return PTR_ERR(cci_pmu->base);
+
+	/*
+	 * The CCI PMU has one overflow interrupt per counter, but some may
+	 * be tied together to a common interrupt.
+	 */
+	cci_pmu->nr_irqs = 0;
+	for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0)
+			break;
+
+		if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
+			continue;
+
+		cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
+	}
+
+	/*
+	 * Ensure that the device tree has as many interrupts as the number
+	 * of counters.
+	 */
+	if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
+		dev_warn(&pdev->dev, "In-correct number of interrupts: %d, should be %d\n",
+			i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
+		return -EINVAL;
+	}
+
+	raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
+	mutex_init(&cci_pmu->reserve_mutex);
+	atomic_set(&cci_pmu->active_events, 0);
+	cpumask_set_cpu(get_cpu(), &cci_pmu->cpus);
+
+	ret = cci_pmu_init(cci_pmu, pdev);
+	if (ret) {
+		put_cpu();
+		return ret;
+	}
+
+	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+					 &cci_pmu->node);
+	put_cpu();
+	pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
+	return 0;
+}
+
+static int cci_platform_probe(struct platform_device *pdev)
+{
+	if (!cci_probed())
+		return -ENODEV;
+
+	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+}
+
+static struct platform_driver cci_pmu_driver = {
+	.driver = {
+		   .name = DRIVER_NAME_PMU,
+		   .of_match_table = arm_cci_pmu_matches,
+		  },
+	.probe = cci_pmu_probe,
+};
+
+static struct platform_driver cci_platform_driver = {
+	.driver = {
+		   .name = DRIVER_NAME,
+		   .of_match_table = arm_cci_matches,
+		  },
+	.probe = cci_platform_probe,
+};
+
+static int __init cci_platform_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+				      "perf/arm/cci:online", NULL,
+				      cci_pmu_offline_cpu);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&cci_pmu_driver);
+	if (ret)
+		return ret;
+
+	return platform_driver_register(&cci_platform_driver);
+}
+
+#else /* !CONFIG_ARM_CCI_PMU */
+
+static int __init cci_platform_init(void)
+{
+	return 0;
+}
+
+#endif /* CONFIG_ARM_CCI_PMU */
+
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
+
+#define CCI_PORT_CTRL		0x0
+#define CCI_CTRL_STATUS		0xc
+
+#define CCI_ENABLE_SNOOP_REQ	0x1
+#define CCI_ENABLE_DVM_REQ	0x2
+#define CCI_ENABLE_REQ		(CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
+
+enum cci_ace_port_type {
+	ACE_INVALID_PORT = 0x0,
+	ACE_PORT,
+	ACE_LITE_PORT,
+};
+
+struct cci_ace_port {
+	void __iomem *base;
+	unsigned long phys;
+	enum cci_ace_port_type type;
+	struct device_node *dn;
+};
+
+static struct cci_ace_port *ports;
+static unsigned int nb_cci_ports;
+
+struct cpu_port {
+	u64 mpidr;
+	u32 port;
+};
+
+/*
+ * Use the port MSB as a valid flag; the shift could be made dynamic
+ * by computing the number of bits required for port indexes.
+ * Code disabling CCI cpu ports runs with the D-cache invalidated
+ * and the SCTLR C bit clear, so data accesses must be kept to a
+ * minimum to improve performance; for now the shift is left static
+ * to avoid one more data access while disabling the CCI port.
+ */
+#define PORT_VALID_SHIFT	31
+#define PORT_VALID		(0x1 << PORT_VALID_SHIFT)
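+
+/*
+ * Worked example of the encoding set up by init_cpu_port() below: a CPU
+ * attached to CCI port index 2 stores port->port == PORT_VALID | 2 ==
+ * 0x80000002, i.e. bit 31 marks the entry valid and the low bits carry
+ * the port index.
+ */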
+
+static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
+{
+	port->port = PORT_VALID | index;
+	port->mpidr = mpidr;
+}
+
+static inline bool cpu_port_is_valid(struct cpu_port *port)
+{
+	return !!(port->port & PORT_VALID);
+}
+
+static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
+{
+	return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
+}
+
+static struct cpu_port cpu_port[NR_CPUS];
+
+/**
+ * __cci_ace_get_port - Function to retrieve the port index to which
+ *			a cpu or device is connected.
+ *
+ * @dn: device node of the device to look up
+ * @type: port type
+ *
+ * Return value:
+ *	- CCI port index on success
+ *	- -ENODEV on failure
+ */
+static int __cci_ace_get_port(struct device_node *dn, int type)
+{
+	int i;
+	bool ace_match;
+	struct device_node *cci_portn;
+
+	cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
+	for (i = 0; i < nb_cci_ports; i++) {
+		ace_match = ports[i].type == type;
+		if (ace_match && cci_portn == ports[i].dn)
+			return i;
+	}
+	return -ENODEV;
+}
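+
+/*
+ * A hypothetical device tree sketch of the binding used by the look-up
+ * above: a consumer node references one of the CCI control interface
+ * children through the "cci-control-port" phandle (the label and unit
+ * address are illustrative assumptions):
+ *
+ *	cpu@0 {
+ *		cci-control-port = <&cci_control0>;
+ *	};
+ */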
+
+int cci_ace_get_port(struct device_node *dn)
+{
+	return __cci_ace_get_port(dn, ACE_LITE_PORT);
+}
+EXPORT_SYMBOL_GPL(cci_ace_get_port);
+
+static void cci_ace_init_ports(void)
+{
+	int port, cpu;
+	struct device_node *cpun;
+
+	/*
+	 * The port index look-up speeds up the function disabling ports
+	 * by CPU, since the logical-to-port index mapping is done once
+	 * and does not change after system boot.
+	 * The stashed index array is initialized for all possible CPUs
+	 * at probe time.
+	 */
+	for_each_possible_cpu(cpu) {
+		/* too early to use cpu->of_node */
+		cpun = of_get_cpu_node(cpu, NULL);
+
+		if (WARN(!cpun, "Missing cpu device node\n"))
+			continue;
+
+		port = __cci_ace_get_port(cpun, ACE_PORT);
+		if (port < 0)
+			continue;
+
+		init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
+	}
+
+	for_each_possible_cpu(cpu) {
+		WARN(!cpu_port_is_valid(&cpu_port[cpu]),
+			"CPU %u does not have an associated CCI port\n",
+			cpu);
+	}
+}
+
+/*
+ * Functions to enable/disable a CCI interconnect slave port
+ *
+ * They are called by low-level power management code to disable slave
+ * interface snoops and DVM broadcast.
+ * Since they may execute with cache data allocation disabled, after the
+ * caches have been cleaned and invalidated, the functions provide no
+ * explicit locking: normal cacheable kernel locks based on ldrex/strex
+ * may not work with the D-cache disabled.
+ * Locking has to be provided by BSP implementations to ensure proper
+ * operation.
+ */
+
+/**
+ * cci_port_control() - function to control a CCI port
+ *
+ * @port: index of the port to setup
+ * @enable: if true enables the port, if false disables it
+ */
+static void notrace cci_port_control(unsigned int port, bool enable)
+{
+	void __iomem *base = ports[port].base;
+
+	writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
+	/*
+	 * This function is called from power-down procedures
+	 * and must not execute any instruction that might
+	 * cause the processor to be put in a quiescent state
+	 * (e.g. wfi). Hence, cpu_relax() cannot be added to this
+	 * read loop to optimize power, since it might hide possibly
+	 * disruptive operations.
+	 */
+	while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
+			;
+}
+
+/**
+ * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
+ *			       reference
+ *
+ * @mpidr: mpidr of the CPU whose CCI port should be disabled
+ *
+ * Disabling a CCI port for a CPU implies disabling the CCI port
+ * controlling that CPU cluster. Code disabling CPU CCI ports
+ * must make sure that the CPU running the code is the last active CPU
+ * in the cluster, i.e. all other CPUs are quiescent in a low power state.
+ *
+ * Return:
+ *	0 on success
+ *	-ENODEV on port look-up failure
+ */
+int notrace cci_disable_port_by_cpu(u64 mpidr)
+{
+	int cpu;
+	bool is_valid;
+
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+		is_valid = cpu_port_is_valid(&cpu_port[cpu]);
+		if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
+			cci_port_control(cpu_port[cpu].port, false);
+			return 0;
+		}
+	}
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);
+
+/**
+ * cci_enable_port_for_self() - enable a CCI port for calling CPU
+ *
+ * Enabling a CCI port for the calling CPU implies enabling the CCI
+ * port controlling that CPU's cluster. Caller must make sure that the
+ * CPU running the code is the first active CPU in the cluster and all
+ * other CPUs are quiescent in a low power state or waiting for this CPU
+ * to complete the CCI initialization.
+ *
+ * Because this is called when the MMU is still off and with no stack,
+ * the code must be position independent and ideally rely on callee
+ * clobbered registers only.  To achieve this we must code this function
+ * entirely in assembler.
+ *
+ * On success this returns with the proper CCI port enabled.  In case of
+ * any failure this never returns as the inability to enable the CCI is
+ * fatal and there is no possible recovery at this stage.
+ */
+asmlinkage void __naked cci_enable_port_for_self(void)
+{
+	asm volatile ("\n"
+"	.arch armv7-a\n"
+"	mrc	p15, 0, r0, c0, c0, 5	@ get MPIDR value \n"
+"	and	r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n"
+"	adr	r1, 5f \n"
+"	ldr	r2, [r1] \n"
+"	add	r1, r1, r2		@ &cpu_port \n"
+"	add	ip, r1, %[sizeof_cpu_port] \n"
+
+	/* Loop over the cpu_port array looking for a matching MPIDR */
+"1:	ldr	r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n"
+"	cmp	r2, r0 			@ compare MPIDR \n"
+"	bne	2f \n"
+
+	/* Found a match, now test port validity */
+"	ldr	r3, [r1, %[offsetof_cpu_port_port]] \n"
+"	tst	r3, #"__stringify(PORT_VALID)" \n"
+"	bne	3f \n"
+
+	/* no match, loop with the next cpu_port entry */
+"2:	add	r1, r1, %[sizeof_struct_cpu_port] \n"
+"	cmp	r1, ip			@ done? \n"
+"	blo	1b \n"
+
+	/* CCI port not found -- cheaply try to stall this CPU */
+"cci_port_not_found: \n"
+"	wfi \n"
+"	wfe \n"
+"	b	cci_port_not_found \n"
+
+	/* Use matched port index to look up the corresponding ports entry */
+"3:	bic	r3, r3, #"__stringify(PORT_VALID)" \n"
+"	adr	r0, 6f \n"
+"	ldmia	r0, {r1, r2} \n"
+"	sub	r1, r1, r0 		@ virt - phys \n"
+"	ldr	r0, [r0, r2] 		@ *(&ports) \n"
+"	mov	r2, %[sizeof_struct_ace_port] \n"
+"	mla	r0, r2, r3, r0		@ &ports[index] \n"
+"	sub	r0, r0, r1		@ virt_to_phys() \n"
+
+	/* Enable the CCI port */
+"	ldr	r0, [r0, %[offsetof_port_phys]] \n"
+"	mov	r3, %[cci_enable_req]\n"		   
+"	str	r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"
+
+	/* poll the status reg for completion */
+"	adr	r1, 7f \n"
+"	ldr	r0, [r1] \n"
+"	ldr	r0, [r0, r1]		@ cci_ctrl_base \n"
+"4:	ldr	r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
+"	tst	r1, %[cci_control_status_bits] \n"			
+"	bne	4b \n"
+
+"	mov	r0, #0 \n"
+"	bx	lr \n"
+
+"	.align	2 \n"
+"5:	.word	cpu_port - . \n"
+"6:	.word	. \n"
+"	.word	ports - 6b \n"
+"7:	.word	cci_ctrl_phys - . \n"
+	: :
+	[sizeof_cpu_port] "i" (sizeof(cpu_port)),
+	[cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
+	[cci_control_status_bits] "i" cpu_to_le32(1),
+#ifndef __ARMEB__
+	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
+#else
+	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4),
+#endif
+	[offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)),
+	[sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
+	[sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
+	[offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );
+}
+
+/**
+ * __cci_control_port_by_device() - function to control a CCI port by device
+ *				    reference
+ *
+ * @dn: device node pointer of the device whose CCI port should be
+ *      controlled
+ * @enable: if true enables the port, if false disables it
+ *
+ * Return:
+ *	0 on success
+ *	-ENODEV on port look-up failure
+ */
+int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
+{
+	int port;
+
+	if (!dn)
+		return -ENODEV;
+
+	port = __cci_ace_get_port(dn, ACE_LITE_PORT);
+	if (WARN_ONCE(port < 0, "node %pOF ACE lite port look-up failure\n",
+				dn))
+		return -ENODEV;
+	cci_port_control(port, enable);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__cci_control_port_by_device);
+
+/**
+ * __cci_control_port_by_index() - function to control a CCI port by port index
+ *
+ * @port: port index previously retrieved with cci_ace_get_port()
+ * @enable: if true enables the port, if false disables it
+ *
+ * Return:
+ *	0 on success
+ *	-ENODEV on port index out of range
+ *	-EPERM if operation carried out on an ACE PORT
+ */
+int notrace __cci_control_port_by_index(u32 port, bool enable)
+{
+	if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)
+		return -ENODEV;
+	/*
+	 * CCI control for ports connected to CPUs is extremely fragile
+	 * and must be made to go through a specific and controlled
+	 * interface (i.e. cci_disable_port_by_cpu()); control by general
+	 * purpose indexing is therefore disabled for ACE ports.
+	 */
+	if (ports[port].type == ACE_PORT)
+		return -EPERM;
+
+	cci_port_control(port, enable);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
+
+static const struct of_device_id arm_cci_ctrl_if_matches[] = {
+	{.compatible = "arm,cci-400-ctrl-if", },
+	{},
+};
+
+static int cci_probe_ports(struct device_node *np)
+{
+	struct cci_nb_ports const *cci_config;
+	int ret, i, nb_ace = 0, nb_ace_lite = 0;
+	struct device_node *cp;
+	struct resource res;
+	const char *match_str;
+	bool is_ace;
+
+	cci_config = of_match_node(arm_cci_matches, np)->data;
+	if (!cci_config)
+		return -ENODEV;
+
+	nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
+
+	ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
+	if (!ports)
+		return -ENOMEM;
+
+	for_each_child_of_node(np, cp) {
+		if (!of_match_node(arm_cci_ctrl_if_matches, cp))
+			continue;
+
+		if (!of_device_is_available(cp))
+			continue;
+
+		i = nb_ace + nb_ace_lite;
+
+		if (i >= nb_cci_ports)
+			break;
+
+		if (of_property_read_string(cp, "interface-type",
+					&match_str)) {
+			WARN(1, "node %pOF missing interface-type property\n",
+				  cp);
+			continue;
+		}
+		is_ace = strcmp(match_str, "ace") == 0;
+		if (!is_ace && strcmp(match_str, "ace-lite")) {
+			WARN(1, "node %pOF containing invalid interface-type property, skipping it\n",
+					cp);
+			continue;
+		}
+
+		ret = of_address_to_resource(cp, 0, &res);
+		if (!ret) {
+			ports[i].base = ioremap(res.start, resource_size(&res));
+			ports[i].phys = res.start;
+		}
+		if (ret || !ports[i].base) {
+			WARN(1, "unable to ioremap CCI port %d\n", i);
+			continue;
+		}
+
+		if (is_ace) {
+			if (WARN_ON(nb_ace >= cci_config->nb_ace))
+				continue;
+			ports[i].type = ACE_PORT;
+			++nb_ace;
+		} else {
+			if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
+				continue;
+			ports[i].type = ACE_LITE_PORT;
+			++nb_ace_lite;
+		}
+		ports[i].dn = cp;
+	}
+
+	/*
+	 * If there is no CCI port that is under kernel control,
+	 * return early and report the probe status.
+	 */
+	if (!nb_ace && !nb_ace_lite)
+		return -ENODEV;
+
+	/* initialize a stashed array of ACE ports to speed up look-up */
+	cci_ace_init_ports();
+
+	/*
+	 * Multi-cluster systems may need this data when non-coherent, during
+	 * cluster power-up/power-down. Make sure it reaches main memory.
+	 */
+	sync_cache_w(&cci_ctrl_base);
+	sync_cache_w(&cci_ctrl_phys);
+	sync_cache_w(&ports);
+	sync_cache_w(&cpu_port);
+	__sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
+	pr_info("ARM CCI driver probed\n");
+
+	return 0;
+}
+#else /* !CONFIG_ARM_CCI400_PORT_CTRL */
+static inline int cci_probe_ports(struct device_node *np)
+{
+	return 0;
+}
+#endif /* CONFIG_ARM_CCI400_PORT_CTRL */
+
+static int cci_probe(void)
+{
+	int ret;
+	struct device_node *np;
+	struct resource res;
+
+	np = of_find_matching_node(NULL, arm_cci_matches);
+	if (!np || !of_device_is_available(np))
+		return -ENODEV;
+
+	ret = of_address_to_resource(np, 0, &res);
+	if (!ret) {
+		cci_ctrl_base = ioremap(res.start, resource_size(&res));
+		cci_ctrl_phys = res.start;
+	}
+	if (ret || !cci_ctrl_base) {
+		WARN(1, "unable to ioremap CCI ctrl\n");
+		return -ENXIO;
+	}
+
+	return cci_probe_ports(np);
+}
+
+static int cci_init_status = -EAGAIN;
+static DEFINE_MUTEX(cci_probing);
+
+static int cci_init(void)
+{
+	if (cci_init_status != -EAGAIN)
+		return cci_init_status;
+
+	mutex_lock(&cci_probing);
+	if (cci_init_status == -EAGAIN)
+		cci_init_status = cci_probe();
+	mutex_unlock(&cci_probing);
+	return cci_init_status;
+}
+
+/*
+ * To sort out early init call ordering, a helper function is provided to
+ * check if the CCI driver has been initialized. If the driver has not been
+ * initialized yet, the helper calls the init function that probes the
+ * driver and caches the return value.
+ */
+bool cci_probed(void)
+{
+	return cci_init() == 0;
+}
+EXPORT_SYMBOL_GPL(cci_probed);
+
+early_initcall(cci_init);
+core_initcall(cci_platform_init);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ARM CCI support");
diff --git a/src/kernel/linux/v4.14/drivers/bus/arm-ccn.c b/src/kernel/linux/v4.14/drivers/bus/arm-ccn.c
new file mode 100644
index 0000000..942d076
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/arm-ccn.c
@@ -0,0 +1,1599 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include <linux/ctype.h>
+#include <linux/hrtimer.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define CCN_NUM_XP_PORTS 2
+#define CCN_NUM_VCS 4
+#define CCN_NUM_REGIONS	256
+#define CCN_REGION_SIZE	0x10000
+
+#define CCN_ALL_OLY_ID			0xff00
+#define CCN_ALL_OLY_ID__OLY_ID__SHIFT			0
+#define CCN_ALL_OLY_ID__OLY_ID__MASK			0x1f
+#define CCN_ALL_OLY_ID__NODE_ID__SHIFT			8
+#define CCN_ALL_OLY_ID__NODE_ID__MASK			0x3f
+
+#define CCN_MN_ERRINT_STATUS		0x0008
+#define CCN_MN_ERRINT_STATUS__INTREQ__DESSERT		0x11
+#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__ENABLE	0x02
+#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLED	0x20
+#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE	0x22
+#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_ENABLE	0x04
+#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLED	0x40
+#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLE	0x44
+#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE	0x08
+#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED	0x80
+#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE	0x88
+#define CCN_MN_OLY_COMP_LIST_63_0	0x01e0
+#define CCN_MN_ERR_SIG_VAL_63_0		0x0300
+#define CCN_MN_ERR_SIG_VAL_63_0__DT			(1 << 1)
+
+#define CCN_DT_ACTIVE_DSM		0x0000
+#define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n)		((n) * 8)
+#define CCN_DT_ACTIVE_DSM__DSM_ID__MASK			0xff
+#define CCN_DT_CTL			0x0028
+#define CCN_DT_CTL__DT_EN				(1 << 0)
+#define CCN_DT_PMEVCNT(n)		(0x0100 + (n) * 0x8)
+#define CCN_DT_PMCCNTR			0x0140
+#define CCN_DT_PMCCNTRSR		0x0190
+#define CCN_DT_PMOVSR			0x0198
+#define CCN_DT_PMOVSR_CLR		0x01a0
+#define CCN_DT_PMOVSR_CLR__MASK				0x1f
+#define CCN_DT_PMCR			0x01a8
+#define CCN_DT_PMCR__OVFL_INTR_EN			(1 << 6)
+#define CCN_DT_PMCR__PMU_EN				(1 << 0)
+#define CCN_DT_PMSR			0x01b0
+#define CCN_DT_PMSR_REQ			0x01b8
+#define CCN_DT_PMSR_CLR			0x01c0
+
+#define CCN_HNF_PMU_EVENT_SEL		0x0600
+#define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n)		((n) * 4)
+#define CCN_HNF_PMU_EVENT_SEL__ID__MASK			0xf
+
+#define CCN_XP_DT_CONFIG		0x0300
+#define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n)		((n) * 4)
+#define CCN_XP_DT_CONFIG__DT_CFG__MASK			0xf
+#define CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH		0x0
+#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT_0_OR_1	0x1
+#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n)		(0x2 + (n))
+#define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n)	(0x4 + (n))
+#define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n))
+#define CCN_XP_DT_INTERFACE_SEL		0x0308
+#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n)	(0 + (n) * 8)
+#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK	0x1
+#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n)	(1 + (n) * 8)
+#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK	0x1
+#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n)	(2 + (n) * 8)
+#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK	0x3
+#define CCN_XP_DT_CMP_VAL_L(n)		(0x0310 + (n) * 0x40)
+#define CCN_XP_DT_CMP_VAL_H(n)		(0x0318 + (n) * 0x40)
+#define CCN_XP_DT_CMP_MASK_L(n)		(0x0320 + (n) * 0x40)
+#define CCN_XP_DT_CMP_MASK_H(n)		(0x0328 + (n) * 0x40)
+#define CCN_XP_DT_CONTROL		0x0370
+#define CCN_XP_DT_CONTROL__DT_ENABLE			(1 << 0)
+#define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n)		(12 + (n) * 4)
+#define CCN_XP_DT_CONTROL__WP_ARM_SEL__MASK		0xf
+#define CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS		0xf
+#define CCN_XP_PMU_EVENT_SEL		0x0600
+#define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n)		((n) * 7)
+#define CCN_XP_PMU_EVENT_SEL__ID__MASK			0x3f
+
+#define CCN_SBAS_PMU_EVENT_SEL		0x0600
+#define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n)		((n) * 4)
+#define CCN_SBAS_PMU_EVENT_SEL__ID__MASK		0xf
+
+#define CCN_RNI_PMU_EVENT_SEL		0x0600
+#define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n)		((n) * 4)
+#define CCN_RNI_PMU_EVENT_SEL__ID__MASK			0xf
+
+#define CCN_TYPE_MN	0x01
+#define CCN_TYPE_DT	0x02
+#define CCN_TYPE_HNF	0x04
+#define CCN_TYPE_HNI	0x05
+#define CCN_TYPE_XP	0x08
+#define CCN_TYPE_SBSX	0x0c
+#define CCN_TYPE_SBAS	0x10
+#define CCN_TYPE_RNI_1P	0x14
+#define CCN_TYPE_RNI_2P	0x15
+#define CCN_TYPE_RNI_3P	0x16
+#define CCN_TYPE_RND_1P	0x18 /* RN-D = RN-I + DVM */
+#define CCN_TYPE_RND_2P	0x19
+#define CCN_TYPE_RND_3P	0x1a
+#define CCN_TYPE_CYCLES	0xff /* Pseudotype */
+
+#define CCN_EVENT_WATCHPOINT 0xfe /* Pseudoevent */
+
+#define CCN_NUM_PMU_EVENTS		4
+#define CCN_NUM_XP_WATCHPOINTS		2 /* See DT.dbg_id.num_watchpoints */
+#define CCN_NUM_PMU_EVENT_COUNTERS	8 /* See DT.dbg_id.num_pmucntr */
+#define CCN_IDX_PMU_CYCLE_COUNTER	CCN_NUM_PMU_EVENT_COUNTERS
+
+#define CCN_NUM_PREDEFINED_MASKS	4
+#define CCN_IDX_MASK_ANY		(CCN_NUM_PMU_EVENT_COUNTERS + 0)
+#define CCN_IDX_MASK_EXACT		(CCN_NUM_PMU_EVENT_COUNTERS + 1)
+#define CCN_IDX_MASK_ORDER		(CCN_NUM_PMU_EVENT_COUNTERS + 2)
+#define CCN_IDX_MASK_OPCODE		(CCN_NUM_PMU_EVENT_COUNTERS + 3)
+
+struct arm_ccn_component {
+	void __iomem *base;
+	u32 type;
+
+	DECLARE_BITMAP(pmu_events_mask, CCN_NUM_PMU_EVENTS);
+	union {
+		struct {
+			DECLARE_BITMAP(dt_cmp_mask, CCN_NUM_XP_WATCHPOINTS);
+		} xp;
+	};
+};
+
+#define pmu_to_arm_ccn(_pmu) container_of(container_of(_pmu, \
+	struct arm_ccn_dt, pmu), struct arm_ccn, dt)
+
+struct arm_ccn_dt {
+	int id;
+	void __iomem *base;
+
+	spinlock_t config_lock;
+
+	DECLARE_BITMAP(pmu_counters_mask, CCN_NUM_PMU_EVENT_COUNTERS + 1);
+	struct {
+		struct arm_ccn_component *source;
+		struct perf_event *event;
+	} pmu_counters[CCN_NUM_PMU_EVENT_COUNTERS + 1];
+
+	struct {
+	       u64 l, h;
+	} cmp_mask[CCN_NUM_PMU_EVENT_COUNTERS + CCN_NUM_PREDEFINED_MASKS];
+
+	struct hrtimer hrtimer;
+
+	cpumask_t cpu;
+	struct hlist_node node;
+
+	struct pmu pmu;
+};
+
+struct arm_ccn {
+	struct device *dev;
+	void __iomem *base;
+	unsigned int irq;
+
+	unsigned sbas_present:1;
+	unsigned sbsx_present:1;
+
+	int num_nodes;
+	struct arm_ccn_component *node;
+
+	int num_xps;
+	struct arm_ccn_component *xp;
+
+	struct arm_ccn_dt dt;
+	int mn_id;
+};
+
+static int arm_ccn_node_to_xp(int node)
+{
+	return node / CCN_NUM_XP_PORTS;
+}
+
+static int arm_ccn_node_to_xp_port(int node)
+{
+	return node % CCN_NUM_XP_PORTS;
+}
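+
+/*
+ * With CCN_NUM_XP_PORTS == 2 the helpers above map consecutive node IDs
+ * onto XPs in pairs, e.g. node 5 -> XP 2 (5 / 2), port 1 (5 % 2).
+ */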
+
+
+/*
+ * Bit shifts and masks in these defines must be kept in sync with
+ * arm_ccn_pmu_config_set() and CCN_FORMAT_ATTRs below!
+ */
+#define CCN_CONFIG_NODE(_config)	(((_config) >> 0) & 0xff)
+#define CCN_CONFIG_XP(_config)		(((_config) >> 0) & 0xff)
+#define CCN_CONFIG_TYPE(_config)	(((_config) >> 8) & 0xff)
+#define CCN_CONFIG_EVENT(_config)	(((_config) >> 16) & 0xff)
+#define CCN_CONFIG_PORT(_config)	(((_config) >> 24) & 0x3)
+#define CCN_CONFIG_BUS(_config)		(((_config) >> 24) & 0x3)
+#define CCN_CONFIG_VC(_config)		(((_config) >> 26) & 0x7)
+#define CCN_CONFIG_DIR(_config)		(((_config) >> 29) & 0x1)
+#define CCN_CONFIG_MASK(_config)	(((_config) >> 30) & 0xf)
+
+static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
+{
+	*config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24));
+	*config |= (node_xp << 0) | (type << 8) | (port << 24);
+}
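+
+/*
+ * Worked example of the layout decoded above: config == 0x10405 gives
+ * CCN_CONFIG_NODE() == 0x05, CCN_CONFIG_TYPE() == 0x04 (CCN_TYPE_HNF)
+ * and CCN_CONFIG_EVENT() == 0x01, i.e. the hnf_cache_miss event
+ * (defined below) on node 5.
+ */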
+
+static ssize_t arm_ccn_pmu_format_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct dev_ext_attribute *ea = container_of(attr,
+			struct dev_ext_attribute, attr);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var);
+}
+
+#define CCN_FORMAT_ATTR(_name, _config) \
+	struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \
+			{ __ATTR(_name, S_IRUGO, arm_ccn_pmu_format_show, \
+			NULL), _config }
+
+static CCN_FORMAT_ATTR(node, "config:0-7");
+static CCN_FORMAT_ATTR(xp, "config:0-7");
+static CCN_FORMAT_ATTR(type, "config:8-15");
+static CCN_FORMAT_ATTR(event, "config:16-23");
+static CCN_FORMAT_ATTR(port, "config:24-25");
+static CCN_FORMAT_ATTR(bus, "config:24-25");
+static CCN_FORMAT_ATTR(vc, "config:26-28");
+static CCN_FORMAT_ATTR(dir, "config:29-29");
+static CCN_FORMAT_ATTR(mask, "config:30-33");
+static CCN_FORMAT_ATTR(cmp_l, "config1:0-62");
+static CCN_FORMAT_ATTR(cmp_h, "config2:0-59");
+
+static struct attribute *arm_ccn_pmu_format_attrs[] = {
+	&arm_ccn_pmu_format_attr_node.attr.attr,
+	&arm_ccn_pmu_format_attr_xp.attr.attr,
+	&arm_ccn_pmu_format_attr_type.attr.attr,
+	&arm_ccn_pmu_format_attr_event.attr.attr,
+	&arm_ccn_pmu_format_attr_port.attr.attr,
+	&arm_ccn_pmu_format_attr_bus.attr.attr,
+	&arm_ccn_pmu_format_attr_vc.attr.attr,
+	&arm_ccn_pmu_format_attr_dir.attr.attr,
+	&arm_ccn_pmu_format_attr_mask.attr.attr,
+	&arm_ccn_pmu_format_attr_cmp_l.attr.attr,
+	&arm_ccn_pmu_format_attr_cmp_h.attr.attr,
+	NULL
+};
+
+static struct attribute_group arm_ccn_pmu_format_attr_group = {
+	.name = "format",
+	.attrs = arm_ccn_pmu_format_attrs,
+};
+
+
+struct arm_ccn_pmu_event {
+	struct device_attribute attr;
+	u32 type;
+	u32 event;
+	int num_ports;
+	int num_vcs;
+	const char *def;
+	int mask;
+};
+
+#define CCN_EVENT_ATTR(_name) \
+	__ATTR(_name, S_IRUGO, arm_ccn_pmu_event_show, NULL)
+
+/*
+ * Events defined in the TRM for MN, HN-I and SBSX are actually watchpoints
+ * set on their ports in the XP they are connected to. For the sake of
+ * usability they are explicitly defined here (and translated into a relevant
+ * watchpoint in arm_ccn_pmu_event_init()) so the user can easily request
+ * them without deep knowledge of the flit format.
+ */
+
+#define CCN_EVENT_MN(_name, _def, _mask) { .attr = CCN_EVENT_ATTR(mn_##_name), \
+		.type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \
+		.num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, \
+		.def = _def, .mask = _mask, }
+
+#define CCN_EVENT_HNI(_name, _def, _mask) { \
+		.attr = CCN_EVENT_ATTR(hni_##_name), .type = CCN_TYPE_HNI, \
+		.event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
+		.num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }
+
+#define CCN_EVENT_SBSX(_name, _def, _mask) { \
+		.attr = CCN_EVENT_ATTR(sbsx_##_name), .type = CCN_TYPE_SBSX, \
+		.event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
+		.num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }
+
+#define CCN_EVENT_HNF(_name, _event) { .attr = CCN_EVENT_ATTR(hnf_##_name), \
+		.type = CCN_TYPE_HNF, .event = _event, }
+
+#define CCN_EVENT_XP(_name, _event) { .attr = CCN_EVENT_ATTR(xp_##_name), \
+		.type = CCN_TYPE_XP, .event = _event, \
+		.num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, }
+
+/*
+ * RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type IDs depending
+ * on the configuration. One of them is picked to represent the whole group,
+ * as they all share the same event types.
+ */
+#define CCN_EVENT_RNI(_name, _event) { .attr = CCN_EVENT_ATTR(rni_##_name), \
+		.type = CCN_TYPE_RNI_3P, .event = _event, }
+
+#define CCN_EVENT_SBAS(_name, _event) { .attr = CCN_EVENT_ATTR(sbas_##_name), \
+		.type = CCN_TYPE_SBAS, .event = _event, }
+
+#define CCN_EVENT_CYCLES(_name) { .attr = CCN_EVENT_ATTR(_name), \
+		.type = CCN_TYPE_CYCLES }
+
+
+static ssize_t arm_ccn_pmu_event_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+	struct arm_ccn_pmu_event *event = container_of(attr,
+			struct arm_ccn_pmu_event, attr);
+	ssize_t res;
+
+	res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type);
+	if (event->event)
+		res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x",
+				event->event);
+	if (event->def)
+		res += snprintf(buf + res, PAGE_SIZE - res, ",%s",
+				event->def);
+	if (event->mask)
+		res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
+				event->mask);
+
+	/* Arguments required by an event */
+	switch (event->type) {
+	case CCN_TYPE_CYCLES:
+		break;
+	case CCN_TYPE_XP:
+		res += snprintf(buf + res, PAGE_SIZE - res,
+				",xp=?,vc=?");
+		if (event->event == CCN_EVENT_WATCHPOINT)
+			res += snprintf(buf + res, PAGE_SIZE - res,
+					",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?");
+		else
+			res += snprintf(buf + res, PAGE_SIZE - res,
+					",bus=?");
+
+		break;
+	case CCN_TYPE_MN:
+		res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
+		break;
+	default:
+		res += snprintf(buf + res, PAGE_SIZE - res, ",node=?");
+		break;
+	}
+
+	res += snprintf(buf + res, PAGE_SIZE - res, "\n");
+
+	return res;
+}
+
+static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
+				     struct attribute *attr, int index)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+	struct device_attribute *dev_attr = container_of(attr,
+			struct device_attribute, attr);
+	struct arm_ccn_pmu_event *event = container_of(dev_attr,
+			struct arm_ccn_pmu_event, attr);
+
+	if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present)
+		return 0;
+	if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present)
+		return 0;
+
+	return attr->mode;
+}
+
+static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
+	CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
+	CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
+	CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
+	CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
+	CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
+	CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
+	CCN_EVENT_HNI(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
+	CCN_EVENT_HNI(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
+			CCN_IDX_MASK_ORDER),
+	CCN_EVENT_SBSX(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
+	CCN_EVENT_SBSX(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
+	CCN_EVENT_SBSX(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
+	CCN_EVENT_SBSX(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
+	CCN_EVENT_SBSX(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
+			CCN_IDX_MASK_ORDER),
+	CCN_EVENT_HNF(cache_miss, 0x1),
+	CCN_EVENT_HNF(l3_sf_cache_access, 0x02),
+	CCN_EVENT_HNF(cache_fill, 0x3),
+	CCN_EVENT_HNF(pocq_retry, 0x4),
+	CCN_EVENT_HNF(pocq_reqs_recvd, 0x5),
+	CCN_EVENT_HNF(sf_hit, 0x6),
+	CCN_EVENT_HNF(sf_evictions, 0x7),
+	CCN_EVENT_HNF(snoops_sent, 0x8),
+	CCN_EVENT_HNF(snoops_broadcast, 0x9),
+	CCN_EVENT_HNF(l3_eviction, 0xa),
+	CCN_EVENT_HNF(l3_fill_invalid_way, 0xb),
+	CCN_EVENT_HNF(mc_retries, 0xc),
+	CCN_EVENT_HNF(mc_reqs, 0xd),
+	CCN_EVENT_HNF(qos_hh_retry, 0xe),
+	CCN_EVENT_RNI(rdata_beats_p0, 0x1),
+	CCN_EVENT_RNI(rdata_beats_p1, 0x2),
+	CCN_EVENT_RNI(rdata_beats_p2, 0x3),
+	CCN_EVENT_RNI(rxdat_flits, 0x4),
+	CCN_EVENT_RNI(txdat_flits, 0x5),
+	CCN_EVENT_RNI(txreq_flits, 0x6),
+	CCN_EVENT_RNI(txreq_flits_retried, 0x7),
+	CCN_EVENT_RNI(rrt_full, 0x8),
+	CCN_EVENT_RNI(wrt_full, 0x9),
+	CCN_EVENT_RNI(txreq_flits_replayed, 0xa),
+	CCN_EVENT_XP(upload_starvation, 0x1),
+	CCN_EVENT_XP(download_starvation, 0x2),
+	CCN_EVENT_XP(respin, 0x3),
+	CCN_EVENT_XP(valid_flit, 0x4),
+	CCN_EVENT_XP(watchpoint, CCN_EVENT_WATCHPOINT),
+	CCN_EVENT_SBAS(rdata_beats_p0, 0x1),
+	CCN_EVENT_SBAS(rxdat_flits, 0x4),
+	CCN_EVENT_SBAS(txdat_flits, 0x5),
+	CCN_EVENT_SBAS(txreq_flits, 0x6),
+	CCN_EVENT_SBAS(txreq_flits_retried, 0x7),
+	CCN_EVENT_SBAS(rrt_full, 0x8),
+	CCN_EVENT_SBAS(wrt_full, 0x9),
+	CCN_EVENT_SBAS(txreq_flits_replayed, 0xa),
+	CCN_EVENT_CYCLES(cycles),
+};
+
+/* Populated in arm_ccn_init() */
+static struct attribute
+		*arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];
+
+static struct attribute_group arm_ccn_pmu_events_attr_group = {
+	.name = "events",
+	.is_visible = arm_ccn_pmu_events_is_visible,
+	.attrs = arm_ccn_pmu_events_attrs,
+};
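+
+/*
+ * A minimal usage sketch, assuming the PMU registered under the default
+ * "ccn" name (see arm_ccn_pmu_init() below); the node number is an
+ * arbitrary example:
+ *
+ *	perf stat -a -e ccn/hnf_cache_miss,node=2/ <workload>
+ *
+ * The node/xp/port/vc/dir/mask fields correspond to the "format"
+ * attribute group defined earlier in this file.
+ */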
+
+
+static u64 *arm_ccn_pmu_get_cmp_mask(struct arm_ccn *ccn, const char *name)
+{
+	unsigned long i;
+
+	if (WARN_ON(!name || !name[0] || !isxdigit(name[0]) || !name[1]))
+		return NULL;
+	i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a';
+
+	switch (name[1]) {
+	case 'l':
+		return &ccn->dt.cmp_mask[i].l;
+	case 'h':
+		return &ccn->dt.cmp_mask[i].h;
+	default:
+		return NULL;
+	}
+}
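+
+/*
+ * Example of the name parsing above: attribute "al" yields i == 0xa and
+ * returns &ccn->dt.cmp_mask[10].l, while "3h" returns
+ * &ccn->dt.cmp_mask[3].h.
+ */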
+
+static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+	u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
+
+	return mask ? snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL;
+}
+
+static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+	u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
+	int err = -EINVAL;
+
+	if (mask)
+		err = kstrtoull(buf, 0, mask);
+
+	return err ? err : count;
+}
+
+#define CCN_CMP_MASK_ATTR(_name) \
+	struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
+			__ATTR(_name, S_IRUGO | S_IWUSR, \
+			arm_ccn_pmu_cmp_mask_show, arm_ccn_pmu_cmp_mask_store)
+
+#define CCN_CMP_MASK_ATTR_RO(_name) \
+	struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
+			__ATTR(_name, S_IRUGO, arm_ccn_pmu_cmp_mask_show, NULL)
+
+static CCN_CMP_MASK_ATTR(0l);
+static CCN_CMP_MASK_ATTR(0h);
+static CCN_CMP_MASK_ATTR(1l);
+static CCN_CMP_MASK_ATTR(1h);
+static CCN_CMP_MASK_ATTR(2l);
+static CCN_CMP_MASK_ATTR(2h);
+static CCN_CMP_MASK_ATTR(3l);
+static CCN_CMP_MASK_ATTR(3h);
+static CCN_CMP_MASK_ATTR(4l);
+static CCN_CMP_MASK_ATTR(4h);
+static CCN_CMP_MASK_ATTR(5l);
+static CCN_CMP_MASK_ATTR(5h);
+static CCN_CMP_MASK_ATTR(6l);
+static CCN_CMP_MASK_ATTR(6h);
+static CCN_CMP_MASK_ATTR(7l);
+static CCN_CMP_MASK_ATTR(7h);
+static CCN_CMP_MASK_ATTR_RO(8l);
+static CCN_CMP_MASK_ATTR_RO(8h);
+static CCN_CMP_MASK_ATTR_RO(9l);
+static CCN_CMP_MASK_ATTR_RO(9h);
+static CCN_CMP_MASK_ATTR_RO(al);
+static CCN_CMP_MASK_ATTR_RO(ah);
+static CCN_CMP_MASK_ATTR_RO(bl);
+static CCN_CMP_MASK_ATTR_RO(bh);
+
+static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
+	&arm_ccn_pmu_cmp_mask_attr_0l.attr, &arm_ccn_pmu_cmp_mask_attr_0h.attr,
+	&arm_ccn_pmu_cmp_mask_attr_1l.attr, &arm_ccn_pmu_cmp_mask_attr_1h.attr,
+	&arm_ccn_pmu_cmp_mask_attr_2l.attr, &arm_ccn_pmu_cmp_mask_attr_2h.attr,
+	&arm_ccn_pmu_cmp_mask_attr_3l.attr, &arm_ccn_pmu_cmp_mask_attr_3h.attr,
+	&arm_ccn_pmu_cmp_mask_attr_4l.attr, &arm_ccn_pmu_cmp_mask_attr_4h.attr,
+	&arm_ccn_pmu_cmp_mask_attr_5l.attr, &arm_ccn_pmu_cmp_mask_attr_5h.attr,
+	&arm_ccn_pmu_cmp_mask_attr_6l.attr, &arm_ccn_pmu_cmp_mask_attr_6h.attr,
+	&arm_ccn_pmu_cmp_mask_attr_7l.attr, &arm_ccn_pmu_cmp_mask_attr_7h.attr,
+	&arm_ccn_pmu_cmp_mask_attr_8l.attr, &arm_ccn_pmu_cmp_mask_attr_8h.attr,
+	&arm_ccn_pmu_cmp_mask_attr_9l.attr, &arm_ccn_pmu_cmp_mask_attr_9h.attr,
+	&arm_ccn_pmu_cmp_mask_attr_al.attr, &arm_ccn_pmu_cmp_mask_attr_ah.attr,
+	&arm_ccn_pmu_cmp_mask_attr_bl.attr, &arm_ccn_pmu_cmp_mask_attr_bh.attr,
+	NULL
+};
+
+static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
+	.name = "cmp_mask",
+	.attrs = arm_ccn_pmu_cmp_mask_attrs,
+};
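+
+/*
+ * A hypothetical shell sketch of programming one of the writable masks
+ * above, assuming the PMU registered as "ccn" (the sysfs path is an
+ * assumption based on the attribute group name):
+ *
+ *	echo 0xffffffffffffff00 > \
+ *		/sys/bus/event_source/devices/ccn/cmp_mask/0l
+ */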
+
+static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+
+	return cpumap_print_to_pagebuf(true, buf, &ccn->dt.cpu);
+}
+
+static struct device_attribute arm_ccn_pmu_cpumask_attr =
+		__ATTR(cpumask, S_IRUGO, arm_ccn_pmu_cpumask_show, NULL);
+
+static struct attribute *arm_ccn_pmu_cpumask_attrs[] = {
+	&arm_ccn_pmu_cpumask_attr.attr,
+	NULL,
+};
+
+static struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
+	.attrs = arm_ccn_pmu_cpumask_attrs,
+};
+
+/*
+ * The default poll period is 10ms, which is more than enough: even in
+ * the worst case scenario (an event every cycle) on a bus clocked at
+ * 1GHz, the smallest (32 bit) counter will only overflow after more
+ * than 4s.
+ */
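+/*
+ * The arithmetic behind the figure above: 2^32 counts at 10^9 counts
+ * per second take 2^32 / 10^9 ~= 4.29 seconds to wrap.
+ */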
+static unsigned int arm_ccn_pmu_poll_period_us = 10000;
+module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint,
+		S_IRUGO | S_IWUSR);
+
+static ktime_t arm_ccn_pmu_timer_period(void)
+{
+	return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000);
+}
+
+
+static const struct attribute_group *arm_ccn_pmu_attr_groups[] = {
+	&arm_ccn_pmu_events_attr_group,
+	&arm_ccn_pmu_format_attr_group,
+	&arm_ccn_pmu_cmp_mask_attr_group,
+	&arm_ccn_pmu_cpumask_attr_group,
+	NULL
+};
+
+
+static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, unsigned long size)
+{
+	int bit;
+
+	do {
+		bit = find_first_zero_bit(bitmap, size);
+		if (bit >= size)
+			return -EAGAIN;
+	} while (test_and_set_bit(bit, bitmap));
+
+	return bit;
+}
+
+/* All RN-I and RN-D nodes have identical PMUs */
+static int arm_ccn_pmu_type_eq(u32 a, u32 b)
+{
+	if (a == b)
+		return 1;
+
+	switch (a) {
+	case CCN_TYPE_RNI_1P:
+	case CCN_TYPE_RNI_2P:
+	case CCN_TYPE_RNI_3P:
+	case CCN_TYPE_RND_1P:
+	case CCN_TYPE_RND_2P:
+	case CCN_TYPE_RND_3P:
+		switch (b) {
+		case CCN_TYPE_RNI_1P:
+		case CCN_TYPE_RNI_2P:
+		case CCN_TYPE_RNI_3P:
+		case CCN_TYPE_RND_1P:
+		case CCN_TYPE_RND_2P:
+		case CCN_TYPE_RND_3P:
+			return 1;
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int arm_ccn_pmu_event_alloc(struct perf_event *event)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+	struct hw_perf_event *hw = &event->hw;
+	u32 node_xp, type, event_id;
+	struct arm_ccn_component *source;
+	int bit;
+
+	node_xp = CCN_CONFIG_NODE(event->attr.config);
+	type = CCN_CONFIG_TYPE(event->attr.config);
+	event_id = CCN_CONFIG_EVENT(event->attr.config);
+
+	/* Allocate the cycle counter */
+	if (type == CCN_TYPE_CYCLES) {
+		if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER,
+				ccn->dt.pmu_counters_mask))
+			return -EAGAIN;
+
+		hw->idx = CCN_IDX_PMU_CYCLE_COUNTER;
+		ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event;
+
+		return 0;
+	}
+
+	/* Allocate an event counter */
+	hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask,
+			CCN_NUM_PMU_EVENT_COUNTERS);
+	if (hw->idx < 0) {
+		dev_dbg(ccn->dev, "No more counters available!\n");
+		return -EAGAIN;
+	}
+
+	if (type == CCN_TYPE_XP)
+		source = &ccn->xp[node_xp];
+	else
+		source = &ccn->node[node_xp];
+	ccn->dt.pmu_counters[hw->idx].source = source;
+
+	/* Allocate an event source or a watchpoint */
+	if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT)
+		bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask,
+				CCN_NUM_XP_WATCHPOINTS);
+	else
+		bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask,
+				CCN_NUM_PMU_EVENTS);
+	if (bit < 0) {
+		dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n",
+				node_xp);
+		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
+		return -EAGAIN;
+	}
+	hw->config_base = bit;
+
+	ccn->dt.pmu_counters[hw->idx].event = event;
+
+	return 0;
+}
+
+static void arm_ccn_pmu_event_release(struct perf_event *event)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+	struct hw_perf_event *hw = &event->hw;
+
+	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) {
+		clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask);
+	} else {
+		struct arm_ccn_component *source =
+				ccn->dt.pmu_counters[hw->idx].source;
+
+		if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP &&
+				CCN_CONFIG_EVENT(event->attr.config) ==
+				CCN_EVENT_WATCHPOINT)
+			clear_bit(hw->config_base, source->xp.dt_cmp_mask);
+		else
+			clear_bit(hw->config_base, source->pmu_events_mask);
+		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
+	}
+
+	ccn->dt.pmu_counters[hw->idx].source = NULL;
+	ccn->dt.pmu_counters[hw->idx].event = NULL;
+}
+
+static int arm_ccn_pmu_event_init(struct perf_event *event)
+{
+	struct arm_ccn *ccn;
+	struct hw_perf_event *hw = &event->hw;
+	u32 node_xp, type, event_id;
+	int valid;
+	int i;
+	struct perf_event *sibling;
+
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	ccn = pmu_to_arm_ccn(event->pmu);
+
+	if (hw->sample_period) {
+		dev_dbg(ccn->dev, "Sampling not supported!\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (has_branch_stack(event) || event->attr.exclude_user ||
+			event->attr.exclude_kernel || event->attr.exclude_hv ||
+			event->attr.exclude_idle || event->attr.exclude_host ||
+			event->attr.exclude_guest) {
+		dev_dbg(ccn->dev, "Can't exclude execution levels!\n");
+		return -EINVAL;
+	}
+
+	if (event->cpu < 0) {
+		dev_dbg(ccn->dev, "Can't provide per-task data!\n");
+		return -EOPNOTSUPP;
+	}
+	/*
+	 * Many perf core operations (e.g. event rotation) operate on a
+	 * single CPU context. This is obvious for CPU PMUs, where one
+	 * expects the same set of events to be observed on all CPUs,
+	 * but can lead to issues for off-core PMUs, like CCN, where each
+	 * event could theoretically be assigned to a different CPU. To
+	 * mitigate this, we enforce CPU assignment to one, selected
+	 * processor (the one described in the "cpumask" attribute).
+	 */
+	event->cpu = cpumask_first(&ccn->dt.cpu);
+
+	node_xp = CCN_CONFIG_NODE(event->attr.config);
+	type = CCN_CONFIG_TYPE(event->attr.config);
+	event_id = CCN_CONFIG_EVENT(event->attr.config);
+
+	/* Validate node/xp vs topology */
+	switch (type) {
+	case CCN_TYPE_MN:
+		if (node_xp != ccn->mn_id) {
+			dev_dbg(ccn->dev, "Invalid MN ID %d!\n", node_xp);
+			return -EINVAL;
+		}
+		break;
+	case CCN_TYPE_XP:
+		if (node_xp >= ccn->num_xps) {
+			dev_dbg(ccn->dev, "Invalid XP ID %d!\n", node_xp);
+			return -EINVAL;
+		}
+		break;
+	case CCN_TYPE_CYCLES:
+		break;
+	default:
+		if (node_xp >= ccn->num_nodes) {
+			dev_dbg(ccn->dev, "Invalid node ID %d!\n", node_xp);
+			return -EINVAL;
+		}
+		if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
+			dev_dbg(ccn->dev, "Invalid type 0x%x for node %d!\n",
+					type, node_xp);
+			return -EINVAL;
+		}
+		break;
+	}
+
+	/* Validate event ID vs available for the type */
+	for (i = 0, valid = 0; i < ARRAY_SIZE(arm_ccn_pmu_events) && !valid;
+			i++) {
+		struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i];
+		u32 port = CCN_CONFIG_PORT(event->attr.config);
+		u32 vc = CCN_CONFIG_VC(event->attr.config);
+
+		if (!arm_ccn_pmu_type_eq(type, e->type))
+			continue;
+		if (event_id != e->event)
+			continue;
+		if (e->num_ports && port >= e->num_ports) {
+			dev_dbg(ccn->dev, "Invalid port %d for node/XP %d!\n",
+					port, node_xp);
+			return -EINVAL;
+		}
+		if (e->num_vcs && vc >= e->num_vcs) {
+			dev_dbg(ccn->dev, "Invalid vc %d for node/XP %d!\n",
+					vc, node_xp);
+			return -EINVAL;
+		}
+		valid = 1;
+	}
+	if (!valid) {
+		dev_dbg(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
+				event_id, node_xp);
+		return -EINVAL;
+	}
+
+	/* Watchpoint-based event for a node is actually set on XP */
+	if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) {
+		u32 port;
+
+		type = CCN_TYPE_XP;
+		port = arm_ccn_node_to_xp_port(node_xp);
+		node_xp = arm_ccn_node_to_xp(node_xp);
+
+		arm_ccn_pmu_config_set(&event->attr.config,
+				node_xp, type, port);
+	}
+
+	/*
+	 * We must NOT create groups containing mixed PMUs, although software
+	 * events are acceptable (for example, to create a CCN group that is
+	 * read periodically when a hrtimer, aka cpu-clock, leader triggers).
+	 */
+	if (event->group_leader->pmu != event->pmu &&
+			!is_software_event(event->group_leader))
+		return -EINVAL;
+
+	list_for_each_entry(sibling, &event->group_leader->sibling_list,
+			group_entry)
+		if (sibling->pmu != event->pmu &&
+				!is_software_event(sibling))
+			return -EINVAL;
+
+	return 0;
+}
+
+static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx)
+{
+	u64 res;
+
+	if (idx == CCN_IDX_PMU_CYCLE_COUNTER) {
+#ifdef readq
+		res = readq(ccn->dt.base + CCN_DT_PMCCNTR);
+#else
+		/* 40 bit counter: take a snapshot and read it in two parts */
+		writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ);
+		while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1))
+			;
+		writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
+		res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff;
+		res <<= 32;
+		res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR);
+#endif
+	} else {
+		res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx));
+	}
+
+	return res;
+}
+
+static void arm_ccn_pmu_event_update(struct perf_event *event)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+	struct hw_perf_event *hw = &event->hw;
+	u64 prev_count, new_count, mask;
+
+	do {
+		prev_count = local64_read(&hw->prev_count);
+		new_count = arm_ccn_pmu_read_counter(ccn, hw->idx);
+	} while (local64_xchg(&hw->prev_count, new_count) != prev_count);
+
+	mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 40 : 32)) - 1;
+
+	local64_add((new_count - prev_count) & mask, &event->count);
+}
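+
+/*
+ * Worked wraparound example for the masked delta above: with a 32 bit
+ * counter, prev_count == 0xfffffffe and new_count == 0x00000001 give
+ * (new_count - prev_count) & 0xffffffff == 3, so the accumulated count
+ * stays correct across an overflow.
+ */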
+
+static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+	struct hw_perf_event *hw = &event->hw;
+	struct arm_ccn_component *xp;
+	u32 val, dt_cfg;
+
+	/* Nothing to do for cycle counter */
+	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
+		return;
+
+	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
+		xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
+	else
+		xp = &ccn->xp[arm_ccn_node_to_xp(
+				CCN_CONFIG_NODE(event->attr.config))];
+
+	if (enable)
+		dt_cfg = hw->event_base;
+	else
+		dt_cfg = CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH;
+
+	spin_lock(&ccn->dt.config_lock);
+
+	val = readl(xp->base + CCN_XP_DT_CONFIG);
+	val &= ~(CCN_XP_DT_CONFIG__DT_CFG__MASK <<
+			CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx));
+	val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx);
+	writel(val, xp->base + CCN_XP_DT_CONFIG);
+
+	spin_unlock(&ccn->dt.config_lock);
+}
+
+static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+	struct hw_perf_event *hw = &event->hw;
+
+	local64_set(&event->hw.prev_count,
+			arm_ccn_pmu_read_counter(ccn, hw->idx));
+	hw->state = 0;
+
+	/* Set the DT bus input, engaging the counter */
+	arm_ccn_pmu_xp_dt_config(event, 1);
+}
+
+static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hw = &event->hw;
+
+	/* Disable counting, setting the DT bus to pass-through mode */
+	arm_ccn_pmu_xp_dt_config(event, 0);
+
+	if (flags & PERF_EF_UPDATE)
+		arm_ccn_pmu_event_update(event);
+
+	hw->state |= PERF_HES_STOPPED;
+}
+
+static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+	struct hw_perf_event *hw = &event->hw;
+	struct arm_ccn_component *source =
+			ccn->dt.pmu_counters[hw->idx].source;
+	unsigned long wp = hw->config_base;
+	u32 val;
+	u64 cmp_l = event->attr.config1;
+	u64 cmp_h = event->attr.config2;
+	u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l;
+	u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h;
+
+	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp);
+
+	/* Direction (RX/TX), device (port) & virtual channel */
+	val = readl(source->base + CCN_XP_DT_INTERFACE_SEL);
+	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK <<
+			CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp));
+	val |= CCN_CONFIG_DIR(event->attr.config) <<
+			CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp);
+	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK <<
+			CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp));
+	val |= CCN_CONFIG_PORT(event->attr.config) <<
+			CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp);
+	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK <<
+			CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp));
+	val |= CCN_CONFIG_VC(event->attr.config) <<
+			CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp);
+	writel(val, source->base + CCN_XP_DT_INTERFACE_SEL);
+
+	/* Comparison values */
+	writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
+	writel((cmp_l >> 32) & 0x7fffffff,
+			source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
+	writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
+	writel((cmp_h >> 32) & 0x0fffffff,
+			source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4);
+
+	/* Mask */
+	writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
+	writel((mask_l >> 32) & 0x7fffffff,
+			source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
+	writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
+	writel((mask_h >> 32) & 0x0fffffff,
+			source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4);
+}
+
+static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+	struct hw_perf_event *hw = &event->hw;
+	struct arm_ccn_component *source =
+			ccn->dt.pmu_counters[hw->idx].source;
+	u32 val, id;
+
+	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);
+
+	id = (CCN_CONFIG_VC(event->attr.config) << 4) |
+			(CCN_CONFIG_BUS(event->attr.config) << 3) |
+			(CCN_CONFIG_EVENT(event->attr.config) << 0);
+
+	val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
+	val &= ~(CCN_XP_PMU_EVENT_SEL__ID__MASK <<
+			CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
+	val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
+	writel(val, source->base + CCN_XP_PMU_EVENT_SEL);
+}
+
+static void arm_ccn_pmu_node_event_config(struct perf_event *event)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+	struct hw_perf_event *hw = &event->hw;
+	struct arm_ccn_component *source =
+			ccn->dt.pmu_counters[hw->idx].source;
+	u32 type = CCN_CONFIG_TYPE(event->attr.config);
+	u32 val, port;
+
+	port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config));
+	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port,
+			hw->config_base);
+
+	/* These *_event_sel regs should be identical, but let's make sure... */
+	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL != CCN_SBAS_PMU_EVENT_SEL);
+	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL != CCN_RNI_PMU_EVENT_SEL);
+	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(1) !=
+			CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1));
+	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1) !=
+			CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(1));
+	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__MASK !=
+			CCN_SBAS_PMU_EVENT_SEL__ID__MASK);
+	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__MASK !=
+			CCN_RNI_PMU_EVENT_SEL__ID__MASK);
+	if (WARN_ON(type != CCN_TYPE_HNF && type != CCN_TYPE_SBAS &&
+			!arm_ccn_pmu_type_eq(type, CCN_TYPE_RNI_3P)))
+		return;
+
+	/* Set the event id for the pre-allocated counter */
+	val = readl(source->base + CCN_HNF_PMU_EVENT_SEL);
+	val &= ~(CCN_HNF_PMU_EVENT_SEL__ID__MASK <<
+		CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
+	val |= CCN_CONFIG_EVENT(event->attr.config) <<
+		CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
+	writel(val, source->base + CCN_HNF_PMU_EVENT_SEL);
+}
+
+static void arm_ccn_pmu_event_config(struct perf_event *event)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+	struct hw_perf_event *hw = &event->hw;
+	u32 xp, offset, val;
+
+	/* Cycle counter requires no setup */
+	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
+		return;
+
+	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
+		xp = CCN_CONFIG_XP(event->attr.config);
+	else
+		xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config));
+
+	spin_lock(&ccn->dt.config_lock);
+
+	/* Set the DT bus "distance" register */
+	offset = (hw->idx / 4) * 4;
+	val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
+	val &= ~(CCN_DT_ACTIVE_DSM__DSM_ID__MASK <<
+			CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4));
+	val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4);
+	writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
+
+	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) {
+		if (CCN_CONFIG_EVENT(event->attr.config) ==
+				CCN_EVENT_WATCHPOINT)
+			arm_ccn_pmu_xp_watchpoint_config(event);
+		else
+			arm_ccn_pmu_xp_event_config(event);
+	} else {
+		arm_ccn_pmu_node_event_config(event);
+	}
+
+	spin_unlock(&ccn->dt.config_lock);
+}
+
+static int arm_ccn_pmu_active_counters(struct arm_ccn *ccn)
+{
+	return bitmap_weight(ccn->dt.pmu_counters_mask,
+			     CCN_NUM_PMU_EVENT_COUNTERS + 1);
+}
+
+static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
+{
+	int err;
+	struct hw_perf_event *hw = &event->hw;
+	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+
+	err = arm_ccn_pmu_event_alloc(event);
+	if (err)
+		return err;
+
+	/*
+	 * Pin the timer, so that the overflows are handled by the chosen
+	 * event->cpu (this is the same one as presented in "cpumask"
+	 * attribute).
+	 */
+	if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1)
+		hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
+			      HRTIMER_MODE_REL_PINNED);
+
+	arm_ccn_pmu_event_config(event);
+
+	hw->state = PERF_HES_STOPPED;
+
+	if (flags & PERF_EF_START)
+		arm_ccn_pmu_event_start(event, PERF_EF_UPDATE);
+
+	return 0;
+}
+
+static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+
+	arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
+
+	arm_ccn_pmu_event_release(event);
+
+	if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0)
+		hrtimer_cancel(&ccn->dt.hrtimer);
+}
+
+static void arm_ccn_pmu_event_read(struct perf_event *event)
+{
+	arm_ccn_pmu_event_update(event);
+}
+
+static void arm_ccn_pmu_enable(struct pmu *pmu)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+	u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+	val |= CCN_DT_PMCR__PMU_EN;
+	writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
+static void arm_ccn_pmu_disable(struct pmu *pmu)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+	u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+	val &= ~CCN_DT_PMCR__PMU_EN;
+	writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
+static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
+{
+	u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
+	int idx;
+
+	if (!pmovsr)
+		return IRQ_NONE;
+
+	writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR);
+
+	BUILD_BUG_ON(CCN_IDX_PMU_CYCLE_COUNTER != CCN_NUM_PMU_EVENT_COUNTERS);
+
+	for (idx = 0; idx < CCN_NUM_PMU_EVENT_COUNTERS + 1; idx++) {
+		struct perf_event *event = dt->pmu_counters[idx].event;
+		int overflowed = pmovsr & BIT(idx);
+
+		WARN_ON_ONCE(overflowed && !event &&
+				idx != CCN_IDX_PMU_CYCLE_COUNTER);
+
+		if (!event || !overflowed)
+			continue;
+
+		arm_ccn_pmu_event_update(event);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
+{
+	struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt,
+			hrtimer);
+	unsigned long flags;
+
+	local_irq_save(flags);
+	arm_ccn_pmu_overflow_handler(dt);
+	local_irq_restore(flags);
+
+	hrtimer_forward_now(hrtimer, arm_ccn_pmu_timer_period());
+	return HRTIMER_RESTART;
+}
+
+
+static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct arm_ccn_dt *dt = hlist_entry_safe(node, struct arm_ccn_dt, node);
+	struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
+	unsigned int target;
+
+	if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
+		return 0;
+	target = cpumask_any_but(cpu_online_mask, cpu);
+	if (target >= nr_cpu_ids)
+		return 0;
+	perf_pmu_migrate_context(&dt->pmu, cpu, target);
+	cpumask_set_cpu(target, &dt->cpu);
+	if (ccn->irq)
+		WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
+	return 0;
+}
+
+static DEFINE_IDA(arm_ccn_pmu_ida);
+
+static int arm_ccn_pmu_init(struct arm_ccn *ccn)
+{
+	int i;
+	char *name;
+	int err;
+
+	/* Initialize DT subsystem */
+	ccn->dt.base = ccn->base + CCN_REGION_SIZE;
+	spin_lock_init(&ccn->dt.config_lock);
+	writel(CCN_DT_PMOVSR_CLR__MASK, ccn->dt.base + CCN_DT_PMOVSR_CLR);
+	writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL);
+	writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN,
+			ccn->dt.base + CCN_DT_PMCR);
+	writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
+	for (i = 0; i < ccn->num_xps; i++) {
+		writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG);
+		writel((CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
+				CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(0)) |
+				(CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
+				CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(1)) |
+				CCN_XP_DT_CONTROL__DT_ENABLE,
+				ccn->xp[i].base + CCN_XP_DT_CONTROL);
+	}
+	ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0;
+	ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0;
+	ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0;
+	ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0;
+	ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0;
+	ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15);
+	ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0;
+	ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9);
+
+	/* Get a convenient /sys/bus/event_source/devices/ name */
+	ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL);
+	if (ccn->dt.id == 0) {
+		name = "ccn";
+	} else {
+		int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);
+
+		name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
+		if (!name) {
+			err = -ENOMEM;
+			goto error_choose_name;
+		}
+		snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
+	}
+
+	/* Perf driver registration */
+	ccn->dt.pmu = (struct pmu) {
+		.module = THIS_MODULE,
+		.attr_groups = arm_ccn_pmu_attr_groups,
+		.task_ctx_nr = perf_invalid_context,
+		.event_init = arm_ccn_pmu_event_init,
+		.add = arm_ccn_pmu_event_add,
+		.del = arm_ccn_pmu_event_del,
+		.start = arm_ccn_pmu_event_start,
+		.stop = arm_ccn_pmu_event_stop,
+		.read = arm_ccn_pmu_event_read,
+		.pmu_enable = arm_ccn_pmu_enable,
+		.pmu_disable = arm_ccn_pmu_disable,
+	};
+
+	/* No overflow interrupt? Have to use a timer instead. */
+	if (!ccn->irq) {
+		dev_info(ccn->dev, "No access to interrupts, using timer.\n");
+		hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
+				HRTIMER_MODE_REL);
+		ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
+	}
+
+	/* Pick one CPU which we will use to collect data from CCN... */
+	cpumask_set_cpu(get_cpu(), &ccn->dt.cpu);
+
+	/* Also make sure that the overflow interrupt is handled by this CPU */
+	if (ccn->irq) {
+		err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu);
+		if (err) {
+			dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
+			goto error_set_affinity;
+		}
+	}
+
+	err = perf_pmu_register(&ccn->dt.pmu, name, -1);
+	if (err)
+		goto error_pmu_register;
+
+	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+					 &ccn->dt.node);
+	put_cpu();
+	return 0;
+
+error_pmu_register:
+error_set_affinity:
+	put_cpu();
+error_choose_name:
+	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
+	for (i = 0; i < ccn->num_xps; i++)
+		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
+	writel(0, ccn->dt.base + CCN_DT_PMCR);
+	return err;
+}
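+
+/*
+ * Usage sketch (illustrative, not part of the driver): once the PMU is
+ * registered, its events can be counted system-wide with perf, e.g.:
+ *
+ *   perf stat -a -e ccn/cycles/,ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=1/ \
+ *           sleep 1
+ *
+ * Event names and qualifiers follow the in-kernel ARM CCN perf
+ * documentation.
+ */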
+
+static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
+{
+	int i;
+
+	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+					    &ccn->dt.node);
+	if (ccn->irq)
+		irq_set_affinity_hint(ccn->irq, NULL);
+	for (i = 0; i < ccn->num_xps; i++)
+		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
+	writel(0, ccn->dt.base + CCN_DT_PMCR);
+	perf_pmu_unregister(&ccn->dt.pmu);
+	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
+}
+
+static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
+		int (*callback)(struct arm_ccn *ccn, int region,
+		void __iomem *base, u32 type, u32 id))
+{
+	int region;
+
+	for (region = 0; region < CCN_NUM_REGIONS; region++) {
+		u32 val, type, id;
+		void __iomem *base;
+		int err;
+
+		val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 +
+				4 * (region / 32));
+		if (!(val & (1 << (region % 32))))
+			continue;
+
+		base = ccn->base + region * CCN_REGION_SIZE;
+		val = readl(base + CCN_ALL_OLY_ID);
+		type = (val >> CCN_ALL_OLY_ID__OLY_ID__SHIFT) &
+				CCN_ALL_OLY_ID__OLY_ID__MASK;
+		id = (val >> CCN_ALL_OLY_ID__NODE_ID__SHIFT) &
+				CCN_ALL_OLY_ID__NODE_ID__MASK;
+
+		err = callback(ccn, region, base, type, id);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int arm_ccn_get_nodes_num(struct arm_ccn *ccn, int region,
+		void __iomem *base, u32 type, u32 id)
+{
+	if (type == CCN_TYPE_XP && id >= ccn->num_xps)
+		ccn->num_xps = id + 1;
+	else if (id >= ccn->num_nodes)
+		ccn->num_nodes = id + 1;
+
+	return 0;
+}
+
+static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
+		void __iomem *base, u32 type, u32 id)
+{
+	struct arm_ccn_component *component;
+
+	dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type);
+
+	switch (type) {
+	case CCN_TYPE_MN:
+		ccn->mn_id = id;
+		return 0;
+	case CCN_TYPE_DT:
+		return 0;
+	case CCN_TYPE_XP:
+		component = &ccn->xp[id];
+		break;
+	case CCN_TYPE_SBSX:
+		ccn->sbsx_present = 1;
+		component = &ccn->node[id];
+		break;
+	case CCN_TYPE_SBAS:
+		ccn->sbas_present = 1;
+		/* Fall-through */
+	default:
+		component = &ccn->node[id];
+		break;
+	}
+
+	component->base = base;
+	component->type = type;
+
+	return 0;
+}
+
+
+static irqreturn_t arm_ccn_error_handler(struct arm_ccn *ccn,
+		const u32 *err_sig_val)
+{
+	/* This should be really handled by firmware... */
+	dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n",
+			err_sig_val[5], err_sig_val[4], err_sig_val[3],
+			err_sig_val[2], err_sig_val[1], err_sig_val[0]);
+	dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n");
+	writel(CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE,
+			ccn->base + CCN_MN_ERRINT_STATUS);
+
+	return IRQ_HANDLED;
+}
+
+
+static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id)
+{
+	irqreturn_t res = IRQ_NONE;
+	struct arm_ccn *ccn = dev_id;
+	u32 err_sig_val[6];
+	u32 err_or;
+	int i;
+
+	/* PMU overflow is a special case */
+	err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0);
+	if (err_or & CCN_MN_ERR_SIG_VAL_63_0__DT) {
+		err_or &= ~CCN_MN_ERR_SIG_VAL_63_0__DT;
+		res = arm_ccn_pmu_overflow_handler(&ccn->dt);
+	}
+
+	/* Have to read all err_sig_vals to clear them */
+	for (i = 1; i < ARRAY_SIZE(err_sig_val); i++) {
+		err_sig_val[i] = readl(ccn->base +
+				CCN_MN_ERR_SIG_VAL_63_0 + i * 4);
+		err_or |= err_sig_val[i];
+	}
+	if (err_or)
+		res |= arm_ccn_error_handler(ccn, err_sig_val);
+
+	if (res != IRQ_NONE)
+		writel(CCN_MN_ERRINT_STATUS__INTREQ__DESSERT,
+				ccn->base + CCN_MN_ERRINT_STATUS);
+
+	return res;
+}
+
+
+static int arm_ccn_probe(struct platform_device *pdev)
+{
+	struct arm_ccn *ccn;
+	struct resource *res;
+	unsigned int irq;
+	int err;
+
+	ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL);
+	if (!ccn)
+		return -ENOMEM;
+	ccn->dev = &pdev->dev;
+	platform_set_drvdata(pdev, ccn);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
+	if (!devm_request_mem_region(ccn->dev, res->start,
+			resource_size(res), pdev->name))
+		return -EBUSY;
+
+	ccn->base = devm_ioremap(ccn->dev, res->start,
+				resource_size(res));
+	if (!ccn->base)
+		return -EFAULT;
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res)
+		return -EINVAL;
+	irq = res->start;
+
+	/* Check if we can use the interrupt */
+	writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE,
+			ccn->base + CCN_MN_ERRINT_STATUS);
+	if (readl(ccn->base + CCN_MN_ERRINT_STATUS) &
+			CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED) {
+		/* Can set 'disable' bits, so can acknowledge interrupts */
+		writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
+				ccn->base + CCN_MN_ERRINT_STATUS);
+		err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler,
+				       IRQF_NOBALANCING | IRQF_NO_THREAD,
+				       dev_name(ccn->dev), ccn);
+		if (err)
+			return err;
+
+		ccn->irq = irq;
+	}
+
+
+	/* Build topology */
+
+	err = arm_ccn_for_each_valid_region(ccn, arm_ccn_get_nodes_num);
+	if (err)
+		return err;
+
+	ccn->node = devm_kcalloc(ccn->dev, ccn->num_nodes, sizeof(*ccn->node),
+				 GFP_KERNEL);
+	ccn->xp = devm_kcalloc(ccn->dev, ccn->num_xps, sizeof(*ccn->xp),
+			       GFP_KERNEL);
+	if (!ccn->node || !ccn->xp)
+		return -ENOMEM;
+
+	err = arm_ccn_for_each_valid_region(ccn, arm_ccn_init_nodes);
+	if (err)
+		return err;
+
+	return arm_ccn_pmu_init(ccn);
+}
+
+static int arm_ccn_remove(struct platform_device *pdev)
+{
+	struct arm_ccn *ccn = platform_get_drvdata(pdev);
+
+	arm_ccn_pmu_cleanup(ccn);
+
+	return 0;
+}
+
+static const struct of_device_id arm_ccn_match[] = {
+	{ .compatible = "arm,ccn-502", },
+	{ .compatible = "arm,ccn-504", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, arm_ccn_match);
+
+static struct platform_driver arm_ccn_driver = {
+	.driver = {
+		.name = "arm-ccn",
+		.of_match_table = arm_ccn_match,
+	},
+	.probe = arm_ccn_probe,
+	.remove = arm_ccn_remove,
+};
+
+static int __init arm_ccn_init(void)
+{
+	int i, ret;
+
+	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+				      "perf/arm/ccn:online", NULL,
+				      arm_ccn_pmu_offline_cpu);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
+		arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
+
+	ret = platform_driver_register(&arm_ccn_driver);
+	if (ret)
+		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
+	return ret;
+}
+
+static void __exit arm_ccn_exit(void)
+{
+	platform_driver_unregister(&arm_ccn_driver);
+	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
+}
+
+module_init(arm_ccn_init);
+module_exit(arm_ccn_exit);
+
+MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/drivers/bus/brcmstb_gisb.c b/src/kernel/linux/v4.14/drivers/bus/brcmstb_gisb.c
new file mode 100644
index 0000000..68ac3e9
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/brcmstb_gisb.c
@@ -0,0 +1,465 @@
+/*
+ * Copyright (C) 2014-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/sysfs.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/pm.h>
+#include <linux/kernel.h>
+#include <linux/kdebug.h>
+#include <linux/notifier.h>
+
+#ifdef CONFIG_MIPS
+#include <asm/traps.h>
+#endif
+
+#define  ARB_ERR_CAP_CLEAR		(1 << 0)
+#define  ARB_ERR_CAP_STATUS_TIMEOUT	(1 << 12)
+#define  ARB_ERR_CAP_STATUS_TEA		(1 << 11)
+#define  ARB_ERR_CAP_STATUS_WRITE	(1 << 1)
+#define  ARB_ERR_CAP_STATUS_VALID	(1 << 0)
+
+enum {
+	ARB_TIMER,
+	ARB_ERR_CAP_CLR,
+	ARB_ERR_CAP_HI_ADDR,
+	ARB_ERR_CAP_ADDR,
+	ARB_ERR_CAP_STATUS,
+	ARB_ERR_CAP_MASTER,
+};
+
+static const int gisb_offsets_bcm7038[] = {
+	[ARB_TIMER]		= 0x00c,
+	[ARB_ERR_CAP_CLR]	= 0x0c4,
+	[ARB_ERR_CAP_HI_ADDR]	= -1,
+	[ARB_ERR_CAP_ADDR]	= 0x0c8,
+	[ARB_ERR_CAP_STATUS]	= 0x0d0,
+	[ARB_ERR_CAP_MASTER]	= -1,
+};
+
+static const int gisb_offsets_bcm7278[] = {
+	[ARB_TIMER]		= 0x008,
+	[ARB_ERR_CAP_CLR]	= 0x7f8,
+	[ARB_ERR_CAP_HI_ADDR]	= -1,
+	[ARB_ERR_CAP_ADDR]	= 0x7e0,
+	[ARB_ERR_CAP_STATUS]	= 0x7f0,
+	[ARB_ERR_CAP_MASTER]	= 0x7f4,
+};
+
+static const int gisb_offsets_bcm7400[] = {
+	[ARB_TIMER]		= 0x00c,
+	[ARB_ERR_CAP_CLR]	= 0x0c8,
+	[ARB_ERR_CAP_HI_ADDR]	= -1,
+	[ARB_ERR_CAP_ADDR]	= 0x0cc,
+	[ARB_ERR_CAP_STATUS]	= 0x0d4,
+	[ARB_ERR_CAP_MASTER]	= 0x0d8,
+};
+
+static const int gisb_offsets_bcm7435[] = {
+	[ARB_TIMER]		= 0x00c,
+	[ARB_ERR_CAP_CLR]	= 0x168,
+	[ARB_ERR_CAP_HI_ADDR]	= -1,
+	[ARB_ERR_CAP_ADDR]	= 0x16c,
+	[ARB_ERR_CAP_STATUS]	= 0x174,
+	[ARB_ERR_CAP_MASTER]	= 0x178,
+};
+
+static const int gisb_offsets_bcm7445[] = {
+	[ARB_TIMER]		= 0x008,
+	[ARB_ERR_CAP_CLR]	= 0x7e4,
+	[ARB_ERR_CAP_HI_ADDR]	= 0x7e8,
+	[ARB_ERR_CAP_ADDR]	= 0x7ec,
+	[ARB_ERR_CAP_STATUS]	= 0x7f4,
+	[ARB_ERR_CAP_MASTER]	= 0x7f8,
+};
+
+struct brcmstb_gisb_arb_device {
+	void __iomem	*base;
+	const int	*gisb_offsets;
+	bool		big_endian;
+	struct mutex	lock;
+	struct list_head next;
+	u32 valid_mask;
+	const char *master_names[sizeof(u32) * BITS_PER_BYTE];
+	u32 saved_timeout;
+};
+
+static LIST_HEAD(brcmstb_gisb_arb_device_list);
+
+static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
+{
+	int offset = gdev->gisb_offsets[reg];
+
+	if (offset < 0) {
+		/* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
+		if (reg == ARB_ERR_CAP_MASTER)
+			return 1;
+		else
+			return 0;
+	}
+
+	if (gdev->big_endian)
+		return ioread32be(gdev->base + offset);
+	else
+		return ioread32(gdev->base + offset);
+}
+
+static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev)
+{
+	u64 value;
+
+	value = gisb_read(gdev, ARB_ERR_CAP_ADDR);
+	value |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
+
+	return value;
+}
+
+static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
+{
+	int offset = gdev->gisb_offsets[reg];
+
+	if (offset == -1)
+		return;
+
+	if (gdev->big_endian)
+		iowrite32be(val, gdev->base + offset);
+	else
+		iowrite32(val, gdev->base + offset);
+}
+
+static ssize_t gisb_arb_get_timeout(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
+	u32 timeout;
+
+	mutex_lock(&gdev->lock);
+	timeout = gisb_read(gdev, ARB_TIMER);
+	mutex_unlock(&gdev->lock);
+
+	return sprintf(buf, "%d", timeout);
+}
+
+static ssize_t gisb_arb_set_timeout(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
+	int val, ret;
+
+	ret = kstrtoint(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val == 0 || val >= 0xffffffff)
+		return -EINVAL;
+
+	mutex_lock(&gdev->lock);
+	gisb_write(gdev, val, ARB_TIMER);
+	mutex_unlock(&gdev->lock);
+
+	return count;
+}
+
+static const char *
+brcmstb_gisb_master_to_str(struct brcmstb_gisb_arb_device *gdev,
+						u32 masters)
+{
+	u32 mask = gdev->valid_mask & masters;
+
+	if (hweight_long(mask) != 1)
+		return NULL;
+
+	return gdev->master_names[ffs(mask) - 1];
+}
+
+static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
+					const char *reason)
+{
+	u32 cap_status;
+	u64 arb_addr;
+	u32 master;
+	const char *m_name;
+	char m_fmt[11];
+
+	cap_status = gisb_read(gdev, ARB_ERR_CAP_STATUS);
+
+	/* Invalid captured address, bail out */
+	if (!(cap_status & ARB_ERR_CAP_STATUS_VALID))
+		return 1;
+
+	/* Read the address and master */
+	arb_addr = gisb_read_address(gdev);
+	master = gisb_read(gdev, ARB_ERR_CAP_MASTER);
+
+	m_name = brcmstb_gisb_master_to_str(gdev, master);
+	if (!m_name) {
+		snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master);
+		m_name = m_fmt;
+	}
+
+	pr_crit("%s: %s at 0x%llx [%c %s], core: %s\n",
+		__func__, reason, arb_addr,
+		cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
+		cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
+		m_name);
+
+	/* clear the GISB error */
+	gisb_write(gdev, ARB_ERR_CAP_CLEAR, ARB_ERR_CAP_CLR);
+
+	return 0;
+}
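+
+/*
+ * With the format above, a captured error is reported like the following
+ * (address and master name are illustrative):
+ *
+ *   brcmstb_gisb_arb_decode_addr: timeout at 0x10480000 [W timeout], core: cpu0
+ *
+ * The master name comes from the "brcm,gisb-arb-master-names" DT property
+ * parsed at probe time, falling back to the raw capture value in hex.
+ */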
+
+#ifdef CONFIG_MIPS
+static int brcmstb_bus_error_handler(struct pt_regs *regs, int is_fixup)
+{
+	int ret = 0;
+	struct brcmstb_gisb_arb_device *gdev;
+	u32 cap_status;
+
+	list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next) {
+		cap_status = gisb_read(gdev, ARB_ERR_CAP_STATUS);
+
+		/* Invalid captured address, bail out */
+		if (!(cap_status & ARB_ERR_CAP_STATUS_VALID)) {
+			is_fixup = 1;
+			goto out;
+		}
+
+		ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error");
+	}
+out:
+	return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
+}
+#endif
+
+static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id)
+{
+	brcmstb_gisb_arb_decode_addr(dev_id, "timeout");
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t brcmstb_gisb_tea_handler(int irq, void *dev_id)
+{
+	brcmstb_gisb_arb_decode_addr(dev_id, "target abort");
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Dump out gisb errors on die or panic.
+ */
+static int dump_gisb_error(struct notifier_block *self, unsigned long v,
+			   void *p);
+
+static struct notifier_block gisb_die_notifier = {
+	.notifier_call = dump_gisb_error,
+};
+
+static struct notifier_block gisb_panic_notifier = {
+	.notifier_call = dump_gisb_error,
+};
+
+static int dump_gisb_error(struct notifier_block *self, unsigned long v,
+			   void *p)
+{
+	struct brcmstb_gisb_arb_device *gdev;
+	const char *reason = "panic";
+
+	if (self == &gisb_die_notifier)
+		reason = "die";
+
+	/* Iterate over each registered GISB arbiter device */
+	list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next)
+		brcmstb_gisb_arb_decode_addr(gdev, reason);
+
+	return NOTIFY_DONE;
+}
+
+static DEVICE_ATTR(gisb_arb_timeout, S_IWUSR | S_IRUGO,
+		gisb_arb_get_timeout, gisb_arb_set_timeout);
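+
+/*
+ * Illustrative shell usage of the attribute above (the platform device
+ * path is hypothetical and board specific):
+ *
+ *   cat /sys/devices/platform/<addr>.gisb-arb/gisb_arb_timeout
+ *   echo 65536 > /sys/devices/platform/<addr>.gisb-arb/gisb_arb_timeout
+ *
+ * The value is written straight to the ARB_TIMER register; 0 and
+ * 0xffffffff are rejected by gisb_arb_set_timeout().
+ */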
+
+static struct attribute *gisb_arb_sysfs_attrs[] = {
+	&dev_attr_gisb_arb_timeout.attr,
+	NULL,
+};
+
+static struct attribute_group gisb_arb_sysfs_attr_group = {
+	.attrs = gisb_arb_sysfs_attrs,
+};
+
+static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
+	{ .compatible = "brcm,gisb-arb",         .data = gisb_offsets_bcm7445 },
+	{ .compatible = "brcm,bcm7445-gisb-arb", .data = gisb_offsets_bcm7445 },
+	{ .compatible = "brcm,bcm7435-gisb-arb", .data = gisb_offsets_bcm7435 },
+	{ .compatible = "brcm,bcm7400-gisb-arb", .data = gisb_offsets_bcm7400 },
+	{ .compatible = "brcm,bcm7278-gisb-arb", .data = gisb_offsets_bcm7278 },
+	{ .compatible = "brcm,bcm7038-gisb-arb", .data = gisb_offsets_bcm7038 },
+	{ },
+};
+
+static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
+{
+	struct device_node *dn = pdev->dev.of_node;
+	struct brcmstb_gisb_arb_device *gdev;
+	const struct of_device_id *of_id;
+	struct resource *r;
+	int err, timeout_irq, tea_irq;
+	unsigned int num_masters, j = 0;
+	int i, first, last;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	timeout_irq = platform_get_irq(pdev, 0);
+	tea_irq = platform_get_irq(pdev, 1);
+
+	gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL);
+	if (!gdev)
+		return -ENOMEM;
+
+	mutex_init(&gdev->lock);
+	INIT_LIST_HEAD(&gdev->next);
+
+	gdev->base = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(gdev->base))
+		return PTR_ERR(gdev->base);
+
+	of_id = of_match_node(brcmstb_gisb_arb_of_match, dn);
+	if (!of_id) {
+		pr_err("failed to look up compatible string\n");
+		return -EINVAL;
+	}
+	gdev->gisb_offsets = of_id->data;
+	gdev->big_endian = of_device_is_big_endian(dn);
+
+	err = devm_request_irq(&pdev->dev, timeout_irq,
+				brcmstb_gisb_timeout_handler, 0, pdev->name,
+				gdev);
+	if (err < 0)
+		return err;
+
+	err = devm_request_irq(&pdev->dev, tea_irq,
+				brcmstb_gisb_tea_handler, 0, pdev->name,
+				gdev);
+	if (err < 0)
+		return err;
+
+	/* If we do not have a valid mask, assume all masters are enabled */
+	if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask",
+				&gdev->valid_mask))
+		gdev->valid_mask = 0xffffffff;
+
+	/* Proceed with reading the literal names if we agree on the
+	 * number of masters
+	 */
+	num_masters = of_property_count_strings(dn,
+			"brcm,gisb-arb-master-names");
+	if (hweight_long(gdev->valid_mask) == num_masters) {
+		first = ffs(gdev->valid_mask) - 1;
+		last = fls(gdev->valid_mask) - 1;
+
+		for (i = first; i <= last; i++) {
+			if (!(gdev->valid_mask & BIT(i)))
+				continue;
+
+			of_property_read_string_index(dn,
+					"brcm,gisb-arb-master-names", j,
+					&gdev->master_names[i]);
+			j++;
+		}
+	}
+
+	err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
+	if (err)
+		return err;
+
+	platform_set_drvdata(pdev, gdev);
+
+	list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
+
+#ifdef CONFIG_MIPS
+	board_be_handler = brcmstb_bus_error_handler;
+#endif
+
+	if (list_is_singular(&brcmstb_gisb_arb_device_list)) {
+		register_die_notifier(&gisb_die_notifier);
+		atomic_notifier_chain_register(&panic_notifier_list,
+					       &gisb_panic_notifier);
+	}
+
+	dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
+			gdev->base, timeout_irq, tea_irq);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int brcmstb_gisb_arb_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
+
+	gdev->saved_timeout = gisb_read(gdev, ARB_TIMER);
+
+	return 0;
+}
+
+/* Make sure we provide the same timeout value that was configured before, and
+ * do this before the GISB timeout interrupt handler has any chance to run.
+ */
+static int brcmstb_gisb_arb_resume_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
+
+	gisb_write(gdev, gdev->saved_timeout, ARB_TIMER);
+
+	return 0;
+}
+#else
+#define brcmstb_gisb_arb_suspend       NULL
+#define brcmstb_gisb_arb_resume_noirq  NULL
+#endif
+
+static const struct dev_pm_ops brcmstb_gisb_arb_pm_ops = {
+	.suspend	= brcmstb_gisb_arb_suspend,
+	.resume_noirq	= brcmstb_gisb_arb_resume_noirq,
+};
+
+static struct platform_driver brcmstb_gisb_arb_driver = {
+	.driver = {
+		.name	= "brcm-gisb-arb",
+		.of_match_table = brcmstb_gisb_arb_of_match,
+		.pm	= &brcmstb_gisb_arb_pm_ops,
+	},
+};
+
+static int __init brcm_gisb_driver_init(void)
+{
+	return platform_driver_probe(&brcmstb_gisb_arb_driver,
+				     brcmstb_gisb_arb_probe);
+}
+
+module_init(brcm_gisb_driver_init);
diff --git a/src/kernel/linux/v4.14/drivers/bus/da8xx-mstpri.c b/src/kernel/linux/v4.14/drivers/bus/da8xx-mstpri.c
new file mode 100644
index 0000000..9af9bcc
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/da8xx-mstpri.c
@@ -0,0 +1,267 @@
+/*
+ * TI da8xx master peripheral priority driver
+ *
+ * Copyright (C) 2016 BayLibre SAS
+ *
+ * Author:
+ *   Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/regmap.h>
+
+/*
+ * REVISIT: Linux doesn't have a good framework for the kind of performance
+ * knobs this driver controls. We can't use device tree properties, as this
+ * is hardware configuration rather than hardware description. We also don't
+ * want to commit to maintaining some random sysfs attributes.
+ *
+ * For now we just hardcode the register values for the boards that need
+ * some changes (as is the case for the LCD controller on da850-lcdk - the
+ * first board we support here). When Linux gets an appropriate framework,
+ * we'll easily convert the driver to it.
+ */
+
+#define DA8XX_MSTPRI0_OFFSET		0
+#define DA8XX_MSTPRI1_OFFSET		4
+#define DA8XX_MSTPRI2_OFFSET		8
+
+enum {
+	DA8XX_MSTPRI_ARM_I = 0,
+	DA8XX_MSTPRI_ARM_D,
+	DA8XX_MSTPRI_UPP,
+	DA8XX_MSTPRI_SATA,
+	DA8XX_MSTPRI_PRU0,
+	DA8XX_MSTPRI_PRU1,
+	DA8XX_MSTPRI_EDMA30TC0,
+	DA8XX_MSTPRI_EDMA30TC1,
+	DA8XX_MSTPRI_EDMA31TC0,
+	DA8XX_MSTPRI_VPIF_DMA_0,
+	DA8XX_MSTPRI_VPIF_DMA_1,
+	DA8XX_MSTPRI_EMAC,
+	DA8XX_MSTPRI_USB0CFG,
+	DA8XX_MSTPRI_USB0CDMA,
+	DA8XX_MSTPRI_UHPI,
+	DA8XX_MSTPRI_USB1,
+	DA8XX_MSTPRI_LCDC,
+};
+
+struct da8xx_mstpri_descr {
+	int reg;
+	int shift;
+	int mask;
+};
+
+static const struct da8xx_mstpri_descr da8xx_mstpri_priority_list[] = {
+	[DA8XX_MSTPRI_ARM_I] = {
+		.reg = DA8XX_MSTPRI0_OFFSET,
+		.shift = 0,
+		.mask = 0x0000000f,
+	},
+	[DA8XX_MSTPRI_ARM_D] = {
+		.reg = DA8XX_MSTPRI0_OFFSET,
+		.shift = 4,
+		.mask = 0x000000f0,
+	},
+	[DA8XX_MSTPRI_UPP] = {
+		.reg = DA8XX_MSTPRI0_OFFSET,
+		.shift = 16,
+		.mask = 0x000f0000,
+	},
+	[DA8XX_MSTPRI_SATA] = {
+		.reg = DA8XX_MSTPRI0_OFFSET,
+		.shift = 20,
+		.mask = 0x00f00000,
+	},
+	[DA8XX_MSTPRI_PRU0] = {
+		.reg = DA8XX_MSTPRI1_OFFSET,
+		.shift = 0,
+		.mask = 0x0000000f,
+	},
+	[DA8XX_MSTPRI_PRU1] = {
+		.reg = DA8XX_MSTPRI1_OFFSET,
+		.shift = 4,
+		.mask = 0x000000f0,
+	},
+	[DA8XX_MSTPRI_EDMA30TC0] = {
+		.reg = DA8XX_MSTPRI1_OFFSET,
+		.shift = 8,
+		.mask = 0x00000f00,
+	},
+	[DA8XX_MSTPRI_EDMA30TC1] = {
+		.reg = DA8XX_MSTPRI1_OFFSET,
+		.shift = 12,
+		.mask = 0x0000f000,
+	},
+	[DA8XX_MSTPRI_EDMA31TC0] = {
+		.reg = DA8XX_MSTPRI1_OFFSET,
+		.shift = 16,
+		.mask = 0x000f0000,
+	},
+	[DA8XX_MSTPRI_VPIF_DMA_0] = {
+		.reg = DA8XX_MSTPRI1_OFFSET,
+		.shift = 24,
+		.mask = 0x0f000000,
+	},
+	[DA8XX_MSTPRI_VPIF_DMA_1] = {
+		.reg = DA8XX_MSTPRI1_OFFSET,
+		.shift = 28,
+		.mask = 0xf0000000,
+	},
+	[DA8XX_MSTPRI_EMAC] = {
+		.reg = DA8XX_MSTPRI2_OFFSET,
+		.shift = 0,
+		.mask = 0x0000000f,
+	},
+	[DA8XX_MSTPRI_USB0CFG] = {
+		.reg = DA8XX_MSTPRI2_OFFSET,
+		.shift = 8,
+		.mask = 0x00000f00,
+	},
+	[DA8XX_MSTPRI_USB0CDMA] = {
+		.reg = DA8XX_MSTPRI2_OFFSET,
+		.shift = 12,
+		.mask = 0x0000f000,
+	},
+	[DA8XX_MSTPRI_UHPI] = {
+		.reg = DA8XX_MSTPRI2_OFFSET,
+		.shift = 20,
+		.mask = 0x00f00000,
+	},
+	[DA8XX_MSTPRI_USB1] = {
+		.reg = DA8XX_MSTPRI2_OFFSET,
+		.shift = 24,
+		.mask = 0x0f000000,
+	},
+	[DA8XX_MSTPRI_LCDC] = {
+		.reg = DA8XX_MSTPRI2_OFFSET,
+		.shift = 28,
+		.mask = 0xf0000000,
+	},
+};
+
+struct da8xx_mstpri_priority {
+	int which;
+	u32 val;
+};
+
+struct da8xx_mstpri_board_priorities {
+	const char *board;
+	const struct da8xx_mstpri_priority *priorities;
+	size_t numprio;
+};
+
+/*
+ * Default memory settings of da850 do not meet the throughput/latency
+ * requirements of tilcdc. This results in the image displayed being
+ * incorrect and the following warning being displayed by the LCDC
+ * drm driver:
+ *
+ *   tilcdc da8xx_lcdc.0: tilcdc_crtc_irq(0x00000020): FIFO underfow
+ */
+static const struct da8xx_mstpri_priority da850_lcdk_priorities[] = {
+	{
+		.which = DA8XX_MSTPRI_LCDC,
+		.val = 0,
+	},
+	{
+		.which = DA8XX_MSTPRI_EDMA30TC1,
+		.val = 0,
+	},
+	{
+		.which = DA8XX_MSTPRI_EDMA30TC0,
+		.val = 1,
+	},
+};
+
+static const struct da8xx_mstpri_board_priorities da8xx_mstpri_board_confs[] = {
+	{
+		.board = "ti,da850-lcdk",
+		.priorities = da850_lcdk_priorities,
+		.numprio = ARRAY_SIZE(da850_lcdk_priorities),
+	},
+};
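+
+/*
+ * Supporting another board would (illustratively) mean adding a priority
+ * table and an entry in da8xx_mstpri_board_confs[] above; all names here
+ * are hypothetical:
+ *
+ *   static const struct da8xx_mstpri_priority my_board_priorities[] = {
+ *           { .which = DA8XX_MSTPRI_EMAC, .val = 0, },
+ *   };
+ *
+ *   {
+ *           .board = "vendor,my-board",
+ *           .priorities = my_board_priorities,
+ *           .numprio = ARRAY_SIZE(my_board_priorities),
+ *   },
+ *
+ * Each entry results in a read-modify-write of the matching MSTPRIx
+ * register in da8xx_mstpri_probe() below.
+ */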
+
+static const struct da8xx_mstpri_board_priorities *
+da8xx_mstpri_get_board_prio(void)
+{
+	const struct da8xx_mstpri_board_priorities *board_prio;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(da8xx_mstpri_board_confs); i++) {
+		board_prio = &da8xx_mstpri_board_confs[i];
+
+		if (of_machine_is_compatible(board_prio->board))
+			return board_prio;
+	}
+
+	return NULL;
+}
+
+static int da8xx_mstpri_probe(struct platform_device *pdev)
+{
+	const struct da8xx_mstpri_board_priorities *prio_list;
+	const struct da8xx_mstpri_descr *prio_descr;
+	const struct da8xx_mstpri_priority *prio;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	void __iomem *mstpri;
+	u32 reg;
+	int i;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	mstpri = devm_ioremap_resource(dev, res);
+	if (IS_ERR(mstpri)) {
+		dev_err(dev, "unable to map MSTPRI registers\n");
+		return PTR_ERR(mstpri);
+	}
+
+	prio_list = da8xx_mstpri_get_board_prio();
+	if (!prio_list) {
+		dev_err(dev, "no master priorities defined for this board\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < prio_list->numprio; i++) {
+		prio = &prio_list->priorities[i];
+		prio_descr = &da8xx_mstpri_priority_list[prio->which];
+
+		if (prio_descr->reg + sizeof(u32) > resource_size(res)) {
+			dev_warn(dev, "register offset out of range\n");
+			continue;
+		}
+
+		reg = readl(mstpri + prio_descr->reg);
+		reg &= ~prio_descr->mask;
+		reg |= prio->val << prio_descr->shift;
+
+		writel(reg, mstpri + prio_descr->reg);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id da8xx_mstpri_of_match[] = {
+	{ .compatible = "ti,da850-mstpri", },
+	{ },
+};
+
+static struct platform_driver da8xx_mstpri_driver = {
+	.probe = da8xx_mstpri_probe,
+	.driver = {
+		.name = "da8xx-mstpri",
+		.of_match_table = da8xx_mstpri_of_match,
+	},
+};
+module_platform_driver(da8xx_mstpri_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("TI da8xx master peripheral priority driver");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.14/drivers/bus/imx-weim.c b/src/kernel/linux/v4.14/drivers/bus/imx-weim.c
new file mode 100644
index 0000000..3d56ebc
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/imx-weim.c
@@ -0,0 +1,216 @@
+/*
+ * EIM driver for Freescale's i.MX chips
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/regmap.h>
+
+struct imx_weim_devtype {
+	unsigned int	cs_count;
+	unsigned int	cs_regs_count;
+	unsigned int	cs_stride;
+};
+
+static const struct imx_weim_devtype imx1_weim_devtype = {
+	.cs_count	= 6,
+	.cs_regs_count	= 2,
+	.cs_stride	= 0x08,
+};
+
+static const struct imx_weim_devtype imx27_weim_devtype = {
+	.cs_count	= 6,
+	.cs_regs_count	= 3,
+	.cs_stride	= 0x10,
+};
+
+static const struct imx_weim_devtype imx50_weim_devtype = {
+	.cs_count	= 4,
+	.cs_regs_count	= 6,
+	.cs_stride	= 0x18,
+};
+
+static const struct imx_weim_devtype imx51_weim_devtype = {
+	.cs_count	= 6,
+	.cs_regs_count	= 6,
+	.cs_stride	= 0x18,
+};
+
+static const struct of_device_id weim_id_table[] = {
+	/* i.MX1/21 */
+	{ .compatible = "fsl,imx1-weim", .data = &imx1_weim_devtype, },
+	/* i.MX25/27/31/35 */
+	{ .compatible = "fsl,imx27-weim", .data = &imx27_weim_devtype, },
+	/* i.MX50/53/6Q */
+	{ .compatible = "fsl,imx50-weim", .data = &imx50_weim_devtype, },
+	{ .compatible = "fsl,imx6q-weim", .data = &imx50_weim_devtype, },
+	/* i.MX51 */
+	{ .compatible = "fsl,imx51-weim", .data = &imx51_weim_devtype, },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, weim_id_table);
+
+static int __init imx_weim_gpr_setup(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct property *prop;
+	const __be32 *p;
+	struct regmap *gpr;
+	u32 gprvals[4] = {
+		05,	/* CS0(128M) CS1(0M)  CS2(0M)  CS3(0M)  */
+		033,	/* CS0(64M)  CS1(64M) CS2(0M)  CS3(0M)  */
+		0113,	/* CS0(64M)  CS1(32M) CS2(32M) CS3(0M)  */
+		01111,	/* CS0(32M)  CS1(32M) CS2(32M) CS3(32M) */
+	};
+	u32 gprval = 0;
+	u32 val;
+	int cs = 0;
+	int i = 0;
+
+	gpr = syscon_regmap_lookup_by_phandle(np, "fsl,weim-cs-gpr");
+	if (IS_ERR(gpr)) {
+		dev_dbg(&pdev->dev, "failed to find weim-cs-gpr\n");
+		return 0;
+	}
+
+	of_property_for_each_u32(np, "ranges", prop, p, val) {
+		if (i % 4 == 0) {
+			cs = val;
+		} else if (i % 4 == 3 && val) {
+			val = (val / SZ_32M) | 1;
+			gprval |= val << cs * 3;
+		}
+		i++;
+	}
+
+	if (i == 0 || i % 4)
+		goto err;
+
+	for (i = 0; i < ARRAY_SIZE(gprvals); i++) {
+		if (gprval == gprvals[i]) {
+			/* Found it. Set up IOMUXC_GPR1[11:0] with it. */
+			regmap_update_bits(gpr, IOMUXC_GPR1, 0xfff, gprval);
+			return 0;
+		}
+	}
+
+err:
+	dev_err(&pdev->dev, "Invalid 'ranges' configuration\n");
+	return -EINVAL;
+}
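+
+/*
+ * Worked example (illustrative): for a 64M + 64M chip-select layout,
+ * each CS field in IOMUXC_GPR1 is 3 bits wide and encodes
+ * (size / SZ_32M) | 1, so:
+ *
+ *   CS0: (64M / 32M) | 1 = 3  ->  3 << 0 = 03
+ *   CS1: (64M / 32M) | 1 = 3  ->  3 << 3 = 030
+ *
+ * giving gprval = 033, which matches the second entry of gprvals[] above.
+ */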
+
+/* Parse and set the timing for this device. */
+static int __init weim_timing_setup(struct device_node *np, void __iomem *base,
+				    const struct imx_weim_devtype *devtype)
+{
+	u32 cs_idx, value[devtype->cs_regs_count];
+	int i, ret;
+
+	/* get the CS index from this child node's "reg" property. */
+	ret = of_property_read_u32(np, "reg", &cs_idx);
+	if (ret)
+		return ret;
+
+	if (cs_idx >= devtype->cs_count)
+		return -EINVAL;
+
+	ret = of_property_read_u32_array(np, "fsl,weim-cs-timing",
+					 value, devtype->cs_regs_count);
+	if (ret)
+		return ret;
+
+	/* set the timing for WEIM */
+	for (i = 0; i < devtype->cs_regs_count; i++)
+		writel(value[i], base + cs_idx * devtype->cs_stride + i * 4);
+
+	return 0;
+}
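+
+/*
+ * Illustrative device tree fragment consumed by the function above
+ * (timing values are made up; the first "reg" cell is the CS index):
+ *
+ *   nor@0,0 {
+ *           compatible = "cfi-flash";
+ *           reg = <0 0 0x02000000>;
+ *           fsl,weim-cs-timing = <0x0161030f 0x00000004 0x00000000
+ *                                 0x00000000 0x00000000 0x00000000>;
+ *   };
+ *
+ * Six timing words correspond to cs_regs_count of the i.MX50/6Q devtype.
+ */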
+
+static int __init weim_parse_dt(struct platform_device *pdev,
+				void __iomem *base)
+{
+	const struct of_device_id *of_id = of_match_device(weim_id_table,
+							   &pdev->dev);
+	const struct imx_weim_devtype *devtype = of_id->data;
+	struct device_node *child;
+	int ret, have_child = 0;
+
+	if (devtype == &imx50_weim_devtype) {
+		ret = imx_weim_gpr_setup(pdev);
+		if (ret)
+			return ret;
+	}
+
+	for_each_available_child_of_node(pdev->dev.of_node, child) {
+		if (!child->name)
+			continue;
+
+		ret = weim_timing_setup(child, base, devtype);
+		if (ret)
+			dev_warn(&pdev->dev, "%pOF: failed to set timing.\n",
+				child);
+		else
+			have_child = 1;
+	}
+
+	if (have_child)
+		ret = of_platform_default_populate(pdev->dev.of_node,
+						   NULL, &pdev->dev);
+	if (ret)
+		dev_err(&pdev->dev, "%pOF fail to create devices.\n",
+			pdev->dev.of_node);
+	return ret;
+}
+
+static int __init weim_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct clk *clk;
+	void __iomem *base;
+	int ret;
+
+	/* get the resource */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	/* get the clock */
+	clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	ret = clk_prepare_enable(clk);
+	if (ret)
+		return ret;
+
+	/* parse the device node */
+	ret = weim_parse_dt(pdev, base);
+	if (ret)
+		clk_disable_unprepare(clk);
+	else
+		dev_info(&pdev->dev, "Driver registered.\n");
+
+	return ret;
+}
+
+static struct platform_driver weim_driver = {
+	.driver = {
+		.name		= "imx-weim",
+		.of_match_table	= weim_id_table,
+	},
+};
+module_platform_driver_probe(weim_driver, weim_probe);
+
+MODULE_AUTHOR("Freescale Semiconductor Inc.");
+MODULE_DESCRIPTION("i.MX EIM Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/drivers/bus/mips_cdmm.c b/src/kernel/linux/v4.14/drivers/bus/mips_cdmm.c
new file mode 100644
index 0000000..1b14256
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/mips_cdmm.c
@@ -0,0 +1,684 @@
+/*
+ * Bus driver for MIPS Common Device Memory Map (CDMM).
+ *
+ * Copyright (C) 2014-2015 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <asm/cdmm.h>
+#include <asm/hazards.h>
+#include <asm/mipsregs.h>
+
+/* Access control and status register fields */
+#define CDMM_ACSR_DEVTYPE_SHIFT	24
+#define CDMM_ACSR_DEVTYPE	(255ul << CDMM_ACSR_DEVTYPE_SHIFT)
+#define CDMM_ACSR_DEVSIZE_SHIFT	16
+#define CDMM_ACSR_DEVSIZE	(31ul << CDMM_ACSR_DEVSIZE_SHIFT)
+#define CDMM_ACSR_DEVREV_SHIFT	12
+#define CDMM_ACSR_DEVREV	(15ul << CDMM_ACSR_DEVREV_SHIFT)
+#define CDMM_ACSR_UW		(1ul << 3)
+#define CDMM_ACSR_UR		(1ul << 2)
+#define CDMM_ACSR_SW		(1ul << 1)
+#define CDMM_ACSR_SR		(1ul << 0)
+
+/* Each block of device registers is 64 bytes */
+#define CDMM_DRB_SIZE		64
+
+#define to_mips_cdmm_driver(d)	container_of(d, struct mips_cdmm_driver, drv)
+
+/* Default physical base address */
+static phys_addr_t mips_cdmm_default_base;
+
+/* Bus operations */
+
+static const struct mips_cdmm_device_id *
+mips_cdmm_lookup(const struct mips_cdmm_device_id *table,
+		 struct mips_cdmm_device *dev)
+{
+	int ret = 0;
+
+	for (; table->type; ++table) {
+		ret = (dev->type == table->type);
+		if (ret)
+			break;
+	}
+
+	return ret ? table : NULL;
+}
+
+static int mips_cdmm_match(struct device *dev, struct device_driver *drv)
+{
+	struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+	struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(drv);
+
+	return mips_cdmm_lookup(cdrv->id_table, cdev) != NULL;
+}
+
+static int mips_cdmm_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+	int retval = 0;
+
+	retval = add_uevent_var(env, "CDMM_CPU=%u", cdev->cpu);
+	if (retval)
+		return retval;
+
+	retval = add_uevent_var(env, "CDMM_TYPE=0x%02x", cdev->type);
+	if (retval)
+		return retval;
+
+	retval = add_uevent_var(env, "CDMM_REV=%u", cdev->rev);
+	if (retval)
+		return retval;
+
+	retval = add_uevent_var(env, "MODALIAS=mipscdmm:t%02X", cdev->type);
+	return retval;
+}
+
+/* Device attributes */
+
+#define CDMM_ATTR(name, fmt, arg...)					\
+static ssize_t name##_show(struct device *_dev,				\
+			   struct device_attribute *attr, char *buf)	\
+{									\
+	struct mips_cdmm_device *dev = to_mips_cdmm_device(_dev);	\
+	return sprintf(buf, fmt, arg);					\
+}									\
+static DEVICE_ATTR_RO(name);
+
+CDMM_ATTR(cpu, "%u\n", dev->cpu);
+CDMM_ATTR(type, "0x%02x\n", dev->type);
+CDMM_ATTR(revision, "%u\n", dev->rev);
+CDMM_ATTR(modalias, "mipscdmm:t%02X\n", dev->type);
+CDMM_ATTR(resource, "\t%016llx\t%016llx\t%016lx\n",
+	  (unsigned long long)dev->res.start,
+	  (unsigned long long)dev->res.end,
+	  dev->res.flags);
+
+static struct attribute *mips_cdmm_dev_attrs[] = {
+	&dev_attr_cpu.attr,
+	&dev_attr_type.attr,
+	&dev_attr_revision.attr,
+	&dev_attr_modalias.attr,
+	&dev_attr_resource.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(mips_cdmm_dev);
+
+struct bus_type mips_cdmm_bustype = {
+	.name		= "cdmm",
+	.dev_groups	= mips_cdmm_dev_groups,
+	.match		= mips_cdmm_match,
+	.uevent		= mips_cdmm_uevent,
+};
+EXPORT_SYMBOL_GPL(mips_cdmm_bustype);
+
+/*
+ * Standard driver callback helpers.
+ *
+ * All the CDMM driver callbacks need to be executed on the appropriate CPU from
+ * workqueues. For the standard driver callbacks we need a work function
+ * (mips_cdmm_{void,int}_work()) to do the actual call from the right CPU, and a
+ * wrapper function (generated with BUILD_PERCPU_HELPER) to arrange for the work
+ * function to be called on that CPU.
+ */
+
+/**
+ * struct mips_cdmm_work_dev - Data for per-device call work.
+ * @fn:		CDMM driver callback function to call for the device.
+ * @dev:	CDMM device to pass to @fn.
+ */
+struct mips_cdmm_work_dev {
+	void			*fn;
+	struct mips_cdmm_device *dev;
+};
+
+/**
+ * mips_cdmm_void_work() - Call a void returning CDMM driver callback.
+ * @data:	struct mips_cdmm_work_dev pointer.
+ *
+ * A work_on_cpu() callback function to call an arbitrary CDMM driver callback
+ * function which doesn't return a value.
+ */
+static long mips_cdmm_void_work(void *data)
+{
+	struct mips_cdmm_work_dev *work = data;
+	void (*fn)(struct mips_cdmm_device *) = work->fn;
+
+	fn(work->dev);
+	return 0;
+}
+
+/**
+ * mips_cdmm_int_work() - Call an int returning CDMM driver callback.
+ * @data:	struct mips_cdmm_work_dev pointer.
+ *
+ * A work_on_cpu() callback function to call an arbitrary CDMM driver callback
+ * function which returns an int.
+ */
+static long mips_cdmm_int_work(void *data)
+{
+	struct mips_cdmm_work_dev *work = data;
+	int (*fn)(struct mips_cdmm_device *) = work->fn;
+
+	return fn(work->dev);
+}
+
+#define _BUILD_RET_void
+#define _BUILD_RET_int	return
+
+/**
+ * BUILD_PERCPU_HELPER() - Helper to call a CDMM driver callback on right CPU.
+ * @_ret:	Return type (void or int).
+ * @_name:	Name of CDMM driver callback function.
+ *
+ * Generates a specific device callback function to call a CDMM driver callback
+ * function on the appropriate CPU for the device, and if applicable return the
+ * result.
+ */
+#define BUILD_PERCPU_HELPER(_ret, _name)				\
+static _ret mips_cdmm_##_name(struct device *dev)			\
+{									\
+	struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);	\
+	struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(dev->driver); \
+	struct mips_cdmm_work_dev work = {				\
+		.fn	= cdrv->_name,					\
+		.dev	= cdev,						\
+	};								\
+									\
+	_BUILD_RET_##_ret work_on_cpu(cdev->cpu,			\
+				      mips_cdmm_##_ret##_work, &work);	\
+}
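+
+/*
+ * For instance, BUILD_PERCPU_HELPER(int, probe) below expands (roughly) to:
+ *
+ *   static int mips_cdmm_probe(struct device *dev)
+ *   {
+ *           ...
+ *           return work_on_cpu(cdev->cpu, mips_cdmm_int_work, &work);
+ *   }
+ *
+ * so a driver's probe callback always runs on the device's own CPU.
+ */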
+
+/* Driver callback functions */
+BUILD_PERCPU_HELPER(int, probe)     /* int mips_cdmm_probe(struct device) */
+BUILD_PERCPU_HELPER(int, remove)    /* int mips_cdmm_remove(struct device) */
+BUILD_PERCPU_HELPER(void, shutdown) /* void mips_cdmm_shutdown(struct device) */
+
+
+/* Driver registration */
+
+/**
+ * mips_cdmm_driver_register() - Register a CDMM driver.
+ * @drv:	CDMM driver information.
+ *
+ * Register a CDMM driver with the CDMM subsystem. The driver will be informed
+ * of matching devices which are discovered.
+ *
+ * Returns:	0 on success.
+ */
+int mips_cdmm_driver_register(struct mips_cdmm_driver *drv)
+{
+	drv->drv.bus = &mips_cdmm_bustype;
+
+	if (drv->probe)
+		drv->drv.probe = mips_cdmm_probe;
+	if (drv->remove)
+		drv->drv.remove = mips_cdmm_remove;
+	if (drv->shutdown)
+		drv->drv.shutdown = mips_cdmm_shutdown;
+
+	return driver_register(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_driver_register);
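+
+/*
+ * Minimal registration sketch (illustrative; the "my_fdc" names and the
+ * 0xfd device type are examples, not requirements):
+ *
+ *   static const struct mips_cdmm_device_id my_fdc_ids[] = {
+ *           { .type = 0xfd },
+ *           { }
+ *   };
+ *
+ *   static struct mips_cdmm_driver my_fdc_driver = {
+ *           .drv      = { .name = "my_fdc" },
+ *           .probe    = my_fdc_probe,
+ *           .remove   = my_fdc_remove,
+ *           .id_table = my_fdc_ids,
+ *   };
+ *
+ *   ret = mips_cdmm_driver_register(&my_fdc_driver);
+ */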
+
+/**
+ * mips_cdmm_driver_unregister() - Unregister a CDMM driver.
+ * @drv:	CDMM driver information.
+ *
+ * Unregister a CDMM driver from the CDMM subsystem.
+ */
+void mips_cdmm_driver_unregister(struct mips_cdmm_driver *drv)
+{
+	driver_unregister(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_driver_unregister);
+
+
+/* CDMM initialisation and bus discovery */
+
+/**
+ * struct mips_cdmm_bus - Info about CDMM bus.
+ * @phys:		Physical address at which it is mapped.
+ * @regs:		Virtual address where registers can be accessed.
+ * @drbs:		Total number of DRBs.
+ * @drbs_reserved:	Number of DRBs reserved.
+ * @discovered:		Whether the devices on the bus have been discovered yet.
+ * @offline:		Whether the CDMM bus is going offline (or very early
+ *			coming back online), in which case it should be
+ *			reconfigured each time.
+ */
+struct mips_cdmm_bus {
+	phys_addr_t	 phys;
+	void __iomem	*regs;
+	unsigned int	 drbs;
+	unsigned int	 drbs_reserved;
+	bool		 discovered;
+	bool		 offline;
+};
+
+static struct mips_cdmm_bus mips_cdmm_boot_bus;
+static DEFINE_PER_CPU(struct mips_cdmm_bus *, mips_cdmm_buses);
+static atomic_t mips_cdmm_next_id = ATOMIC_INIT(-1);
+
+/**
+ * mips_cdmm_get_bus() - Get the per-CPU CDMM bus information.
+ *
+ * Get information about the per-CPU CDMM bus, if the bus is present.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns:	Pointer to CDMM bus information for the current CPU.
+ *		May return ERR_PTR(-errno) in case of error, so check with
+ *		IS_ERR().
+ */
+static struct mips_cdmm_bus *mips_cdmm_get_bus(void)
+{
+	struct mips_cdmm_bus *bus, **bus_p;
+	unsigned long flags;
+	unsigned int cpu;
+
+	if (!cpu_has_cdmm)
+		return ERR_PTR(-ENODEV);
+
+	cpu = smp_processor_id();
+	/* Avoid early use of per-cpu primitives before they are initialised */
+	if (cpu == 0)
+		return &mips_cdmm_boot_bus;
+
+	/* Get bus pointer */
+	bus_p = per_cpu_ptr(&mips_cdmm_buses, cpu);
+	local_irq_save(flags);
+	bus = *bus_p;
+	/* Attempt allocation if NULL */
+	if (unlikely(!bus)) {
+		bus = kzalloc(sizeof(*bus), GFP_ATOMIC);
+		if (unlikely(!bus))
+			bus = ERR_PTR(-ENOMEM);
+		else
+			*bus_p = bus;
+	}
+	local_irq_restore(flags);
+	return bus;
+}
+
+/**
+ * mips_cdmm_cur_base() - Find current physical base address of CDMM region.
+ *
+ * Returns:	Physical base address of CDMM region according to cdmmbase CP0
+ *		register, or 0 if the CDMM region is disabled.
+ */
+static phys_addr_t mips_cdmm_cur_base(void)
+{
+	unsigned long cdmmbase = read_c0_cdmmbase();
+
+	if (!(cdmmbase & MIPS_CDMMBASE_EN))
+		return 0;
+
+	return (cdmmbase >> MIPS_CDMMBASE_ADDR_SHIFT)
+		<< MIPS_CDMMBASE_ADDR_START;
+}
+
+/**
+ * mips_cdmm_phys_base() - Choose a physical base address for CDMM region.
+ *
+ * Picking a suitable physical address at which to map the CDMM region is
+ * platform specific, so this weak function can be overridden by platform
+ * code to pick a suitable value if none is configured by the bootloader.
+ */
+phys_addr_t __weak mips_cdmm_phys_base(void)
+{
+	return 0;
+}
+
+/**
+ * mips_cdmm_setup() - Ensure the CDMM bus is initialised and usable.
+ * @bus:	Pointer to bus information for current CPU.
+ *		IS_ERR(bus) is checked, so no need for caller to check.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns:	0 on success, -errno on failure.
+ */
+static int mips_cdmm_setup(struct mips_cdmm_bus *bus)
+{
+	unsigned long cdmmbase, flags;
+	int ret = 0;
+
+	if (IS_ERR(bus))
+		return PTR_ERR(bus);
+
+	local_irq_save(flags);
+	/* Don't set up bus a second time unless marked offline */
+	if (bus->offline) {
+		/* If CDMM region is still set up, nothing to do */
+		if (bus->phys == mips_cdmm_cur_base())
+			goto out;
+		/*
+		 * The CDMM region isn't set up as expected, so it needs
+		 * reconfiguring, but then we can stop checking it.
+		 */
+		bus->offline = false;
+	} else if (bus->phys > 1) {
+		goto out;
+	}
+
+	/* If the CDMM region is already configured, inherit that setup */
+	if (!bus->phys)
+		bus->phys = mips_cdmm_cur_base();
+	/* Otherwise, ask platform code for suggestions */
+	if (!bus->phys)
+		bus->phys = mips_cdmm_phys_base();
+	/* Otherwise, copy what other CPUs have done */
+	if (!bus->phys)
+		bus->phys = mips_cdmm_default_base;
+	/* Otherwise, complain once */
+	if (!bus->phys) {
+		bus->phys = 1;
+		/*
+		 * If you hit this, either your bootloader needs to set up the
+		 * CDMM on the boot CPU, or else you need to implement
+		 * mips_cdmm_phys_base() for your platform (see asm/cdmm.h).
+		 */
+		pr_err("cdmm%u: Failed to choose a physical base\n",
+		       smp_processor_id());
+	}
+	/* Already complained? */
+	if (bus->phys == 1) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	/* Record our success for other CPUs to copy */
+	mips_cdmm_default_base = bus->phys;
+
+	pr_debug("cdmm%u: Enabling CDMM region at %pa\n",
+		 smp_processor_id(), &bus->phys);
+
+	/* Enable CDMM */
+	cdmmbase = read_c0_cdmmbase();
+	cdmmbase &= (1ul << MIPS_CDMMBASE_ADDR_SHIFT) - 1;
+	cdmmbase |= (bus->phys >> MIPS_CDMMBASE_ADDR_START)
+			<< MIPS_CDMMBASE_ADDR_SHIFT;
+	cdmmbase |= MIPS_CDMMBASE_EN;
+	write_c0_cdmmbase(cdmmbase);
+	tlbw_use_hazard();
+
+	bus->regs = (void __iomem *)CKSEG1ADDR(bus->phys);
+	bus->drbs = 1 + ((cdmmbase & MIPS_CDMMBASE_SIZE) >>
+			 MIPS_CDMMBASE_SIZE_SHIFT);
+	bus->drbs_reserved = !!(cdmmbase & MIPS_CDMMBASE_CI);
+
+out:
+	local_irq_restore(flags);
+	return ret;
+}
+
+/**
+ * mips_cdmm_early_probe() - Minimally probe for a specific device on CDMM.
+ * @dev_type:	CDMM type code to look for.
+ *
+ * Minimally configure the in-CPU Common Device Memory Map (CDMM) and look for a
+ * specific device. This can be used to find a device very early in boot, for
+ * example to configure an early FDC console device.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns:	MMIO pointer to device memory. The caller can read the ACSR
+ *		register to find more information about the device (such as the
+ *		version number or the number of blocks).
+ *		May return IOMEM_ERR_PTR(-errno) in case of error, so check with
+ *		IS_ERR().
+ */
+void __iomem *mips_cdmm_early_probe(unsigned int dev_type)
+{
+	struct mips_cdmm_bus *bus;
+	void __iomem *cdmm;
+	u32 acsr;
+	unsigned int drb, type, size;
+	int err;
+
+	if (WARN_ON(!dev_type))
+		return IOMEM_ERR_PTR(-ENODEV);
+
+	bus = mips_cdmm_get_bus();
+	err = mips_cdmm_setup(bus);
+	if (err)
+		return IOMEM_ERR_PTR(err);
+
+	/* Skip the first block if it's reserved for more registers */
+	drb = bus->drbs_reserved;
+	cdmm = bus->regs;
+
+	/* Look for a specific device type */
+	for (; drb < bus->drbs; drb += size + 1) {
+		acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
+		type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
+		if (type == dev_type)
+			return cdmm + drb * CDMM_DRB_SIZE;
+		size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
+	}
+
+	return IOMEM_ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_early_probe);
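+
+/*
+ * Early-boot usage sketch (illustrative; 0xfd is the FDC device type):
+ *
+ *   void __iomem *regs;
+ *
+ *   preempt_disable();
+ *   regs = mips_cdmm_early_probe(0xfd);
+ *   if (!IS_ERR(regs))
+ *           ... access the device registers directly ...
+ *   preempt_enable();
+ */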
+
+/**
+ * mips_cdmm_release() - Release a removed CDMM device.
+ * @dev:	Device object
+ *
+ * Clean up the struct mips_cdmm_device for an unused CDMM device. This is
+ * called automatically by the driver core when a device is removed.
+ */
+static void mips_cdmm_release(struct device *dev)
+{
+	struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+
+	kfree(cdev);
+}
+
+/**
+ * mips_cdmm_bus_discover() - Discover the devices on the CDMM bus.
+ * @bus:	CDMM bus information, must already be set up.
+ */
+static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
+{
+	void __iomem *cdmm;
+	u32 acsr;
+	unsigned int drb, type, size, rev;
+	struct mips_cdmm_device *dev;
+	unsigned int cpu = smp_processor_id();
+	int ret = 0;
+	int id = 0;
+
+	/* Skip the first block if it's reserved for more registers */
+	drb = bus->drbs_reserved;
+	cdmm = bus->regs;
+
+	/* Discover devices */
+	bus->discovered = true;
+	pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs);
+	for (; drb < bus->drbs; drb += size + 1) {
+		acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
+		type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
+		size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
+		rev  = (acsr & CDMM_ACSR_DEVREV)  >> CDMM_ACSR_DEVREV_SHIFT;
+
+		if (!type)
+			continue;
+
+		pr_info("cdmm%u-%u: @%u (%#x..%#x), type 0x%02x, rev %u\n",
+			cpu, id, drb, drb * CDMM_DRB_SIZE,
+			(drb + size + 1) * CDMM_DRB_SIZE - 1,
+			type, rev);
+
+		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+		if (!dev)
+			break;
+
+		dev->cpu = cpu;
+		dev->res.start = bus->phys + drb * CDMM_DRB_SIZE;
+		dev->res.end = bus->phys +
+				(drb + size + 1) * CDMM_DRB_SIZE - 1;
+		dev->res.flags = IORESOURCE_MEM;
+		dev->type = type;
+		dev->rev = rev;
+		dev->dev.parent = get_cpu_device(cpu);
+		dev->dev.bus = &mips_cdmm_bustype;
+		dev->dev.id = atomic_inc_return(&mips_cdmm_next_id);
+		dev->dev.release = mips_cdmm_release;
+
+		dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id);
+		++id;
+		ret = device_register(&dev->dev);
+		if (ret) {
+			put_device(&dev->dev);
+			kfree(dev);
+		}
+	}
+}
+
+
+/*
+ * CPU hotplug and initialisation
+ *
+ * All the CDMM driver callbacks need to be executed on the appropriate CPU from
+ * workqueues. For the CPU callbacks, they need to be called for all devices on
+ * that CPU, so the work function calls bus_for_each_dev, using a helper
+ * (generated with BUILD_PERDEV_HELPER) to call the driver callback if the
+ * device's CPU matches.
+ */
+
+/**
+ * BUILD_PERDEV_HELPER() - Helper to call a CDMM driver callback if CPU matches.
+ * @_name:	Name of CDMM driver callback function.
+ *
+ * Generates a bus_for_each_dev callback function to call a specific CDMM driver
+ * callback function for the device if the device's CPU matches that pointed to
+ * by the data argument.
+ *
+ * This is used for informing drivers for all devices on a given CPU of some
+ * event (such as the CPU going online/offline).
+ *
+ * It is expected to already be called from the appropriate CPU.
+ */
+#define BUILD_PERDEV_HELPER(_name)					\
+static int mips_cdmm_##_name##_helper(struct device *dev, void *data)	\
+{									\
+	struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);	\
+	struct mips_cdmm_driver *cdrv;					\
+	unsigned int cpu = *(unsigned int *)data;			\
+									\
+	if (cdev->cpu != cpu || !dev->driver)				\
+		return 0;						\
+									\
+	cdrv = to_mips_cdmm_driver(dev->driver);			\
+	if (!cdrv->_name)						\
+		return 0;						\
+	return cdrv->_name(cdev);					\
+}
+
+/* bus_for_each_dev callback helper functions */
+BUILD_PERDEV_HELPER(cpu_down)       /* int mips_cdmm_cpu_down_helper(...) */
+BUILD_PERDEV_HELPER(cpu_up)         /* int mips_cdmm_cpu_up_helper(...) */
+
+/**
+ * mips_cdmm_cpu_down_prep() - Callback for CPUHP DOWN_PREP:
+ *			       Tear down the CDMM bus.
+ * @cpu:	unsigned int CPU number.
+ *
+ * This function is executed on the hotplugged CPU and calls the CDMM
+ * driver cpu_down callback for all devices on that CPU.
+ */
+static int mips_cdmm_cpu_down_prep(unsigned int cpu)
+{
+	struct mips_cdmm_bus *bus;
+	long ret;
+
+	/* Inform all the devices on the bus */
+	ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
+			       mips_cdmm_cpu_down_helper);
+
+	/*
+	 * While the bus is offline, each use of it should reconfigure it, just
+	 * in case it is the first use after coming back online again.
+	 */
+	bus = mips_cdmm_get_bus();
+	if (!IS_ERR(bus))
+		bus->offline = true;
+
+	return ret;
+}
+
+/**
+ * mips_cdmm_cpu_online() - Callback for CPUHP ONLINE: Bring up the CDMM bus.
+ * @cpu:	unsigned int CPU number.
+ *
+ * This work_on_cpu callback function is executed on a given CPU to discover
+ * CDMM devices on that CPU, or to call the CDMM driver cpu_up callback for all
+ * devices already discovered on that CPU.
+ *
+ * It is used as a work_on_cpu callback function during
+ * initialisation. When CPUs are brought online, the function is
+ * invoked directly on the hotplugged CPU.
+ */
+static int mips_cdmm_cpu_online(unsigned int cpu)
+{
+	struct mips_cdmm_bus *bus;
+	long ret;
+
+	bus = mips_cdmm_get_bus();
+	ret = mips_cdmm_setup(bus);
+	if (ret)
+		return ret;
+
+	/* Bus now set up, so we can drop the offline flag if still set */
+	bus->offline = false;
+
+	if (!bus->discovered)
+		mips_cdmm_bus_discover(bus);
+	else
+		/* Inform all the devices on the bus */
+		ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
+				       mips_cdmm_cpu_up_helper);
+
+	return ret;
+}
+
+/**
+ * mips_cdmm_init() - Initialise CDMM bus.
+ *
+ * Initialise CDMM bus, discover CDMM devices for online CPUs, and arrange for
+ * hotplug notifications so the CDMM drivers can be kept up to date.
+ */
+static int __init mips_cdmm_init(void)
+{
+	int ret;
+
+	/* Register the bus */
+	ret = bus_register(&mips_cdmm_bustype);
+	if (ret)
+		return ret;
+
+	/* We want to be notified about new CPUs */
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "bus/cdmm:online",
+				mips_cdmm_cpu_online, mips_cdmm_cpu_down_prep);
+	if (ret < 0)
+		pr_warn("cdmm: Failed to register CPU notifier\n");
+
+	return ret;
+}
+subsys_initcall(mips_cdmm_init);
diff --git a/src/kernel/linux/v4.14/drivers/bus/mvebu-mbus.c b/src/kernel/linux/v4.14/drivers/bus/mvebu-mbus.c
new file mode 100644
index 0000000..70db4d5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/mvebu-mbus.c
@@ -0,0 +1,1378 @@
+/*
+ * Address map functions for Marvell EBU SoCs (Kirkwood, Armada
+ * 370/XP, Dove, Orion5x and MV78xx0)
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The Marvell EBU SoCs have a configurable physical address space:
+ * the physical address at which certain devices (PCIe, NOR, NAND,
+ * etc.) sit can be configured. The configuration takes place through
+ * two sets of registers:
+ *
+ * - One to configure the access of the CPU to the devices. Depending
+ *   on the families, there are between 8 and 20 configurable windows,
+ *   each of which can be used to create a physical memory window that maps to a
+ *   specific device. Devices are identified by a tuple (target,
+ *   attribute).
+ *
+ * - One to configure the access of the CPU to the SDRAM. There are
+ *   either 2 (for Dove) or 4 (for other families) windows to map the
+ *   SDRAM into the physical address space.
+ *
+ * This driver:
+ *
+ * - Reads out the SDRAM address decoding windows at initialization
+ *   time, and fills the mvebu_mbus_dram_info structure with this
+ *   information. The exported function mv_mbus_dram_info() allows
+ *   device drivers to get the information related to the SDRAM
+ *   address decoding windows. This is because devices also have their
+ *   own windows (configured through registers that are part of each
+ *   device register space), and therefore the drivers for Marvell
+ *   devices have to configure those device -> SDRAM windows to ensure
+ *   that DMA works properly.
+ *
+ * - Provides an API for platform code or device drivers to
+ *   dynamically add or remove address decoding windows for the CPU ->
+ *   device accesses. This API is mvebu_mbus_add_window_by_id(),
+ *   mvebu_mbus_add_window_remap_by_id() and
+ *   mvebu_mbus_del_window().
+ *
+ * - Provides a debugfs interface in /sys/kernel/debug/mvebu-mbus/ to
+ *   see the list of CPU -> SDRAM windows and their configuration
+ *   (file 'sdram') and the list of CPU -> devices windows and their
+ *   configuration (file 'devices').
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mbus.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/debugfs.h>
+#include <linux/log2.h>
+#include <linux/memblock.h>
+#include <linux/syscore_ops.h>
+
+/*
+ * DDR target is the same on all platforms.
+ */
+#define TARGET_DDR		0
+
+/*
+ * CPU Address Decode Windows registers
+ */
+#define WIN_CTRL_OFF		0x0000
+#define   WIN_CTRL_ENABLE       BIT(0)
+/* Only on HW I/O coherency capable platforms */
+#define   WIN_CTRL_SYNCBARRIER  BIT(1)
+#define   WIN_CTRL_TGT_MASK     0xf0
+#define   WIN_CTRL_TGT_SHIFT    4
+#define   WIN_CTRL_ATTR_MASK    0xff00
+#define   WIN_CTRL_ATTR_SHIFT   8
+#define   WIN_CTRL_SIZE_MASK    0xffff0000
+#define   WIN_CTRL_SIZE_SHIFT   16
+#define WIN_BASE_OFF		0x0004
+#define   WIN_BASE_LOW          0xffff0000
+#define   WIN_BASE_HIGH         0xf
+#define WIN_REMAP_LO_OFF	0x0008
+#define   WIN_REMAP_LOW         0xffff0000
+#define WIN_REMAP_HI_OFF	0x000c
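+
+/*
+ * Size encoding example (illustrative): a 1 MiB window stores
+ * (0x100000 - 1) & WIN_CTRL_SIZE_MASK = 0x000f0000 in WIN_CTRL; reading
+ * it back, (0x000f0000 | ~WIN_CTRL_SIZE_MASK) + 1 = 0x100000 recovers
+ * the size, as mvebu_mbus_read_window() below does.
+ */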
+
+#define UNIT_SYNC_BARRIER_OFF   0x84
+#define   UNIT_SYNC_BARRIER_ALL 0xFFFF
+
+#define ATTR_HW_COHERENCY	(0x1 << 4)
+
+#define DDR_BASE_CS_OFF(n)	(0x0000 + ((n) << 3))
+#define  DDR_BASE_CS_HIGH_MASK  0xf
+#define  DDR_BASE_CS_LOW_MASK   0xff000000
+#define DDR_SIZE_CS_OFF(n)	(0x0004 + ((n) << 3))
+#define  DDR_SIZE_ENABLED       BIT(0)
+#define  DDR_SIZE_CS_MASK       0x1c
+#define  DDR_SIZE_CS_SHIFT      2
+#define  DDR_SIZE_MASK          0xff000000
+
+#define DOVE_DDR_BASE_CS_OFF(n) ((n) << 4)
+
+/* Relative to mbusbridge_base */
+#define MBUS_BRIDGE_CTRL_OFF	0x0
+#define MBUS_BRIDGE_BASE_OFF	0x4
+
+/* Maximum number of windows, for all known platforms */
+#define MBUS_WINS_MAX           20
+
+struct mvebu_mbus_state;
+
+struct mvebu_mbus_soc_data {
+	unsigned int num_wins;
+	bool has_mbus_bridge;
+	unsigned int (*win_cfg_offset)(const int win);
+	unsigned int (*win_remap_offset)(const int win);
+	void (*setup_cpu_target)(struct mvebu_mbus_state *s);
+	int (*save_cpu_target)(struct mvebu_mbus_state *s,
+			       u32 __iomem *store_addr);
+	int (*show_cpu_target)(struct mvebu_mbus_state *s,
+			       struct seq_file *seq, void *v);
+};
+
+/*
+ * Used to store the state of one MBus window across suspend/resume.
+ */
+struct mvebu_mbus_win_data {
+	u32 ctrl;
+	u32 base;
+	u32 remap_lo;
+	u32 remap_hi;
+};
+
+struct mvebu_mbus_state {
+	void __iomem *mbuswins_base;
+	void __iomem *sdramwins_base;
+	void __iomem *mbusbridge_base;
+	phys_addr_t sdramwins_phys_base;
+	struct dentry *debugfs_root;
+	struct dentry *debugfs_sdram;
+	struct dentry *debugfs_devs;
+	struct resource pcie_mem_aperture;
+	struct resource pcie_io_aperture;
+	const struct mvebu_mbus_soc_data *soc;
+	int hw_io_coherency;
+
+	/* Used during suspend/resume */
+	u32 mbus_bridge_ctrl;
+	u32 mbus_bridge_base;
+	struct mvebu_mbus_win_data wins[MBUS_WINS_MAX];
+};
+
+static struct mvebu_mbus_state mbus_state;
+
+/*
+ * We provide two variants of the mv_mbus_dram_info() function:
+ *
+ * - The normal one, where the described DRAM ranges may overlap with
+ *   the I/O windows, but for which the DRAM ranges are guaranteed to
+ *   have a power of two size. Such ranges are suitable for the DMA
+ *   masters that only DMA between the RAM and the device, which is
+ *   actually all devices except the crypto engines.
+ *
+ * - The 'nooverlap' one, where the described DRAM ranges are
+ *   guaranteed to not overlap with the I/O windows, but for which the
+ *   DRAM ranges will not have power of two sizes. They will only be
+ *   aligned on a 64 KB boundary, and have a size multiple of 64
+ *   KB. Such ranges are suitable for the DMA masters that DMA between
+ *   the crypto SRAM (which is mapped through an I/O window) and a
+ *   device. This is the case for the crypto engines.
+ */
+
+static struct mbus_dram_target_info mvebu_mbus_dram_info;
+static struct mbus_dram_target_info mvebu_mbus_dram_info_nooverlap;
+
+const struct mbus_dram_target_info *mv_mbus_dram_info(void)
+{
+	return &mvebu_mbus_dram_info;
+}
+EXPORT_SYMBOL_GPL(mv_mbus_dram_info);
+
+const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void)
+{
+	return &mvebu_mbus_dram_info_nooverlap;
+}
+EXPORT_SYMBOL_GPL(mv_mbus_dram_info_nooverlap);
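+
+/*
+ * Typical consumer-side usage (illustrative sketch only; program_win()
+ * stands in for device-specific register writes and is not part of this
+ * file):
+ *
+ *	const struct mbus_dram_target_info *dram = mv_mbus_dram_info();
+ *	int i;
+ *
+ *	for (i = 0; i < dram->num_cs; i++) {
+ *		const struct mbus_dram_window *cs = dram->cs + i;
+ *
+ *		program_win(i, cs->base, cs->size,
+ *			    dram->mbus_dram_target_id, cs->mbus_attr);
+ *	}
+ */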
+
+/* Checks whether the given window has remap capability */
+static bool mvebu_mbus_window_is_remappable(struct mvebu_mbus_state *mbus,
+					    const int win)
+{
+	return mbus->soc->win_remap_offset(win) != MVEBU_MBUS_NO_REMAP;
+}
+
+/*
+ * Functions to manipulate the address decoding windows
+ */
+
+static void mvebu_mbus_read_window(struct mvebu_mbus_state *mbus,
+				   int win, int *enabled, u64 *base,
+				   u32 *size, u8 *target, u8 *attr,
+				   u64 *remap)
+{
+	void __iomem *addr = mbus->mbuswins_base +
+		mbus->soc->win_cfg_offset(win);
+	u32 basereg = readl(addr + WIN_BASE_OFF);
+	u32 ctrlreg = readl(addr + WIN_CTRL_OFF);
+
+	if (!(ctrlreg & WIN_CTRL_ENABLE)) {
+		*enabled = 0;
+		return;
+	}
+
+	*enabled = 1;
+	*base = ((u64)basereg & WIN_BASE_HIGH) << 32;
+	*base |= (basereg & WIN_BASE_LOW);
+	*size = (ctrlreg | ~WIN_CTRL_SIZE_MASK) + 1;
+
+	if (target)
+		*target = (ctrlreg & WIN_CTRL_TGT_MASK) >> WIN_CTRL_TGT_SHIFT;
+
+	if (attr)
+		*attr = (ctrlreg & WIN_CTRL_ATTR_MASK) >> WIN_CTRL_ATTR_SHIFT;
+
+	if (remap) {
+		if (mvebu_mbus_window_is_remappable(mbus, win)) {
+			u32 remap_low, remap_hi;
+			void __iomem *addr_rmp = mbus->mbuswins_base +
+				mbus->soc->win_remap_offset(win);
+			remap_low = readl(addr_rmp + WIN_REMAP_LO_OFF);
+			remap_hi  = readl(addr_rmp + WIN_REMAP_HI_OFF);
+			*remap = ((u64)remap_hi << 32) | remap_low;
+		} else
+			*remap = 0;
+	}
+}
+
+static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
+				      int win)
+{
+	void __iomem *addr;
+
+	addr = mbus->mbuswins_base + mbus->soc->win_cfg_offset(win);
+	writel(0, addr + WIN_BASE_OFF);
+	writel(0, addr + WIN_CTRL_OFF);
+
+	if (mvebu_mbus_window_is_remappable(mbus, win)) {
+		addr = mbus->mbuswins_base + mbus->soc->win_remap_offset(win);
+		writel(0, addr + WIN_REMAP_LO_OFF);
+		writel(0, addr + WIN_REMAP_HI_OFF);
+	}
+}
+
+/* Checks whether the given window number is available */
+
+static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
+				     const int win)
+{
+	void __iomem *addr = mbus->mbuswins_base +
+		mbus->soc->win_cfg_offset(win);
+	u32 ctrl = readl(addr + WIN_CTRL_OFF);
+
+	return !(ctrl & WIN_CTRL_ENABLE);
+}
+
+/*
+ * Checks whether the given (base, base+size) area overlaps an existing
+ * window. Returns 1 when there is no conflict, 0 otherwise.
+ */
+static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus,
+				       phys_addr_t base, size_t size,
+				       u8 target, u8 attr)
+{
+	u64 end = (u64)base + size;
+	int win;
+
+	for (win = 0; win < mbus->soc->num_wins; win++) {
+		u64 wbase, wend;
+		u32 wsize;
+		u8 wtarget, wattr;
+		int enabled;
+
+		mvebu_mbus_read_window(mbus, win,
+				       &enabled, &wbase, &wsize,
+				       &wtarget, &wattr, NULL);
+
+		if (!enabled)
+			continue;
+
+		wend = wbase + wsize;
+
+		/*
+		 * Check if the current window overlaps with the
+		 * proposed physical range
+		 */
+		if ((u64)base < wend && end > wbase)
+			return 0;
+	}
+
+	return 1;
+}
+
+static int mvebu_mbus_find_window(struct mvebu_mbus_state *mbus,
+				  phys_addr_t base, size_t size)
+{
+	int win;
+
+	for (win = 0; win < mbus->soc->num_wins; win++) {
+		u64 wbase;
+		u32 wsize;
+		int enabled;
+
+		mvebu_mbus_read_window(mbus, win,
+				       &enabled, &wbase, &wsize,
+				       NULL, NULL, NULL);
+
+		if (!enabled)
+			continue;
+
+		if (base == wbase && size == wsize)
+			return win;
+	}
+
+	return -ENODEV;
+}
+
+static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
+				   int win, phys_addr_t base, size_t size,
+				   phys_addr_t remap, u8 target,
+				   u8 attr)
+{
+	void __iomem *addr = mbus->mbuswins_base +
+		mbus->soc->win_cfg_offset(win);
+	u32 ctrl, remap_addr;
+
+	if (!is_power_of_2(size)) {
+		WARN(true, "Invalid MBus window size: 0x%zx\n", size);
+		return -EINVAL;
+	}
+
+	if ((base & (phys_addr_t)(size - 1)) != 0) {
+		WARN(true, "Invalid MBus base/size: %pa len 0x%zx\n", &base,
+		     size);
+		return -EINVAL;
+	}
+
+	ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
+		(attr << WIN_CTRL_ATTR_SHIFT)    |
+		(target << WIN_CTRL_TGT_SHIFT)   |
+		WIN_CTRL_ENABLE;
+	if (mbus->hw_io_coherency)
+		ctrl |= WIN_CTRL_SYNCBARRIER;
+
+	writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
+	writel(ctrl, addr + WIN_CTRL_OFF);
+
+	if (mvebu_mbus_window_is_remappable(mbus, win)) {
+		void __iomem *addr_rmp = mbus->mbuswins_base +
+			mbus->soc->win_remap_offset(win);
+
+		if (remap == MVEBU_MBUS_NO_REMAP)
+			remap_addr = base;
+		else
+			remap_addr = remap;
+		writel(remap_addr & WIN_REMAP_LOW, addr_rmp + WIN_REMAP_LO_OFF);
+		writel(0, addr_rmp + WIN_REMAP_HI_OFF);
+	}
+
+	return 0;
+}
+
+static int mvebu_mbus_alloc_window(struct mvebu_mbus_state *mbus,
+				   phys_addr_t base, size_t size,
+				   phys_addr_t remap, u8 target,
+				   u8 attr)
+{
+	int win;
+
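+	/*
+	 * Allocation policy note: when no remap is required, try the
+	 * non-remappable windows first, so that the scarcer remappable
+	 * windows stay available for callers that do need remapping.
+	 */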
+	if (remap == MVEBU_MBUS_NO_REMAP) {
+		for (win = 0; win < mbus->soc->num_wins; win++) {
+			if (mvebu_mbus_window_is_remappable(mbus, win))
+				continue;
+
+			if (mvebu_mbus_window_is_free(mbus, win))
+				return mvebu_mbus_setup_window(mbus, win, base,
+							       size, remap,
+							       target, attr);
+		}
+	}
+
+	for (win = 0; win < mbus->soc->num_wins; win++) {
+		/* Skip this window if remap is needed but not supported */
+		if ((remap != MVEBU_MBUS_NO_REMAP) &&
+		    !mvebu_mbus_window_is_remappable(mbus, win))
+			continue;
+
+		if (mvebu_mbus_window_is_free(mbus, win))
+			return mvebu_mbus_setup_window(mbus, win, base, size,
+						       remap, target, attr);
+	}
+
+	return -ENOMEM;
+}
+
+/*
+ * Debugfs debugging
+ */
+
+/* Common function used for Dove, Kirkwood, Armada 370/XP and Orion 5x */
+static int mvebu_sdram_debug_show_orion(struct mvebu_mbus_state *mbus,
+					struct seq_file *seq, void *v)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		u32 basereg = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+		u32 sizereg = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+		u64 base;
+		u32 size;
+
+		if (!(sizereg & DDR_SIZE_ENABLED)) {
+			seq_printf(seq, "[%d] disabled\n", i);
+			continue;
+		}
+
+		base = ((u64)basereg & DDR_BASE_CS_HIGH_MASK) << 32;
+		base |= basereg & DDR_BASE_CS_LOW_MASK;
+		size = (sizereg | ~DDR_SIZE_MASK);
+
+		seq_printf(seq, "[%d] %016llx - %016llx : cs%d\n",
+			   i, (unsigned long long)base,
+			   (unsigned long long)base + size + 1,
+			   (sizereg & DDR_SIZE_CS_MASK) >> DDR_SIZE_CS_SHIFT);
+	}
+
+	return 0;
+}
+
+/* Special function for Dove */
+static int mvebu_sdram_debug_show_dove(struct mvebu_mbus_state *mbus,
+				       struct seq_file *seq, void *v)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
+		u64 base;
+		u32 size;
+
+		if (!(map & 1)) {
+			seq_printf(seq, "[%d] disabled\n", i);
+			continue;
+		}
+
+		base = map & 0xff800000;
+		size = 0x100000 << (((map & 0x000f0000) >> 16) - 4);
+
+		seq_printf(seq, "[%d] %016llx - %016llx : cs%d\n",
+			   i, (unsigned long long)base,
+			   (unsigned long long)base + size, i);
+	}
+
+	return 0;
+}
+
+static int mvebu_sdram_debug_show(struct seq_file *seq, void *v)
+{
+	struct mvebu_mbus_state *mbus = &mbus_state;
+	return mbus->soc->show_cpu_target(mbus, seq, v);
+}
+
+static int mvebu_sdram_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mvebu_sdram_debug_show, inode->i_private);
+}
+
+static const struct file_operations mvebu_sdram_debug_fops = {
+	.open = mvebu_sdram_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
+{
+	struct mvebu_mbus_state *mbus = &mbus_state;
+	int win;
+
+	for (win = 0; win < mbus->soc->num_wins; win++) {
+		u64 wbase, wremap;
+		u32 wsize;
+		u8 wtarget, wattr;
+		int enabled;
+
+		mvebu_mbus_read_window(mbus, win,
+				       &enabled, &wbase, &wsize,
+				       &wtarget, &wattr, &wremap);
+
+		if (!enabled) {
+			seq_printf(seq, "[%02d] disabled\n", win);
+			continue;
+		}
+
+		seq_printf(seq, "[%02d] %016llx - %016llx : %04x:%04x",
+			   win, (unsigned long long)wbase,
+			   (unsigned long long)(wbase + wsize), wtarget, wattr);
+
+		if (!is_power_of_2(wsize) ||
+		    ((wbase & (u64)(wsize - 1)) != 0))
+			seq_puts(seq, " (Invalid base/size!!)");
+
+		if (mvebu_mbus_window_is_remappable(mbus, win)) {
+			seq_printf(seq, " (remap %016llx)\n",
+				   (unsigned long long)wremap);
+		} else
+			seq_printf(seq, "\n");
+	}
+
+	return 0;
+}
+
+static int mvebu_devs_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mvebu_devs_debug_show, inode->i_private);
+}
+
+static const struct file_operations mvebu_devs_debug_fops = {
+	.open = mvebu_devs_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
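+
+/*
+ * Illustrative 'devices' output line (hypothetical window):
+ *
+ *	[01] 00000000e8000000 - 00000000f0000000 : 0001:002e
+ *
+ * i.e. window 1 spans 128 MB at 0xe8000000 for target 0x01, attribute
+ * 0x2e.
+ */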
+
+/*
+ * SoC-specific functions and definitions
+ */
+
+static unsigned int generic_mbus_win_cfg_offset(int win)
+{
+	return win << 4;
+}
+
+static unsigned int armada_370_xp_mbus_win_cfg_offset(int win)
+{
+	/* The register layout is a bit annoying and the below code
+	 * tries to cope with it.
+	 * - At offset 0x0, there are the registers for the first 8
+	 *   windows, with 4 registers of 32 bits per window (ctrl,
+	 *   base, remap low, remap high)
+	 * - Then at offset 0x80, there is a hole of 0x10 bytes for
+	 *   the internal registers base address and internal units
+	 *   sync barrier register.
+	 * - Then at offset 0x90, there are the registers for 12
+	 *   windows, with only 2 registers of 32 bits per window
+	 *   (ctrl, base).
+	 */
+	if (win < 8)
+		return win << 4;
+	else
+		return 0x90 + ((win - 8) << 3);
+}
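+
+/* For example, window 10 lands at 0x90 + ((10 - 8) << 3) = 0xa0. */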
+
+static unsigned int mv78xx0_mbus_win_cfg_offset(int win)
+{
+	if (win < 8)
+		return win << 4;
+	else
+		return 0x900 + ((win - 8) << 4);
+}
+
+static unsigned int generic_mbus_win_remap_2_offset(int win)
+{
+	if (win < 2)
+		return generic_mbus_win_cfg_offset(win);
+	else
+		return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int generic_mbus_win_remap_4_offset(int win)
+{
+	if (win < 4)
+		return generic_mbus_win_cfg_offset(win);
+	else
+		return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int generic_mbus_win_remap_8_offset(int win)
+{
+	if (win < 8)
+		return generic_mbus_win_cfg_offset(win);
+	else
+		return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int armada_xp_mbus_win_remap_offset(int win)
+{
+	if (win < 8)
+		return generic_mbus_win_cfg_offset(win);
+	else if (win == 13)
+		return 0xF0 - WIN_REMAP_LO_OFF;
+	else
+		return MVEBU_MBUS_NO_REMAP;
+}
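+
+/*
+ * The 0xF0 - WIN_REMAP_LO_OFF base above makes the shared accessors land
+ * on window 13's remap pair: adding WIN_REMAP_LO_OFF and WIN_REMAP_HI_OFF
+ * yields 0xf0 and 0xf4 respectively.
+ */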
+
+/*
+ * Use the memblock information to find the MBus bridge hole in the
+ * physical address space.
+ */
+static void __init
+mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
+{
+	struct memblock_region *r;
+	uint64_t s = 0;
+
+	for_each_memblock(memory, r) {
+		/*
+		 * This part of the memory is above 4 GB, so we don't
+		 * care about it for the MBus bridge hole.
+		 */
+		if (r->base >= 0x100000000ULL)
+			continue;
+
+		/*
+		 * The MBus bridge hole is at the end of the RAM under
+		 * the 4 GB limit.
+		 */
+		if (r->base + r->size > s)
+			s = r->base + r->size;
+	}
+
+	*start = s;
+	*end = 0x100000000ULL;
+}
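+
+/*
+ * For example (illustrative layout), with 2 GB of RAM based at 0 the hole
+ * spans [0x80000000, 0x100000000), i.e. the top 2 GB below the 4 GB limit.
+ */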
+
+/*
+ * This function fills in the mvebu_mbus_dram_info_nooverlap data
+ * structure, by looking at the mvebu_mbus_dram_info data, and
+ * removing the parts of it that overlap with I/O windows.
+ */
+static void __init
+mvebu_mbus_setup_cpu_target_nooverlap(struct mvebu_mbus_state *mbus)
+{
+	uint64_t mbus_bridge_base, mbus_bridge_end;
+	int cs_nooverlap = 0;
+	int i;
+
+	mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end);
+
+	for (i = 0; i < mvebu_mbus_dram_info.num_cs; i++) {
+		struct mbus_dram_window *w;
+		u64 base, size, end;
+
+		w = &mvebu_mbus_dram_info.cs[i];
+		base = w->base;
+		size = w->size;
+		end = base + size;
+
+		/*
+		 * The CS is fully enclosed inside the MBus bridge
+		 * area, so ignore it.
+		 */
+		if (base >= mbus_bridge_base && end <= mbus_bridge_end)
+			continue;
+
+		/*
+		 * Beginning of CS overlaps with end of MBus, raise CS
+		 * base address, and shrink its size.
+		 */
+		if (base >= mbus_bridge_base && end > mbus_bridge_end) {
+			size -= mbus_bridge_end - base;
+			base = mbus_bridge_end;
+		}
+
+		/*
+		 * End of CS overlaps with beginning of MBus, shrink
+		 * CS size.
+		 */
+		if (base < mbus_bridge_base && end > mbus_bridge_base)
+			size -= end - mbus_bridge_base;
+
+		w = &mvebu_mbus_dram_info_nooverlap.cs[cs_nooverlap++];
+		w->cs_index = i;
+		w->mbus_attr = 0xf & ~(1 << i);
+		if (mbus->hw_io_coherency)
+			w->mbus_attr |= ATTR_HW_COHERENCY;
+		w->base = base;
+		w->size = size;
+	}
+
+	mvebu_mbus_dram_info_nooverlap.mbus_dram_target_id = TARGET_DDR;
+	mvebu_mbus_dram_info_nooverlap.num_cs = cs_nooverlap;
+}
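+
+/*
+ * Illustrative trim (hypothetical layout): a 4 GB CS0 at 0 with the
+ * bridge hole starting at 0xc0000000 hits the last case above and is
+ * shrunk to the 3 GB range [0, 0xc0000000).
+ */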
+
+static void __init
+mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
+{
+	int i;
+	int cs;
+
+	mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
+
+	for (i = 0, cs = 0; i < 4; i++) {
+		u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+		u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+
+		/*
+		 * We only take care of entries for which the chip
+		 * select is enabled, and that don't have high base
+		 * address bits set (devices can only access memory in
+		 * the first 32 bits of address space, i.e. below 4 GB).
+		 */
+		if ((size & DDR_SIZE_ENABLED) &&
+		    !(base & DDR_BASE_CS_HIGH_MASK)) {
+			struct mbus_dram_window *w;
+
+			w = &mvebu_mbus_dram_info.cs[cs++];
+			w->cs_index = i;
+			w->mbus_attr = 0xf & ~(1 << i);
+			if (mbus->hw_io_coherency)
+				w->mbus_attr |= ATTR_HW_COHERENCY;
+			w->base = base & DDR_BASE_CS_LOW_MASK;
+			w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
+		}
+	}
+	mvebu_mbus_dram_info.num_cs = cs;
+}
+
+static int
+mvebu_mbus_default_save_cpu_target(struct mvebu_mbus_state *mbus,
+				   u32 __iomem *store_addr)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+		u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+
+		writel(mbus->sdramwins_phys_base + DDR_BASE_CS_OFF(i),
+		       store_addr++);
+		writel(base, store_addr++);
+		writel(mbus->sdramwins_phys_base + DDR_SIZE_CS_OFF(i),
+		       store_addr++);
+		writel(size, store_addr++);
+	}
+
+	/* We've written 16 words to the store address */
+	return 16;
+}
+
+static void __init
+mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus)
+{
+	int i;
+	int cs;
+
+	mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
+
+	for (i = 0, cs = 0; i < 2; i++) {
+		u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
+
+		/*
+		 * Chip select enabled?
+		 */
+		if (map & 1) {
+			struct mbus_dram_window *w;
+
+			w = &mvebu_mbus_dram_info.cs[cs++];
+			w->cs_index = i;
+			w->mbus_attr = 0; /* CS address decoding done inside */
+					  /* the DDR controller, no need to  */
+					  /* provide attributes */
+			w->base = map & 0xff800000;
+			w->size = 0x100000 << (((map & 0x000f0000) >> 16) - 4);
+		}
+	}
+
+	mvebu_mbus_dram_info.num_cs = cs;
+}
+
+static int
+mvebu_mbus_dove_save_cpu_target(struct mvebu_mbus_state *mbus,
+				u32 __iomem *store_addr)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
+
+		writel(mbus->sdramwins_phys_base + DOVE_DDR_BASE_CS_OFF(i),
+		       store_addr++);
+		writel(map, store_addr++);
+	}
+
+	/* We've written 4 words to the store address */
+	return 4;
+}
+
+int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr)
+{
+	return mbus_state.soc->save_cpu_target(&mbus_state, store_addr);
+}
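+
+/*
+ * The words saved above form (register address, value) pairs; the intent
+ * (sketched here, see the per-SoC save_cpu_target helpers) is that the
+ * platform resume path can replay them to restore the SDRAM windows
+ * before the kernel regains control.
+ */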
+
+static const struct mvebu_mbus_soc_data armada_370_mbus_data = {
+	.num_wins            = 20,
+	.has_mbus_bridge     = true,
+	.win_cfg_offset      = armada_370_xp_mbus_win_cfg_offset,
+	.win_remap_offset    = generic_mbus_win_remap_8_offset,
+	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
+	.show_cpu_target     = mvebu_sdram_debug_show_orion,
+	.save_cpu_target     = mvebu_mbus_default_save_cpu_target,
+};
+
+static const struct mvebu_mbus_soc_data armada_xp_mbus_data = {
+	.num_wins            = 20,
+	.has_mbus_bridge     = true,
+	.win_cfg_offset      = armada_370_xp_mbus_win_cfg_offset,
+	.win_remap_offset    = armada_xp_mbus_win_remap_offset,
+	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
+	.show_cpu_target     = mvebu_sdram_debug_show_orion,
+	.save_cpu_target     = mvebu_mbus_default_save_cpu_target,
+};
+
+static const struct mvebu_mbus_soc_data kirkwood_mbus_data = {
+	.num_wins            = 8,
+	.win_cfg_offset      = generic_mbus_win_cfg_offset,
+	.save_cpu_target     = mvebu_mbus_default_save_cpu_target,
+	.win_remap_offset    = generic_mbus_win_remap_4_offset,
+	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
+	.show_cpu_target     = mvebu_sdram_debug_show_orion,
+};
+
+static const struct mvebu_mbus_soc_data dove_mbus_data = {
+	.num_wins            = 8,
+	.win_cfg_offset      = generic_mbus_win_cfg_offset,
+	.save_cpu_target     = mvebu_mbus_dove_save_cpu_target,
+	.win_remap_offset    = generic_mbus_win_remap_4_offset,
+	.setup_cpu_target    = mvebu_mbus_dove_setup_cpu_target,
+	.show_cpu_target     = mvebu_sdram_debug_show_dove,
+};
+
+/*
+ * Some variants of Orion5x have 4 remappable windows, some other have
+ * only two of them.
+ */
+static const struct mvebu_mbus_soc_data orion5x_4win_mbus_data = {
+	.num_wins            = 8,
+	.win_cfg_offset      = generic_mbus_win_cfg_offset,
+	.save_cpu_target     = mvebu_mbus_default_save_cpu_target,
+	.win_remap_offset    = generic_mbus_win_remap_4_offset,
+	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
+	.show_cpu_target     = mvebu_sdram_debug_show_orion,
+};
+
+static const struct mvebu_mbus_soc_data orion5x_2win_mbus_data = {
+	.num_wins            = 8,
+	.win_cfg_offset      = generic_mbus_win_cfg_offset,
+	.save_cpu_target     = mvebu_mbus_default_save_cpu_target,
+	.win_remap_offset    = generic_mbus_win_remap_2_offset,
+	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
+	.show_cpu_target     = mvebu_sdram_debug_show_orion,
+};
+
+static const struct mvebu_mbus_soc_data mv78xx0_mbus_data = {
+	.num_wins            = 14,
+	.win_cfg_offset      = mv78xx0_mbus_win_cfg_offset,
+	.save_cpu_target     = mvebu_mbus_default_save_cpu_target,
+	.win_remap_offset    = generic_mbus_win_remap_8_offset,
+	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
+	.show_cpu_target     = mvebu_sdram_debug_show_orion,
+};
+
+static const struct of_device_id of_mvebu_mbus_ids[] = {
+	{ .compatible = "marvell,armada370-mbus",
+	  .data = &armada_370_mbus_data, },
+	{ .compatible = "marvell,armada375-mbus",
+	  .data = &armada_xp_mbus_data, },
+	{ .compatible = "marvell,armada380-mbus",
+	  .data = &armada_xp_mbus_data, },
+	{ .compatible = "marvell,armadaxp-mbus",
+	  .data = &armada_xp_mbus_data, },
+	{ .compatible = "marvell,kirkwood-mbus",
+	  .data = &kirkwood_mbus_data, },
+	{ .compatible = "marvell,dove-mbus",
+	  .data = &dove_mbus_data, },
+	{ .compatible = "marvell,orion5x-88f5281-mbus",
+	  .data = &orion5x_4win_mbus_data, },
+	{ .compatible = "marvell,orion5x-88f5182-mbus",
+	  .data = &orion5x_2win_mbus_data, },
+	{ .compatible = "marvell,orion5x-88f5181-mbus",
+	  .data = &orion5x_2win_mbus_data, },
+	{ .compatible = "marvell,orion5x-88f6183-mbus",
+	  .data = &orion5x_4win_mbus_data, },
+	{ .compatible = "marvell,mv78xx0-mbus",
+	  .data = &mv78xx0_mbus_data, },
+	{ },
+};
+
+/*
+ * Public API of the driver
+ */
+int mvebu_mbus_add_window_remap_by_id(unsigned int target,
+				      unsigned int attribute,
+				      phys_addr_t base, size_t size,
+				      phys_addr_t remap)
+{
+	struct mvebu_mbus_state *s = &mbus_state;
+
+	if (!mvebu_mbus_window_conflicts(s, base, size, target, attribute)) {
+		pr_err("cannot add window '%x:%x', conflicts with another window\n",
+		       target, attribute);
+		return -EINVAL;
+	}
+
+	return mvebu_mbus_alloc_window(s, base, size, remap, target, attribute);
+}
+
+int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
+				phys_addr_t base, size_t size)
+{
+	return mvebu_mbus_add_window_remap_by_id(target, attribute, base,
+						 size, MVEBU_MBUS_NO_REMAP);
+}
+
+int mvebu_mbus_del_window(phys_addr_t base, size_t size)
+{
+	int win;
+
+	win = mvebu_mbus_find_window(&mbus_state, base, size);
+	if (win < 0)
+		return win;
+
+	mvebu_mbus_disable_window(&mbus_state, win);
+	return 0;
+}
+
+void mvebu_mbus_get_pcie_mem_aperture(struct resource *res)
+{
+	if (!res)
+		return;
+	*res = mbus_state.pcie_mem_aperture;
+}
+
+void mvebu_mbus_get_pcie_io_aperture(struct resource *res)
+{
+	if (!res)
+		return;
+	*res = mbus_state.pcie_io_aperture;
+}
+
+int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
+{
+	const struct mbus_dram_target_info *dram;
+	int i;
+
+	/* Get dram info */
+	dram = mv_mbus_dram_info();
+	if (!dram) {
+		pr_err("missing DRAM information\n");
+		return -ENODEV;
+	}
+
+	/* Try to find matching DRAM window for phyaddr */
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+
+		if (cs->base <= phyaddr &&
+			phyaddr <= (cs->base + cs->size - 1)) {
+			*target = dram->mbus_dram_target_id;
+			*attr = cs->mbus_attr;
+			return 0;
+		}
+	}
+
+	pr_err("invalid dram address %pa\n", &phyaddr);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info);
+
+int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target,
+			       u8 *attr)
+{
+	int win;
+
+	for (win = 0; win < mbus_state.soc->num_wins; win++) {
+		u64 wbase;
+		int enabled;
+
+		mvebu_mbus_read_window(&mbus_state, win, &enabled, &wbase,
+				       size, target, attr, NULL);
+
+		if (!enabled)
+			continue;
+
+		if (wbase <= phyaddr && phyaddr <= wbase + *size)
+			return win;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_io_win_info);
+
+static __init int mvebu_mbus_debugfs_init(void)
+{
+	struct mvebu_mbus_state *s = &mbus_state;
+
+	/*
+	 * If no base has been initialized, it doesn't make sense to
+	 * register the debugfs entries. We may be on a multiplatform
+	 * kernel that isn't running a Marvell EBU SoC.
+	 */
+	if (!s->mbuswins_base)
+		return 0;
+
+	s->debugfs_root = debugfs_create_dir("mvebu-mbus", NULL);
+	if (s->debugfs_root) {
+		s->debugfs_sdram = debugfs_create_file("sdram", S_IRUGO,
+						       s->debugfs_root, NULL,
+						       &mvebu_sdram_debug_fops);
+		s->debugfs_devs = debugfs_create_file("devices", S_IRUGO,
+						      s->debugfs_root, NULL,
+						      &mvebu_devs_debug_fops);
+	}
+
+	return 0;
+}
+fs_initcall(mvebu_mbus_debugfs_init);
+
+static int mvebu_mbus_suspend(void)
+{
+	struct mvebu_mbus_state *s = &mbus_state;
+	int win;
+
+	if (!s->mbusbridge_base)
+		return -ENODEV;
+
+	for (win = 0; win < s->soc->num_wins; win++) {
+		void __iomem *addr = s->mbuswins_base +
+			s->soc->win_cfg_offset(win);
+		void __iomem *addr_rmp;
+
+		s->wins[win].base = readl(addr + WIN_BASE_OFF);
+		s->wins[win].ctrl = readl(addr + WIN_CTRL_OFF);
+
+		if (!mvebu_mbus_window_is_remappable(s, win))
+			continue;
+
+		addr_rmp = s->mbuswins_base +
+			s->soc->win_remap_offset(win);
+
+		s->wins[win].remap_lo = readl(addr_rmp + WIN_REMAP_LO_OFF);
+		s->wins[win].remap_hi = readl(addr_rmp + WIN_REMAP_HI_OFF);
+	}
+
+	s->mbus_bridge_ctrl = readl(s->mbusbridge_base +
+				    MBUS_BRIDGE_CTRL_OFF);
+	s->mbus_bridge_base = readl(s->mbusbridge_base +
+				    MBUS_BRIDGE_BASE_OFF);
+
+	return 0;
+}
+
+static void mvebu_mbus_resume(void)
+{
+	struct mvebu_mbus_state *s = &mbus_state;
+	int win;
+
+	writel(s->mbus_bridge_ctrl,
+	       s->mbusbridge_base + MBUS_BRIDGE_CTRL_OFF);
+	writel(s->mbus_bridge_base,
+	       s->mbusbridge_base + MBUS_BRIDGE_BASE_OFF);
+
+	for (win = 0; win < s->soc->num_wins; win++) {
+		void __iomem *addr = s->mbuswins_base +
+			s->soc->win_cfg_offset(win);
+		void __iomem *addr_rmp;
+
+		writel(s->wins[win].base, addr + WIN_BASE_OFF);
+		writel(s->wins[win].ctrl, addr + WIN_CTRL_OFF);
+
+		if (!mvebu_mbus_window_is_remappable(s, win))
+			continue;
+
+		addr_rmp = s->mbuswins_base +
+			s->soc->win_remap_offset(win);
+
+		writel(s->wins[win].remap_lo, addr_rmp + WIN_REMAP_LO_OFF);
+		writel(s->wins[win].remap_hi, addr_rmp + WIN_REMAP_HI_OFF);
+	}
+}
+
+static struct syscore_ops mvebu_mbus_syscore_ops = {
+	.suspend	= mvebu_mbus_suspend,
+	.resume		= mvebu_mbus_resume,
+};
+
+static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
+					 phys_addr_t mbuswins_phys_base,
+					 size_t mbuswins_size,
+					 phys_addr_t sdramwins_phys_base,
+					 size_t sdramwins_size,
+					 phys_addr_t mbusbridge_phys_base,
+					 size_t mbusbridge_size,
+					 bool is_coherent)
+{
+	int win;
+
+	mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
+	if (!mbus->mbuswins_base)
+		return -ENOMEM;
+
+	mbus->sdramwins_base = ioremap(sdramwins_phys_base, sdramwins_size);
+	if (!mbus->sdramwins_base) {
+		iounmap(mbus->mbuswins_base);
+		return -ENOMEM;
+	}
+
+	mbus->sdramwins_phys_base = sdramwins_phys_base;
+
+	if (mbusbridge_phys_base) {
+		mbus->mbusbridge_base = ioremap(mbusbridge_phys_base,
+						mbusbridge_size);
+		if (!mbus->mbusbridge_base) {
+			iounmap(mbus->sdramwins_base);
+			iounmap(mbus->mbuswins_base);
+			return -ENOMEM;
+		}
+	} else
+		mbus->mbusbridge_base = NULL;
+
+	for (win = 0; win < mbus->soc->num_wins; win++)
+		mvebu_mbus_disable_window(mbus, win);
+
+	mbus->soc->setup_cpu_target(mbus);
+	mvebu_mbus_setup_cpu_target_nooverlap(mbus);
+
+	if (is_coherent)
+		writel(UNIT_SYNC_BARRIER_ALL,
+		       mbus->mbuswins_base + UNIT_SYNC_BARRIER_OFF);
+
+	register_syscore_ops(&mvebu_mbus_syscore_ops);
+
+	return 0;
+}
+
+int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
+			   size_t mbuswins_size,
+			   phys_addr_t sdramwins_phys_base,
+			   size_t sdramwins_size)
+{
+	const struct of_device_id *of_id;
+
+	for (of_id = of_mvebu_mbus_ids; of_id->compatible[0]; of_id++)
+		if (!strcmp(of_id->compatible, soc))
+			break;
+
+	if (!of_id->compatible[0]) {
+		pr_err("could not find a matching SoC family\n");
+		return -ENODEV;
+	}
+
+	mbus_state.soc = of_id->data;
+
+	return mvebu_mbus_common_init(&mbus_state,
+			mbuswins_phys_base,
+			mbuswins_size,
+			sdramwins_phys_base,
+			sdramwins_size, 0, 0, false);
+}
+
+#ifdef CONFIG_OF
+/*
+ * The window IDs in the ranges DT property have the following format:
+ *  - bits 28 to 31: MBus custom field
+ *  - bits 24 to 27: window target ID
+ *  - bits 16 to 23: window attribute ID
+ *  - bits  0 to 15: unused
+ */
+#define CUSTOM(id) (((id) & 0xF0000000) >> 24)
+#define TARGET(id) (((id) & 0x0F000000) >> 24)
+#define ATTR(id)   (((id) & 0x00FF0000) >> 16)
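+
+/*
+ * For example (hypothetical ID), 0x012e0000 decodes to CUSTOM = 0,
+ * TARGET = 0x01 and ATTR = 0x2e, i.e. a static window for target 0x01
+ * with attribute 0x2e.
+ */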
+
+static int __init mbus_dt_setup_win(struct mvebu_mbus_state *mbus,
+				    u32 base, u32 size,
+				    u8 target, u8 attr)
+{
+	if (!mvebu_mbus_window_conflicts(mbus, base, size, target, attr)) {
+		pr_err("cannot add window '%04x:%04x', conflicts with another window\n",
+		       target, attr);
+		return -EBUSY;
+	}
+
+	if (mvebu_mbus_alloc_window(mbus, base, size, MVEBU_MBUS_NO_REMAP,
+				    target, attr)) {
+		pr_err("cannot add window '%04x:%04x', too many windows\n",
+		       target, attr);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int __init
+mbus_parse_ranges(struct device_node *node,
+		  int *addr_cells, int *c_addr_cells, int *c_size_cells,
+		  int *cell_count, const __be32 **ranges_start,
+		  const __be32 **ranges_end)
+{
+	const __be32 *prop;
+	int ranges_len, tuple_len;
+
+	/* Allow a node with no 'ranges' property */
+	*ranges_start = of_get_property(node, "ranges", &ranges_len);
+	if (*ranges_start == NULL) {
+		*addr_cells = *c_addr_cells = *c_size_cells = *cell_count = 0;
+		*ranges_start = *ranges_end = NULL;
+		return 0;
+	}
+	*ranges_end = *ranges_start + ranges_len / sizeof(__be32);
+
+	*addr_cells = of_n_addr_cells(node);
+
+	prop = of_get_property(node, "#address-cells", NULL);
+	*c_addr_cells = be32_to_cpup(prop);
+
+	prop = of_get_property(node, "#size-cells", NULL);
+	*c_size_cells = be32_to_cpup(prop);
+
+	*cell_count = *addr_cells + *c_addr_cells + *c_size_cells;
+	tuple_len = (*cell_count) * sizeof(__be32);
+
+	if (ranges_len % tuple_len) {
+		pr_warn("malformed ranges entry '%s'\n", node->name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int __init mbus_dt_setup(struct mvebu_mbus_state *mbus,
+				struct device_node *np)
+{
+	int addr_cells, c_addr_cells, c_size_cells;
+	int i, ret, cell_count;
+	const __be32 *r, *ranges_start, *ranges_end;
+
+	ret = mbus_parse_ranges(np, &addr_cells, &c_addr_cells,
+				&c_size_cells, &cell_count,
+				&ranges_start, &ranges_end);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0, r = ranges_start; r < ranges_end; r += cell_count, i++) {
+		u32 windowid, base, size;
+		u8 target, attr;
+
+		/*
+		 * An entry with a non-zero custom field does not
+		 * correspond to a static window, so skip it.
+		 */
+		windowid = of_read_number(r, 1);
+		if (CUSTOM(windowid))
+			continue;
+
+		target = TARGET(windowid);
+		attr = ATTR(windowid);
+
+		base = of_read_number(r + c_addr_cells, addr_cells);
+		size = of_read_number(r + c_addr_cells + addr_cells,
+				      c_size_cells);
+		ret = mbus_dt_setup_win(mbus, base, size, target, attr);
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
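+
+/*
+ * Illustrative 'ranges' entry that mbus_dt_setup() accepts (hypothetical
+ * values, assuming #address-cells = <2>, one parent address cell and
+ * #size-cells = <1>):
+ *
+ *	ranges = <0x012e0000 0 0xe8000000 0x08000000>;
+ *
+ * i.e. a static 128 MB window at 0xe8000000 for target 0x01, attribute
+ * 0x2e.
+ */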
+
+static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
+						 struct resource *mem,
+						 struct resource *io)
+{
+	u32 reg[2];
+	int ret;
+
+	/*
+	 * These are optional, so we make sure that resource_size(x) will
+	 * return 0.
+	 */
+	memset(mem, 0, sizeof(struct resource));
+	mem->end = -1;
+	memset(io, 0, sizeof(struct resource));
+	io->end = -1;
+
+	ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg));
+	if (!ret) {
+		mem->start = reg[0];
+		mem->end = mem->start + reg[1] - 1;
+		mem->flags = IORESOURCE_MEM;
+	}
+
+	ret = of_property_read_u32_array(np, "pcie-io-aperture", reg, ARRAY_SIZE(reg));
+	if (!ret) {
+		io->start = reg[0];
+		io->end = io->start + reg[1] - 1;
+		io->flags = IORESOURCE_IO;
+	}
+}
+
+int __init mvebu_mbus_dt_init(bool is_coherent)
+{
+	struct resource mbuswins_res, sdramwins_res, mbusbridge_res;
+	struct device_node *np, *controller;
+	const struct of_device_id *of_id;
+	const __be32 *prop;
+	int ret;
+
+	np = of_find_matching_node_and_match(NULL, of_mvebu_mbus_ids, &of_id);
+	if (!np) {
+		pr_err("could not find a matching SoC family\n");
+		return -ENODEV;
+	}
+
+	mbus_state.soc = of_id->data;
+
+	prop = of_get_property(np, "controller", NULL);
+	if (!prop) {
+		pr_err("required 'controller' property missing\n");
+		return -EINVAL;
+	}
+
+	controller = of_find_node_by_phandle(be32_to_cpup(prop));
+	if (!controller) {
+		pr_err("could not find an 'mbus-controller' node\n");
+		return -ENODEV;
+	}
+
+	if (of_address_to_resource(controller, 0, &mbuswins_res)) {
+		pr_err("cannot get MBUS register address\n");
+		return -EINVAL;
+	}
+
+	if (of_address_to_resource(controller, 1, &sdramwins_res)) {
+		pr_err("cannot get SDRAM register address\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Set the resource to 0 so that it can be left unmapped by
+	 * mvebu_mbus_common_init() if the DT doesn't carry the
+	 * necessary information. This is needed to preserve backward
+	 * compatibility.
+	 */
+	memset(&mbusbridge_res, 0, sizeof(mbusbridge_res));
+
+	if (mbus_state.soc->has_mbus_bridge) {
+		if (of_address_to_resource(controller, 2, &mbusbridge_res))
+			pr_warn(FW_WARN "deprecated mbus-mvebu Device Tree, suspend/resume will not work\n");
+	}
+
+	mbus_state.hw_io_coherency = is_coherent;
+
+	/* Get optional pcie-{mem,io}-aperture properties */
+	mvebu_mbus_get_pcie_resources(np, &mbus_state.pcie_mem_aperture,
+					  &mbus_state.pcie_io_aperture);
+
+	ret = mvebu_mbus_common_init(&mbus_state,
+				     mbuswins_res.start,
+				     resource_size(&mbuswins_res),
+				     sdramwins_res.start,
+				     resource_size(&sdramwins_res),
+				     mbusbridge_res.start,
+				     resource_size(&mbusbridge_res),
+				     is_coherent);
+	if (ret)
+		return ret;
+
+	/* Setup statically declared windows in the DT */
+	return mbus_dt_setup(&mbus_state, np);
+}
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/bus/omap-ocp2scp.c b/src/kernel/linux/v4.14/drivers/bus/omap-ocp2scp.c
new file mode 100644
index 0000000..77791f3
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/omap-ocp2scp.c
@@ -0,0 +1,128 @@
+/*
+ * omap-ocp2scp.c - transform ocp interface protocol to scp protocol
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#define OCP2SCP_TIMING 0x18
+#define SYNC2_MASK 0xf
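+
+/*
+ * Worked example (hypothetical readback): if OCP2SCP_TIMING reads 0xfe2,
+ * the probe below clears SYNC2_MASK (0xfe2 & ~0xf = 0xfe0) and ORs in
+ * 0x6, writing back 0xfe6 so that SYNC2 meets the recommended minimum
+ * of 0x6.
+ */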
+
+static int ocp2scp_remove_devices(struct device *dev, void *c)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+
+	platform_device_unregister(pdev);
+
+	return 0;
+}
+
+static int omap_ocp2scp_probe(struct platform_device *pdev)
+{
+	int ret;
+	u32 reg;
+	void __iomem *regs;
+	struct resource *res;
+	struct device_node *np = pdev->dev.of_node;
+
+	if (np) {
+		ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
+		if (ret) {
+			dev_err(&pdev->dev,
+			    "failed to add resources for ocp2scp child\n");
+			goto err0;
+		}
+	}
+
+	pm_runtime_enable(&pdev->dev);
+	/*
+	 * As per AM572x TRM: http://www.ti.com/lit/ug/spruhz6/spruhz6.pdf
+	 * under section 26.3.2.2, table 26-26 OCP2SCP TIMING Caution;
+	 * As per OMAP4430 TRM: http://www.ti.com/lit/ug/swpu231ap/swpu231ap.pdf
+	 * under section 23.12.6.2.2 , Table 23-1213 OCP2SCP TIMING Caution;
+	 * As per OMAP4460 TRM: http://www.ti.com/lit/ug/swpu235ab/swpu235ab.pdf
+	 * under section 23.12.6.2.2, Table 23-1213 OCP2SCP TIMING Caution;
+	 * As per OMAP543x TRM http://www.ti.com/lit/pdf/swpu249
+	 * under section 27.3.2.2, Table 27-27 OCP2SCP TIMING Caution;
+	 *
+	 * Read path of OCP2SCP is not working properly due to low reset value
+	 * of SYNC2 parameter in OCP2SCP. Suggested reset value is 0x6 or more.
+	 */
+	if (!of_device_is_compatible(np, "ti,am437x-ocp2scp")) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		regs = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(regs)) {
+			ret = PTR_ERR(regs);
+			goto err1;
+		}
+
+		pm_runtime_get_sync(&pdev->dev);
+		reg = readl_relaxed(regs + OCP2SCP_TIMING);
+		reg &= ~(SYNC2_MASK);
+		reg |= 0x6;
+		writel_relaxed(reg, regs + OCP2SCP_TIMING);
+		pm_runtime_put_sync(&pdev->dev);
+	}
+
+	return 0;
+
+err1:
+	pm_runtime_disable(&pdev->dev);
+
+err0:
+	device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
+
+	return ret;
+}
+
+static int omap_ocp2scp_remove(struct platform_device *pdev)
+{
+	pm_runtime_disable(&pdev->dev);
+	device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id omap_ocp2scp_id_table[] = {
+	{ .compatible = "ti,omap-ocp2scp" },
+	{ .compatible = "ti,am437x-ocp2scp" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
+#endif
+
+static struct platform_driver omap_ocp2scp_driver = {
+	.probe		= omap_ocp2scp_probe,
+	.remove		= omap_ocp2scp_remove,
+	.driver		= {
+		.name	= "omap-ocp2scp",
+		.of_match_table = of_match_ptr(omap_ocp2scp_id_table),
+	},
+};
+
+module_platform_driver(omap_ocp2scp_driver);
+
+MODULE_ALIAS("platform:omap-ocp2scp");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("OMAP OCP2SCP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.14/drivers/bus/omap_l3_noc.c b/src/kernel/linux/v4.14/drivers/bus/omap_l3_noc.c
new file mode 100644
index 0000000..5012e3a
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/omap_l3_noc.c
@@ -0,0 +1,377 @@
+/*
+ * OMAP L3 Interconnect error handling driver
+ *
+ * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
+ *	Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *	Sricharan <r.sricharan@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "omap_l3_noc.h"
+
+/**
+ * l3_handle_target() - Handle Target specific parse and reporting
+ * @l3:		pointer to l3 struct
+ * @base:	base address of clkdm
+ * @flag_mux:	flagmux corresponding to the event
+ * @err_src:	error source index of the slave (target)
+ *
+ * This does the second part of the error interrupt handling:
+ *	3) Parse the slave information.
+ *	4) Print the logged information.
+ *	5) Add dump stack to provide kernel trace.
+ *	6) Clear the source if known.
+ *
+ * This handles two types of errors:
+ *	1) Custom errors in L3 :
+ *		Target like DMM/FW/EMIF generates SRESP=ERR error
+ *	2) Standard L3 error:
+ *		- Unsupported CMD.
+ *			L3 tries to access target while it is idle
+ *		- OCP disconnect.
+ *		- Address hole error:
+ *			If DSS/ISS/FDIF/USBHOSTFS access a target where they
+ *			do not have connectivity, the error is logged in
+ *			their default target which is DMM2.
+ *
+ *	On High Secure devices, firewall errors are possible and those
+ *	can be trapped as well. But the trapping is implemented as part
+ *	of the secure software and hence need not be implemented here.
+ */
+static int l3_handle_target(struct omap_l3 *l3, void __iomem *base,
+			    struct l3_flagmux_data *flag_mux, int err_src)
+{
+	int k;
+	u32 std_err_main, clear, masterid;
+	u8 op_code, m_req_info;
+	void __iomem *l3_targ_base;
+	void __iomem *l3_targ_stderr, *l3_targ_slvofslsb, *l3_targ_mstaddr;
+	void __iomem *l3_targ_hdr, *l3_targ_info;
+	struct l3_target_data *l3_targ_inst;
+	struct l3_masters_data *master;
+	char *target_name, *master_name = "UN IDENTIFIED";
+	char *err_description;
+	char err_string[30] = { 0 };
+	char info_string[60] = { 0 };
+
+	/* We DO NOT expect err_src to go out of bounds */
+	BUG_ON(err_src > MAX_CLKDM_TARGETS);
+
+	if (err_src < flag_mux->num_targ_data) {
+		l3_targ_inst = &flag_mux->l3_targ[err_src];
+		target_name = l3_targ_inst->name;
+		l3_targ_base = base + l3_targ_inst->offset;
+	} else {
+		target_name = L3_TARGET_NOT_SUPPORTED;
+	}
+
+	if (target_name == L3_TARGET_NOT_SUPPORTED)
+		return -ENODEV;
+
+	/* Read the stderrlog_main_source from clk domain */
+	l3_targ_stderr = l3_targ_base + L3_TARG_STDERRLOG_MAIN;
+	l3_targ_slvofslsb = l3_targ_base + L3_TARG_STDERRLOG_SLVOFSLSB;
+
+	std_err_main = readl_relaxed(l3_targ_stderr);
+
+	switch (std_err_main & CUSTOM_ERROR) {
+	case STANDARD_ERROR:
+		err_description = "Standard";
+		snprintf(err_string, sizeof(err_string),
+			 ": At Address: 0x%08X ",
+			 readl_relaxed(l3_targ_slvofslsb));
+
+		l3_targ_mstaddr = l3_targ_base + L3_TARG_STDERRLOG_MSTADDR;
+		l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_HDR;
+		l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_INFO;
+		break;
+
+	case CUSTOM_ERROR:
+		err_description = "Custom";
+
+		l3_targ_mstaddr = l3_targ_base +
+				  L3_TARG_STDERRLOG_CINFO_MSTADDR;
+		l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_CINFO_OPCODE;
+		l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_CINFO_INFO;
+		break;
+
+	default:
+		/* Nothing to be handled here as of now */
+		return 0;
+	}
+
+	/* STDERRLOG_MSTADDR Stores the NTTP master address. */
+	masterid = (readl_relaxed(l3_targ_mstaddr) &
+		    l3->mst_addr_mask) >> __ffs(l3->mst_addr_mask);
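+	/*
+	 * e.g. on OMAP4 (mst_addr_mask = 0xFC, so __ffs() = 2) a raw
+	 * MSTADDR field of 0x20 decodes to master ID 0x08, the DSP
+	 * (illustrative values).
+	 */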
+
+	for (k = 0, master = l3->l3_masters; k < l3->num_masters;
+	     k++, master++) {
+		if (masterid == master->id) {
+			master_name = master->name;
+			break;
+		}
+	}
+
+	op_code = readl_relaxed(l3_targ_hdr) & 0x7;
+
+	m_req_info = readl_relaxed(l3_targ_info) & 0xF;
+	snprintf(info_string, sizeof(info_string),
+		 ": %s in %s mode during %s access",
+		 (m_req_info & BIT(0)) ? "Opcode Fetch" : "Data Access",
+		 (m_req_info & BIT(1)) ? "Supervisor" : "User",
+		 (m_req_info & BIT(3)) ? "Debug" : "Functional");
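+	/*
+	 * For instance, m_req_info == 0x2 renders as "Data Access in
+	 * Supervisor mode during Functional access" (illustrative value).
+	 */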
+
+	WARN(true,
+	     "%s:L3 %s Error: MASTER %s TARGET %s (%s)%s%s\n",
+	     dev_name(l3->dev),
+	     err_description,
+	     master_name, target_name,
+	     l3_transaction_type[op_code],
+	     err_string, info_string);
+
+	/* clear the std error log*/
+	clear = std_err_main | CLEAR_STDERR_LOG;
+	writel_relaxed(clear, l3_targ_stderr);
+
+	return 0;
+}
+
+/**
+ * l3_interrupt_handler() - interrupt handler for l3 events
+ * @irq:	irq number
+ * @_l3:	pointer to l3 structure
+ *
+ * Interrupt Handler for L3 error detection.
+ *	1) Identify the L3 clockdomain partition to which the error belongs.
+ *	2) Identify the slave where the error information is logged
+ *	... handle the slave event..
+ *	7) if the slave is unknown, mask out the slave.
+ */
+static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
+{
+	struct omap_l3 *l3 = _l3;
+	int inttype, i, ret;
+	int err_src = 0;
+	u32 err_reg, mask_val;
+	void __iomem *base, *mask_reg;
+	struct l3_flagmux_data *flag_mux;
+
+	/* Get the Type of interrupt */
+	inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
+
+	for (i = 0; i < l3->num_modules; i++) {
+		/*
+		 * Read the regerr register of the clock domain
+		 * to determine the source
+		 */
+		base = l3->l3_base[i];
+		flag_mux = l3->l3_flagmux[i];
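+		/*
+		 * inttype (0 = application, 1 = debug) selects between two
+		 * register banks spaced 8 bytes apart, hence the << 3 below.
+		 */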
+		err_reg = readl_relaxed(base + flag_mux->offset +
+					L3_FLAGMUX_REGERR0 + (inttype << 3));
+
+		err_reg &= ~(inttype ? flag_mux->mask_app_bits :
+				flag_mux->mask_dbg_bits);
+
+		/* Get the corresponding error and analyse */
+		if (err_reg) {
+			/* Identify the source from control status register */
+			err_src = __ffs(err_reg);
+
+			ret = l3_handle_target(l3, base, flag_mux, err_src);
+
+			/*
+			 * Certain platforms may have "undocumented" status
+			 * pending on boot, so don't generate a severe warning
+			 * here. Just mask it off to prevent the error from
+			 * recurring and locking up the system.
+			 */
+			if (ret) {
+				dev_err(l3->dev,
+					"L3 %s error: target %d mod:%d %s\n",
+					inttype ? "debug" : "application",
+					err_src, i, "(unclearable)");
+
+				mask_reg = base + flag_mux->offset +
+					   L3_FLAGMUX_MASK0 + (inttype << 3);
+				mask_val = readl_relaxed(mask_reg);
+				mask_val &= ~(1 << err_src);
+				writel_relaxed(mask_val, mask_reg);
+
+				/* Mark these bits as to be ignored */
+				if (inttype)
+					flag_mux->mask_app_bits |= 1 << err_src;
+				else
+					flag_mux->mask_dbg_bits |= 1 << err_src;
+			}
+
+			/* Error found and handled, so stop scanning */
+			return IRQ_HANDLED;
+		}
+	}
+
+	dev_err(l3->dev, "L3 %s IRQ not handled!!\n",
+		inttype ? "debug" : "application");
+
+	return IRQ_NONE;
+}
+
+static const struct of_device_id l3_noc_match[] = {
+	{.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data},
+	{.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data},
+	{.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
+	{.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
+	{},
+};
+MODULE_DEVICE_TABLE(of, l3_noc_match);
+
+static int omap_l3_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *of_id;
+	static struct omap_l3 *l3;
+	int ret, i, res_idx;
+
+	of_id = of_match_device(l3_noc_match, &pdev->dev);
+	if (!of_id) {
+		dev_err(&pdev->dev, "OF data missing\n");
+		return -EINVAL;
+	}
+
+	l3 = devm_kzalloc(&pdev->dev, sizeof(*l3), GFP_KERNEL);
+	if (!l3)
+		return -ENOMEM;
+
+	memcpy(l3, of_id->data, sizeof(*l3));
+	l3->dev = &pdev->dev;
+	platform_set_drvdata(pdev, l3);
+
+	/* Get mem resources */
+	for (i = 0, res_idx = 0; i < l3->num_modules; i++) {
+		struct resource	*res;
+
+		if (l3->l3_base[i] == L3_BASE_IS_SUBMODULE) {
+			/* First entry cannot be submodule */
+			BUG_ON(i == 0);
+			l3->l3_base[i] = l3->l3_base[i - 1];
+			continue;
+		}
+		res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx);
+		l3->l3_base[i] = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(l3->l3_base[i])) {
+			dev_err(l3->dev, "ioremap %d failed\n", i);
+			return PTR_ERR(l3->l3_base[i]);
+		}
+		res_idx++;
+	}
+
+	/*
+	 * Setup interrupt Handlers
+	 */
+	l3->debug_irq = platform_get_irq(pdev, 0);
+	ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
+			       0x0, "l3-dbg-irq", l3);
+	if (ret) {
+		dev_err(l3->dev, "request_irq failed for %d\n",
+			l3->debug_irq);
+		return ret;
+	}
+
+	l3->app_irq = platform_get_irq(pdev, 1);
+	ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
+			       0x0, "l3-app-irq", l3);
+	if (ret)
+		dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
+
+	return ret;
+}
+
+#ifdef	CONFIG_PM_SLEEP
+
+/**
+ * l3_resume_noirq() - resume function for l3_noc
+ * @dev:	pointer to l3_noc device structure
+ *
+ * Only a resume handler is provided, since the delta register
+ * configuration is already maintained at runtime as part of
+ * configuring the system.
+ */
+static int l3_resume_noirq(struct device *dev)
+{
+	struct omap_l3 *l3 = dev_get_drvdata(dev);
+	int i;
+	struct l3_flagmux_data *flag_mux;
+	void __iomem *base, *mask_regx = NULL;
+	u32 mask_val;
+
+	for (i = 0; i < l3->num_modules; i++) {
+		base = l3->l3_base[i];
+		flag_mux = l3->l3_flagmux[i];
+		if (!flag_mux->mask_app_bits && !flag_mux->mask_dbg_bits)
+			continue;
+
+		mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
+			   (L3_APPLICATION_ERROR << 3);
+		mask_val = readl_relaxed(mask_regx);
+		mask_val &= ~(flag_mux->mask_app_bits);
+
+		writel_relaxed(mask_val, mask_regx);
+		mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
+			   (L3_DEBUG_ERROR << 3);
+		mask_val = readl_relaxed(mask_regx);
+		mask_val &= ~(flag_mux->mask_dbg_bits);
+
+		writel_relaxed(mask_val, mask_regx);
+	}
+
+	/* Dummy read to force OCP barrier */
+	if (mask_regx)
+		(void)readl(mask_regx);
+
+	return 0;
+}
+
+static const struct dev_pm_ops l3_dev_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, l3_resume_noirq)
+};
+
+#define L3_DEV_PM_OPS (&l3_dev_pm_ops)
+#else
+#define L3_DEV_PM_OPS NULL
+#endif
+
+static struct platform_driver omap_l3_driver = {
+	.probe		= omap_l3_probe,
+	.driver		= {
+		.name		= "omap_l3_noc",
+		.pm		= L3_DEV_PM_OPS,
+		.of_match_table = of_match_ptr(l3_noc_match),
+	},
+};
+
+static int __init omap_l3_init(void)
+{
+	return platform_driver_register(&omap_l3_driver);
+}
+postcore_initcall_sync(omap_l3_init);
+
+static void __exit omap_l3_exit(void)
+{
+	platform_driver_unregister(&omap_l3_driver);
+}
+module_exit(omap_l3_exit);
diff --git a/src/kernel/linux/v4.14/drivers/bus/omap_l3_noc.h b/src/kernel/linux/v4.14/drivers/bus/omap_l3_noc.h
new file mode 100644
index 0000000..73431f8
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/omap_l3_noc.h
@@ -0,0 +1,501 @@
+/*
+ * OMAP L3 Interconnect  error handling driver header
+ *
+ * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
+ *	Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *	sricharan <r.sricharan@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __OMAP_L3_NOC_H
+#define __OMAP_L3_NOC_H
+
+#define MAX_L3_MODULES			3
+#define MAX_CLKDM_TARGETS		31
+
+#define CLEAR_STDERR_LOG		(1 << 31)
+#define CUSTOM_ERROR			0x2
+#define STANDARD_ERROR			0x0
+#define INBAND_ERROR			0x0
+#define L3_APPLICATION_ERROR		0x0
+#define L3_DEBUG_ERROR			0x1
+
+/* L3 TARG register offsets */
+#define L3_TARG_STDERRLOG_MAIN		0x48
+#define L3_TARG_STDERRLOG_HDR		0x4c
+#define L3_TARG_STDERRLOG_MSTADDR	0x50
+#define L3_TARG_STDERRLOG_INFO		0x58
+#define L3_TARG_STDERRLOG_SLVOFSLSB	0x5c
+#define L3_TARG_STDERRLOG_CINFO_INFO	0x64
+#define L3_TARG_STDERRLOG_CINFO_MSTADDR	0x68
+#define L3_TARG_STDERRLOG_CINFO_OPCODE	0x6c
+#define L3_FLAGMUX_REGERR0		0xc
+#define L3_FLAGMUX_MASK0		0x8
+
+#define L3_TARGET_NOT_SUPPORTED		NULL
+
+#define L3_BASE_IS_SUBMODULE		((void __iomem *)(1 << 0))
+
+static const char * const l3_transaction_type[] = {
+	/* 0 0 0 */ "Idle",
+	/* 0 0 1 */ "Write",
+	/* 0 1 0 */ "Read",
+	/* 0 1 1 */ "ReadEx",
+	/* 1 0 0 */ "Read Link",
+	/* 1 0 1 */ "Write Non-Posted",
+	/* 1 1 0 */ "Write Conditional",
+	/* 1 1 1 */ "Write Broadcast",
+};
+
+/**
+ * struct l3_masters_data - L3 Master information
+ * @id:		ID of the L3 Master
+ * @name:	master name
+ */
+struct l3_masters_data {
+	u32 id;
+	char *name;
+};
+
+/**
+ * struct l3_target_data - L3 Target information
+ * @offset:	Offset from base for L3 Target
+ * @name:	Target name
+ *
+ * Target information is indexed by the flag mux bit field definitions.
+ */
+struct l3_target_data {
+	u32 offset;
+	char *name;
+};
+
+/**
+ * struct l3_flagmux_data - Flag Mux information
+ * @offset:	offset from base for flagmux register
+ * @l3_targ:	array indexed by flagmux index (bit offset) pointing to the
+ *		target data. unsupported ones are marked with
+ *		L3_TARGET_NOT_SUPPORTED
+ * @num_targ_data: number of entries in target data
+ * @mask_app_bits: ignore these from raw application irq status
+ * @mask_dbg_bits: ignore these from raw debug irq status
+ */
+struct l3_flagmux_data {
+	u32 offset;
+	struct l3_target_data *l3_targ;
+	u8 num_targ_data;
+	u32 mask_app_bits;
+	u32 mask_dbg_bits;
+};
+
+
+/**
+ * struct omap_l3 - Description of data relevant for L3 bus.
+ * @dev:	device representing the bus (populated runtime)
+ * @l3_base:	base addresses of modules (populated runtime if 0)
+ *		if set to L3_BASE_IS_SUBMODULE, then uses previous
+ *		module index as the base address
+ * @l3_flagmux:	per-module array of flag mux data, with offsets
+ *		relative to the corresponding module base.
+ * @num_modules: number of clock domains / modules.
+ * @l3_masters:	array pointing to master data containing name and register
+ *		offset for the master.
+ * @num_masters: number of masters
+ * @mst_addr_mask: Mask representing MSTADDR information of NTTP packet
+ * @debug_irq:	irq number of the debug interrupt (populated runtime)
+ * @app_irq:	irq number of the application interrupt (populated runtime)
+ */
+struct omap_l3 {
+	struct device *dev;
+
+	void __iomem *l3_base[MAX_L3_MODULES];
+	struct l3_flagmux_data **l3_flagmux;
+	int num_modules;
+
+	struct l3_masters_data *l3_masters;
+	int num_masters;
+	u32 mst_addr_mask;
+
+	int debug_irq;
+	int app_irq;
+};
+
+static struct l3_target_data omap_l3_target_data_clk1[] = {
+	{0x100,	"DMM1",},
+	{0x200,	"DMM2",},
+	{0x300,	"ABE",},
+	{0x400,	"L4CFG",},
+	{0x600,	"CLK2PWRDISC",},
+	{0x0,	"HOSTCLK1",},
+	{0x900,	"L4WAKEUP",},
+};
+
+static struct l3_flagmux_data omap_l3_flagmux_clk1 = {
+	.offset = 0x500,
+	.l3_targ = omap_l3_target_data_clk1,
+	.num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk1),
+};
+
+
+static struct l3_target_data omap_l3_target_data_clk2[] = {
+	{0x500,	"CORTEXM3",},
+	{0x300,	"DSS",},
+	{0x100,	"GPMC",},
+	{0x400,	"ISS",},
+	{0x700,	"IVAHD",},
+	{0xD00,	"AES1",},
+	{0x900,	"L4PER0",},
+	{0x200,	"OCMRAM",},
+	{0x100,	"GPMCsERROR",},
+	{0x600,	"SGX",},
+	{0x800,	"SL2",},
+	{0x1600, "C2C",},
+	{0x1100, "PWRDISCCLK1",},
+	{0xF00,	"SHA1",},
+	{0xE00,	"AES2",},
+	{0xC00,	"L4PER3",},
+	{0xA00,	"L4PER1",},
+	{0xB00,	"L4PER2",},
+	{0x0,	"HOSTCLK2",},
+	{0x1800, "CAL",},
+	{0x1700, "LLI",},
+};
+
+static struct l3_flagmux_data omap_l3_flagmux_clk2 = {
+	.offset = 0x1000,
+	.l3_targ = omap_l3_target_data_clk2,
+	.num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk2),
+};
+
+
+static struct l3_target_data omap4_l3_target_data_clk3[] = {
+	{0x0100, "DEBUGSS",},
+};
+
+static struct l3_flagmux_data omap4_l3_flagmux_clk3 = {
+	.offset = 0x0200,
+	.l3_targ = omap4_l3_target_data_clk3,
+	.num_targ_data = ARRAY_SIZE(omap4_l3_target_data_clk3),
+};
+
+static struct l3_masters_data omap_l3_masters[] = {
+	{ 0x00, "MPU"},
+	{ 0x04, "CS_ADP"},
+	{ 0x05, "xxx"},
+	{ 0x08, "DSP"},
+	{ 0x0C, "IVAHD"},
+	{ 0x10, "ISS"},
+	{ 0x11, "DucatiM3"},
+	{ 0x12, "FaceDetect"},
+	{ 0x14, "SDMA_Rd"},
+	{ 0x15, "SDMA_Wr"},
+	{ 0x16, "xxx"},
+	{ 0x17, "xxx"},
+	{ 0x18, "SGX"},
+	{ 0x1C, "DSS"},
+	{ 0x20, "C2C"},
+	{ 0x22, "xxx"},
+	{ 0x23, "xxx"},
+	{ 0x24, "HSI"},
+	{ 0x28, "MMC1"},
+	{ 0x29, "MMC2"},
+	{ 0x2A, "MMC6"},
+	{ 0x2C, "UNIPRO1"},
+	{ 0x30, "USBHOSTHS"},
+	{ 0x31, "USBOTGHS"},
+	{ 0x32, "USBHOSTFS"}
+};
+
+static struct l3_flagmux_data *omap4_l3_flagmux[] = {
+	&omap_l3_flagmux_clk1,
+	&omap_l3_flagmux_clk2,
+	&omap4_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 omap4_l3_data = {
+	.l3_flagmux = omap4_l3_flagmux,
+	.num_modules = ARRAY_SIZE(omap4_l3_flagmux),
+	.l3_masters = omap_l3_masters,
+	.num_masters = ARRAY_SIZE(omap_l3_masters),
+	/* The 6 MSBs of the register field are used to distinguish the initiator */
+	.mst_addr_mask = 0xFC,
+};
+
+/* OMAP5 data */
+static struct l3_target_data omap5_l3_target_data_clk3[] = {
+	{0x0100, "L3INSTR",},
+	{0x0300, "DEBUGSS",},
+	{0x0,	 "HOSTCLK3",},
+};
+
+static struct l3_flagmux_data omap5_l3_flagmux_clk3 = {
+	.offset = 0x0200,
+	.l3_targ = omap5_l3_target_data_clk3,
+	.num_targ_data = ARRAY_SIZE(omap5_l3_target_data_clk3),
+};
+
+static struct l3_flagmux_data *omap5_l3_flagmux[] = {
+	&omap_l3_flagmux_clk1,
+	&omap_l3_flagmux_clk2,
+	&omap5_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 omap5_l3_data = {
+	.l3_flagmux = omap5_l3_flagmux,
+	.num_modules = ARRAY_SIZE(omap5_l3_flagmux),
+	.l3_masters = omap_l3_masters,
+	.num_masters = ARRAY_SIZE(omap_l3_masters),
+	/* The 6 MSBs of the register field are used to distinguish the initiator */
+	.mst_addr_mask = 0x7E0,
+};
+
+/* DRA7 data */
+static struct l3_target_data dra_l3_target_data_clk1[] = {
+	{0x2a00, "AES1",},
+	{0x0200, "DMM_P1",},
+	{0x0600, "DSP2_SDMA",},
+	{0x0b00, "EVE2",},
+	{0x1300, "DMM_P2",},
+	{0x2c00, "AES2",},
+	{0x0300, "DSP1_SDMA",},
+	{0x0a00, "EVE1",},
+	{0x0c00, "EVE3",},
+	{0x0d00, "EVE4",},
+	{0x2900, "DSS",},
+	{0x0100, "GPMC",},
+	{0x3700, "PCIE1",},
+	{0x1600, "IVA_CONFIG",},
+	{0x1800, "IVA_SL2IF",},
+	{0x0500, "L4_CFG",},
+	{0x1d00, "L4_WKUP",},
+	{0x3800, "PCIE2",},
+	{0x3300, "SHA2_1",},
+	{0x1200, "GPU",},
+	{0x1000, "IPU1",},
+	{0x1100, "IPU2",},
+	{0x2000, "TPCC_EDMA",},
+	{0x2e00, "TPTC1_EDMA",},
+	{0x2b00, "TPTC2_EDMA",},
+	{0x0700, "VCP1",},
+	{0x2500, "L4_PER2_P3",},
+	{0x0e00, "L4_PER3_P3",},
+	{0x2200, "MMU1",},
+	{0x1400, "PRUSS1",},
+	{0x1500, "PRUSS2"},
+	{0x0800, "VCP2",},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk1 = {
+	.offset = 0x803500,
+	.l3_targ = dra_l3_target_data_clk1,
+	.num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk1),
+};
+
+static struct l3_target_data dra_l3_target_data_clk2[] = {
+	{0x0,	"HOST CLK1",},
+	{0x800000, "HOST CLK2",},
+	{0xdead, L3_TARGET_NOT_SUPPORTED,},
+	{0x3400, "SHA2_2",},
+	{0x0900, "BB2D",},
+	{0xdead, L3_TARGET_NOT_SUPPORTED,},
+	{0x2100, "L4_PER1_P3",},
+	{0x1c00, "L4_PER1_P1",},
+	{0x1f00, "L4_PER1_P2",},
+	{0x2300, "L4_PER2_P1",},
+	{0x2400, "L4_PER2_P2",},
+	{0x2600, "L4_PER3_P1",},
+	{0x2700, "L4_PER3_P2",},
+	{0x2f00, "MCASP1",},
+	{0x3000, "MCASP2",},
+	{0x3100, "MCASP3",},
+	{0x2800, "MMU2",},
+	{0x0f00, "OCMC_RAM1",},
+	{0x1700, "OCMC_RAM2",},
+	{0x1900, "OCMC_RAM3",},
+	{0x1e00, "OCMC_ROM",},
+	{0x3900, "QSPI",},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk2 = {
+	.offset = 0x803600,
+	.l3_targ = dra_l3_target_data_clk2,
+	.num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk2),
+};
+
+static struct l3_target_data dra_l3_target_data_clk3[] = {
+	{0x0100, "L3_INSTR"},
+	{0x0300, "DEBUGSS_CT_TBR"},
+	{0x0,	 "HOST CLK3"},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk3 = {
+	.offset = 0x200,
+	.l3_targ = dra_l3_target_data_clk3,
+	.num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk3),
+};
+
+static struct l3_masters_data dra_l3_masters[] = {
+	{ 0x0, "MPU" },
+	{ 0x4, "CS_DAP" },
+	{ 0x5, "IEEE1500_2_OCP" },
+	{ 0x8, "DSP1_MDMA" },
+	{ 0x9, "DSP1_CFG" },
+	{ 0xA, "DSP1_DMA" },
+	{ 0xB, "DSP2_MDMA" },
+	{ 0xC, "DSP2_CFG" },
+	{ 0xD, "DSP2_DMA" },
+	{ 0xE, "IVA" },
+	{ 0x10, "EVE1_P1" },
+	{ 0x11, "EVE2_P1" },
+	{ 0x12, "EVE3_P1" },
+	{ 0x13, "EVE4_P1" },
+	{ 0x14, "PRUSS1 PRU1" },
+	{ 0x15, "PRUSS1 PRU2" },
+	{ 0x16, "PRUSS2 PRU1" },
+	{ 0x17, "PRUSS2 PRU2" },
+	{ 0x18, "IPU1" },
+	{ 0x19, "IPU2" },
+	{ 0x1A, "SDMA" },
+	{ 0x1B, "CDMA" },
+	{ 0x1C, "TC1_EDMA" },
+	{ 0x1D, "TC2_EDMA" },
+	{ 0x20, "DSS" },
+	{ 0x21, "MMU1" },
+	{ 0x22, "PCIE1" },
+	{ 0x23, "MMU2" },
+	{ 0x24, "VIP1" },
+	{ 0x25, "VIP2" },
+	{ 0x26, "VIP3" },
+	{ 0x27, "VPE" },
+	{ 0x28, "GPU_P1" },
+	{ 0x29, "BB2D" },
+	{ 0x29, "GPU_P2" },
+	{ 0x2B, "GMAC_SW" },
+	{ 0x2C, "USB3" },
+	{ 0x2D, "USB2_SS" },
+	{ 0x2E, "USB2_ULPI_SS1" },
+	{ 0x2F, "USB2_ULPI_SS2" },
+	{ 0x30, "CSI2_1" },
+	{ 0x31, "CSI2_2" },
+	{ 0x33, "SATA" },
+	{ 0x34, "EVE1_P2" },
+	{ 0x35, "EVE2_P2" },
+	{ 0x36, "EVE3_P2" },
+	{ 0x37, "EVE4_P2" }
+};
+
+static struct l3_flagmux_data *dra_l3_flagmux[] = {
+	&dra_l3_flagmux_clk1,
+	&dra_l3_flagmux_clk2,
+	&dra_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 dra_l3_data = {
+	.l3_base = { [1] = L3_BASE_IS_SUBMODULE },
+	.l3_flagmux = dra_l3_flagmux,
+	.num_modules = ARRAY_SIZE(dra_l3_flagmux),
+	.l3_masters = dra_l3_masters,
+	.num_masters = ARRAY_SIZE(dra_l3_masters),
+	/* The 6 MSBs of the register field are used to distinguish the initiator */
+	.mst_addr_mask = 0xFC,
+};
+
+/* AM4372 data */
+static struct l3_target_data am4372_l3_target_data_200f[] = {
+	{0xf00,  "EMIF",},
+	{0x1200, "DES",},
+	{0x400,  "OCMCRAM",},
+	{0x700,  "TPTC0",},
+	{0x800,  "TPTC1",},
+	{0x900,  "TPTC2"},
+	{0xb00,  "TPCC",},
+	{0xd00,  "DEBUGSS",},
+	{0xdead, L3_TARGET_NOT_SUPPORTED,},
+	{0x200,  "SHA",},
+	{0xc00,  "SGX530",},
+	{0x500,  "AES0",},
+	{0xa00,  "L4_FAST",},
+	{0x300,  "MPUSS_L2_RAM",},
+	{0x100,  "ICSS",},
+};
+
+static struct l3_flagmux_data am4372_l3_flagmux_200f = {
+	.offset = 0x1000,
+	.l3_targ = am4372_l3_target_data_200f,
+	.num_targ_data = ARRAY_SIZE(am4372_l3_target_data_200f),
+};
+
+static struct l3_target_data am4372_l3_target_data_100s[] = {
+	{0x100, "L4_PER_0",},
+	{0x200, "L4_PER_1",},
+	{0x300, "L4_PER_2",},
+	{0x400, "L4_PER_3",},
+	{0x800, "McASP0",},
+	{0x900, "McASP1",},
+	{0xC00, "MMCHS2",},
+	{0x700, "GPMC",},
+	{0xD00, "L4_FW",},
+	{0xdead, L3_TARGET_NOT_SUPPORTED,},
+	{0x500, "ADCTSC",},
+	{0xE00, "L4_WKUP",},
+	{0xA00, "MAG_CARD",},
+};
+
+static struct l3_flagmux_data am4372_l3_flagmux_100s = {
+	.offset = 0x600,
+	.l3_targ = am4372_l3_target_data_100s,
+	.num_targ_data = ARRAY_SIZE(am4372_l3_target_data_100s),
+};
+
+static struct l3_masters_data am4372_l3_masters[] = {
+	{ 0x0, "M1 (128-bit)"},
+	{ 0x1, "M2 (64-bit)"},
+	{ 0x4, "DAP"},
+	{ 0x5, "P1500"},
+	{ 0xC, "ICSS0"},
+	{ 0xD, "ICSS1"},
+	{ 0x14, "Wakeup Processor"},
+	{ 0x18, "TPTC0 Read"},
+	{ 0x19, "TPTC0 Write"},
+	{ 0x1A, "TPTC1 Read"},
+	{ 0x1B, "TPTC1 Write"},
+	{ 0x1C, "TPTC2 Read"},
+	{ 0x1D, "TPTC2 Write"},
+	{ 0x20, "SGX530"},
+	{ 0x21, "OCP WP Traffic Probe"},
+	{ 0x22, "OCP WP DMA Profiling"},
+	{ 0x23, "OCP WP Event Trace"},
+	{ 0x25, "DSS"},
+	{ 0x28, "Crypto DMA RD"},
+	{ 0x29, "Crypto DMA WR"},
+	{ 0x2C, "VPFE0"},
+	{ 0x2D, "VPFE1"},
+	{ 0x30, "GEMAC"},
+	{ 0x34, "USB0 RD"},
+	{ 0x35, "USB0 WR"},
+	{ 0x36, "USB1 RD"},
+	{ 0x37, "USB1 WR"},
+};
+
+static struct l3_flagmux_data *am4372_l3_flagmux[] = {
+	&am4372_l3_flagmux_200f,
+	&am4372_l3_flagmux_100s,
+};
+
+static const struct omap_l3 am4372_l3_data = {
+	.l3_flagmux = am4372_l3_flagmux,
+	.num_modules = ARRAY_SIZE(am4372_l3_flagmux),
+	.l3_masters = am4372_l3_masters,
+	.num_masters = ARRAY_SIZE(am4372_l3_masters),
+	/* All 6 bits of the register field are used to distinguish the initiator */
+	.mst_addr_mask = 0x3F,
+};
+
+#endif	/* __OMAP_L3_NOC_H */
diff --git a/src/kernel/linux/v4.14/drivers/bus/omap_l3_smx.c b/src/kernel/linux/v4.14/drivers/bus/omap_l3_smx.c
new file mode 100644
index 0000000..360a5c0
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/omap_l3_smx.c
@@ -0,0 +1,311 @@
+/*
+ * OMAP3XXX L3 Interconnect Driver
+ *
+ * Copyright (C) 2011 Texas Corporation
+ *	Felipe Balbi <balbi@ti.com>
+ *	Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *	Sricharan <r.sricharan@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "omap_l3_smx.h"
+
+static inline u64 omap3_l3_readll(void __iomem *base, u16 reg)
+{
+	return __raw_readll(base + reg);
+}
+
+static inline void omap3_l3_writell(void __iomem *base, u16 reg, u64 value)
+{
+	__raw_writell(value, base + reg);
+}
+
+static inline enum omap3_l3_code omap3_l3_decode_error_code(u64 error)
+{
+	return (error & 0x0f000000) >> L3_ERROR_LOG_CODE;
+}
+
+static inline u32 omap3_l3_decode_addr(u64 error_addr)
+{
+	return error_addr & 0xffffffff;
+}
+
+static inline unsigned omap3_l3_decode_cmd(u64 error)
+{
+	return (error & 0x07) >> L3_ERROR_LOG_CMD;
+}
+
+static inline enum omap3_l3_initiator_id omap3_l3_decode_initid(u64 error)
+{
+	return (error & 0xff00) >> L3_ERROR_LOG_INITID;
+}
+
+static inline unsigned omap3_l3_decode_req_info(u64 error)
+{
+	return (error >> 32) & 0xffff;
+}
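+
+/*
+ * Worked example (illustrative, value chosen arbitrarily): an error log
+ * word of 0x02001b02 decodes, per the L3_ERROR_LOG_* bit positions in
+ * omap_l3_smx.h, to code 0x2 ("Address Hole"), initiator ID 0x1b
+ * (OMAP_L3_IA_MPU_SS_1, reported as "MPU") and command 0x2.
+ */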
+
+static char *omap3_l3_code_string(u8 code)
+{
+	switch (code) {
+	case OMAP_L3_CODE_NOERROR:
+		return "No Error";
+	case OMAP_L3_CODE_UNSUP_CMD:
+		return "Unsupported Command";
+	case OMAP_L3_CODE_ADDR_HOLE:
+		return "Address Hole";
+	case OMAP_L3_CODE_PROTECT_VIOLATION:
+		return "Protection Violation";
+	case OMAP_L3_CODE_IN_BAND_ERR:
+		return "In-band Error";
+	case OMAP_L3_CODE_REQ_TOUT_NOT_ACCEPT:
+		return "Request Timeout Not Accepted";
+	case OMAP_L3_CODE_REQ_TOUT_NO_RESP:
+		return "Request Timeout, no response";
+	default:
+		return "UNKNOWN error";
+	}
+}
+
+static char *omap3_l3_initiator_string(u8 initid)
+{
+	switch (initid) {
+	case OMAP_L3_LCD:
+		return "LCD";
+	case OMAP_L3_SAD2D:
+		return "SAD2D";
+	case OMAP_L3_IA_MPU_SS_1:
+	case OMAP_L3_IA_MPU_SS_2:
+	case OMAP_L3_IA_MPU_SS_3:
+	case OMAP_L3_IA_MPU_SS_4:
+	case OMAP_L3_IA_MPU_SS_5:
+		return "MPU";
+	case OMAP_L3_IA_IVA_SS_1:
+	case OMAP_L3_IA_IVA_SS_2:
+	case OMAP_L3_IA_IVA_SS_3:
+		return "IVA_SS";
+	case OMAP_L3_IA_IVA_SS_DMA_1:
+	case OMAP_L3_IA_IVA_SS_DMA_2:
+	case OMAP_L3_IA_IVA_SS_DMA_3:
+	case OMAP_L3_IA_IVA_SS_DMA_4:
+	case OMAP_L3_IA_IVA_SS_DMA_5:
+	case OMAP_L3_IA_IVA_SS_DMA_6:
+		return "IVA_SS_DMA";
+	case OMAP_L3_IA_SGX:
+		return "SGX";
+	case OMAP_L3_IA_CAM_1:
+	case OMAP_L3_IA_CAM_2:
+	case OMAP_L3_IA_CAM_3:
+		return "CAM";
+	case OMAP_L3_IA_DAP:
+		return "DAP";
+	case OMAP_L3_SDMA_WR_1:
+	case OMAP_L3_SDMA_WR_2:
+		return "SDMA_WR";
+	case OMAP_L3_SDMA_RD_1:
+	case OMAP_L3_SDMA_RD_2:
+	case OMAP_L3_SDMA_RD_3:
+	case OMAP_L3_SDMA_RD_4:
+		return "SDMA_RD";
+	case OMAP_L3_USBOTG:
+		return "USB_OTG";
+	case OMAP_L3_USBHOST:
+		return "USB_HOST";
+	default:
+		return "UNKNOWN Initiator";
+	}
+}
+
+/*
+ * omap3_l3_block_irq - handles a register block's irq
+ * @l3: struct omap3_l3 *
+ * @error: L3_ERROR_LOG register of our block
+ * @error_addr: L3_ERROR_LOG_ADDR register of our block
+ *
+ * Called in hard-irq context. The caller should take care of locking.
+ *
+ * The OMAP36xx TRM (page 2001, Figure 9-10) gives the Typical Error
+ * Analysis Sequence; we follow that sequence here. Please refer to that
+ * figure for more information on the subject.
+ */
+static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3,
+					u64 error, int error_addr)
+{
+	u8 code = omap3_l3_decode_error_code(error);
+	u8 initid = omap3_l3_decode_initid(error);
+	u8 multi = error & L3_ERROR_LOG_MULTI;
+	u32 address = omap3_l3_decode_addr(error_addr);
+
+	pr_err("%s seen by %s %s at address %x\n",
+			omap3_l3_code_string(code),
+			omap3_l3_initiator_string(initid),
+			multi ? "Multiple Errors" : "", address);
+	WARN_ON(1);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t omap3_l3_app_irq(int irq, void *_l3)
+{
+	struct omap3_l3 *l3 = _l3;
+	u64 status, clear;
+	u64 error;
+	u64 error_addr;
+	u64 err_source = 0;
+	void __iomem *base;
+	int int_type;
+	irqreturn_t ret = IRQ_NONE;
+
+	int_type = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
+	if (!int_type) {
+		status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_0);
+		/*
+		 * If we have a timeout error, there's nothing we can
+		 * do besides rebooting the board, so let's BUG on any
+		 * such error and handle the others. A timeout error is
+		 * severe and not expected to occur.
+		 */
+		BUG_ON(status & L3_STATUS_0_TIMEOUT_MASK);
+	} else {
+		status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_1);
+		/* No timeout error for debug sources */
+	}
+
+	/* identify the error source */
+	err_source = __ffs(status);
+
+	base = l3->rt + omap3_l3_bases[int_type][err_source];
+	error = omap3_l3_readll(base, L3_ERROR_LOG);
+	if (error) {
+		error_addr = omap3_l3_readll(base, L3_ERROR_LOG_ADDR);
+		ret |= omap3_l3_block_irq(l3, error, error_addr);
+	}
+
+	/* Clear the status register */
+	clear = (L3_AGENT_STATUS_CLEAR_IA << int_type) |
+		L3_AGENT_STATUS_CLEAR_TA;
+	omap3_l3_writell(base, L3_AGENT_STATUS, clear);
+
+	/* clear the error log register */
+	omap3_l3_writell(base, L3_ERROR_LOG, error);
+
+	return ret;
+}
+
+#if IS_BUILTIN(CONFIG_OF)
+static const struct of_device_id omap3_l3_match[] = {
+	{
+		.compatible = "ti,omap3-l3-smx",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, omap3_l3_match);
+#endif
+
+static int omap3_l3_probe(struct platform_device *pdev)
+{
+	struct omap3_l3 *l3;
+	struct resource *res;
+	int ret;
+
+	l3 = kzalloc(sizeof(*l3), GFP_KERNEL);
+	if (!l3)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, l3);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "couldn't find resource\n");
+		ret = -ENODEV;
+		goto err0;
+	}
+	l3->rt = ioremap(res->start, resource_size(res));
+	if (!l3->rt) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		ret = -ENOMEM;
+		goto err0;
+	}
+
+	l3->debug_irq = platform_get_irq(pdev, 0);
+	ret = request_irq(l3->debug_irq, omap3_l3_app_irq, IRQF_TRIGGER_RISING,
+			  "l3-debug-irq", l3);
+	if (ret) {
+		dev_err(&pdev->dev, "couldn't request debug irq\n");
+		goto err1;
+	}
+
+	l3->app_irq = platform_get_irq(pdev, 1);
+	ret = request_irq(l3->app_irq, omap3_l3_app_irq, IRQF_TRIGGER_RISING,
+			  "l3-app-irq", l3);
+	if (ret) {
+		dev_err(&pdev->dev, "couldn't request app irq\n");
+		goto err2;
+	}
+
+	return 0;
+
+err2:
+	free_irq(l3->debug_irq, l3);
+err1:
+	iounmap(l3->rt);
+err0:
+	kfree(l3);
+	return ret;
+}
+
+static int omap3_l3_remove(struct platform_device *pdev)
+{
+	struct omap3_l3         *l3 = platform_get_drvdata(pdev);
+
+	free_irq(l3->app_irq, l3);
+	free_irq(l3->debug_irq, l3);
+	iounmap(l3->rt);
+	kfree(l3);
+
+	return 0;
+}
+
+static struct platform_driver omap3_l3_driver = {
+	.probe		= omap3_l3_probe,
+	.remove         = omap3_l3_remove,
+	.driver         = {
+		.name   = "omap_l3_smx",
+		.of_match_table = of_match_ptr(omap3_l3_match),
+	},
+};
+
+static int __init omap3_l3_init(void)
+{
+	return platform_driver_register(&omap3_l3_driver);
+}
+postcore_initcall_sync(omap3_l3_init);
+
+static void __exit omap3_l3_exit(void)
+{
+	platform_driver_unregister(&omap3_l3_driver);
+}
+module_exit(omap3_l3_exit);
diff --git a/src/kernel/linux/v4.14/drivers/bus/omap_l3_smx.h b/src/kernel/linux/v4.14/drivers/bus/omap_l3_smx.h
new file mode 100644
index 0000000..4f3cebc
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/omap_l3_smx.h
@@ -0,0 +1,338 @@
+/*
+ * OMAP3XXX L3 Interconnect Driver header
+ *
+ * Copyright (C) 2011 Texas Corporation
+ *	Felipe Balbi <balbi@ti.com>
+ *	Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *	sricharan <r.sricharan@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+#ifndef __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H
+#define __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H
+
+/* Register definitions. All 64-bit wide */
+#define L3_COMPONENT			0x000
+#define L3_CORE				0x018
+#define L3_AGENT_CONTROL		0x020
+#define L3_AGENT_STATUS			0x028
+#define L3_ERROR_LOG			0x058
+
+#define L3_ERROR_LOG_MULTI		(1 << 31)
+#define L3_ERROR_LOG_SECONDARY		(1 << 30)
+
+#define L3_ERROR_LOG_ADDR		0x060
+
+/* Register definitions for Sideband Interconnect */
+#define L3_SI_CONTROL			0x020
+#define L3_SI_FLAG_STATUS_0		0x510
+
+static const u64 shift = 1;
+
+#define L3_STATUS_0_MPUIA_BRST		(shift << 0)
+#define L3_STATUS_0_MPUIA_RSP		(shift << 1)
+#define L3_STATUS_0_MPUIA_INBAND	(shift << 2)
+#define L3_STATUS_0_IVAIA_BRST		(shift << 6)
+#define L3_STATUS_0_IVAIA_RSP		(shift << 7)
+#define L3_STATUS_0_IVAIA_INBAND	(shift << 8)
+#define L3_STATUS_0_SGXIA_BRST		(shift << 9)
+#define L3_STATUS_0_SGXIA_RSP		(shift << 10)
+#define L3_STATUS_0_SGXIA_MERROR	(shift << 11)
+#define L3_STATUS_0_CAMIA_BRST		(shift << 12)
+#define L3_STATUS_0_CAMIA_RSP		(shift << 13)
+#define L3_STATUS_0_CAMIA_INBAND	(shift << 14)
+#define L3_STATUS_0_DISPIA_BRST		(shift << 15)
+#define L3_STATUS_0_DISPIA_RSP		(shift << 16)
+#define L3_STATUS_0_DMARDIA_BRST	(shift << 18)
+#define L3_STATUS_0_DMARDIA_RSP		(shift << 19)
+#define L3_STATUS_0_DMAWRIA_BRST	(shift << 21)
+#define L3_STATUS_0_DMAWRIA_RSP		(shift << 22)
+#define L3_STATUS_0_USBOTGIA_BRST	(shift << 24)
+#define L3_STATUS_0_USBOTGIA_RSP	(shift << 25)
+#define L3_STATUS_0_USBOTGIA_INBAND	(shift << 26)
+#define L3_STATUS_0_USBHOSTIA_BRST	(shift << 27)
+#define L3_STATUS_0_USBHOSTIA_INBAND	(shift << 28)
+#define L3_STATUS_0_SMSTA_REQ		(shift << 48)
+#define L3_STATUS_0_GPMCTA_REQ		(shift << 49)
+#define L3_STATUS_0_OCMRAMTA_REQ	(shift << 50)
+#define L3_STATUS_0_OCMROMTA_REQ	(shift << 51)
+#define L3_STATUS_0_IVATA_REQ		(shift << 54)
+#define L3_STATUS_0_SGXTA_REQ		(shift << 55)
+#define L3_STATUS_0_SGXTA_SERROR	(shift << 56)
+#define L3_STATUS_0_GPMCTA_SERROR	(shift << 57)
+#define L3_STATUS_0_L4CORETA_REQ	(shift << 58)
+#define L3_STATUS_0_L4PERTA_REQ		(shift << 59)
+#define L3_STATUS_0_L4EMUTA_REQ		(shift << 60)
+#define L3_STATUS_0_MAD2DTA_REQ		(shift << 61)
+
+#define L3_STATUS_0_TIMEOUT_MASK	(L3_STATUS_0_MPUIA_BRST		\
+					| L3_STATUS_0_MPUIA_RSP		\
+					| L3_STATUS_0_IVAIA_BRST	\
+					| L3_STATUS_0_IVAIA_RSP		\
+					| L3_STATUS_0_SGXIA_BRST	\
+					| L3_STATUS_0_SGXIA_RSP		\
+					| L3_STATUS_0_CAMIA_BRST	\
+					| L3_STATUS_0_CAMIA_RSP		\
+					| L3_STATUS_0_DISPIA_BRST	\
+					| L3_STATUS_0_DISPIA_RSP	\
+					| L3_STATUS_0_DMARDIA_BRST	\
+					| L3_STATUS_0_DMARDIA_RSP	\
+					| L3_STATUS_0_DMAWRIA_BRST	\
+					| L3_STATUS_0_DMAWRIA_RSP	\
+					| L3_STATUS_0_USBOTGIA_BRST	\
+					| L3_STATUS_0_USBOTGIA_RSP	\
+					| L3_STATUS_0_USBHOSTIA_BRST	\
+					| L3_STATUS_0_SMSTA_REQ		\
+					| L3_STATUS_0_GPMCTA_REQ	\
+					| L3_STATUS_0_OCMRAMTA_REQ	\
+					| L3_STATUS_0_OCMROMTA_REQ	\
+					| L3_STATUS_0_IVATA_REQ		\
+					| L3_STATUS_0_SGXTA_REQ		\
+					| L3_STATUS_0_L4CORETA_REQ	\
+					| L3_STATUS_0_L4PERTA_REQ	\
+					| L3_STATUS_0_L4EMUTA_REQ	\
+					| L3_STATUS_0_MAD2DTA_REQ)
+
+#define L3_SI_FLAG_STATUS_1		0x530
+
+#define L3_STATUS_1_MPU_DATAIA		(1 << 0)
+#define L3_STATUS_1_DAPIA0		(1 << 3)
+#define L3_STATUS_1_DAPIA1		(1 << 4)
+#define L3_STATUS_1_IVAIA		(1 << 6)
+
+#define L3_PM_ERROR_LOG			0x020
+#define L3_PM_CONTROL			0x028
+#define L3_PM_ERROR_CLEAR_SINGLE	0x030
+#define L3_PM_ERROR_CLEAR_MULTI		0x038
+#define L3_PM_REQ_INFO_PERMISSION(n)	(0x048 + (0x020 * n))
+#define L3_PM_READ_PERMISSION(n)	(0x050 + (0x020 * n))
+#define L3_PM_WRITE_PERMISSION(n)	(0x058 + (0x020 * n))
+#define L3_PM_ADDR_MATCH(n)		(0x060 + (0x020 * n))
+
+/* L3 error log bit fields. Common for IA and TA */
+#define L3_ERROR_LOG_CODE		24
+#define L3_ERROR_LOG_INITID		8
+#define L3_ERROR_LOG_CMD		0
+
+/* L3 agent status bit fields. */
+#define L3_AGENT_STATUS_CLEAR_IA	0x10000000
+#define L3_AGENT_STATUS_CLEAR_TA	0x01000000
+
+#define OMAP34xx_IRQ_L3_APP		10
+#define L3_APPLICATION_ERROR		0x0
+#define L3_DEBUG_ERROR			0x1
+
+enum omap3_l3_initiator_id {
+	/* LCD has 1 ID */
+	OMAP_L3_LCD = 29,
+	/* SAD2D has 1 ID */
+	OMAP_L3_SAD2D = 28,
+	/* MPU has 5 IDs */
+	OMAP_L3_IA_MPU_SS_1 = 27,
+	OMAP_L3_IA_MPU_SS_2 = 26,
+	OMAP_L3_IA_MPU_SS_3 = 25,
+	OMAP_L3_IA_MPU_SS_4 = 24,
+	OMAP_L3_IA_MPU_SS_5 = 23,
+	/* IVA2.2 SS has 3 IDs*/
+	OMAP_L3_IA_IVA_SS_1 = 22,
+	OMAP_L3_IA_IVA_SS_2 = 21,
+	OMAP_L3_IA_IVA_SS_3 = 20,
+	/* IVA 2.2 SS DMA has 6 IDS */
+	OMAP_L3_IA_IVA_SS_DMA_1 = 19,
+	OMAP_L3_IA_IVA_SS_DMA_2 = 18,
+	OMAP_L3_IA_IVA_SS_DMA_3 = 17,
+	OMAP_L3_IA_IVA_SS_DMA_4 = 16,
+	OMAP_L3_IA_IVA_SS_DMA_5 = 15,
+	OMAP_L3_IA_IVA_SS_DMA_6 = 14,
+	/* SGX has 1 ID */
+	OMAP_L3_IA_SGX = 13,
+	/* CAM has 3 ID */
+	OMAP_L3_IA_CAM_1 = 12,
+	OMAP_L3_IA_CAM_2 = 11,
+	OMAP_L3_IA_CAM_3 = 10,
+	/* DAP has 1 ID */
+	OMAP_L3_IA_DAP = 9,
+	/* SDMA WR has 2 IDs */
+	OMAP_L3_SDMA_WR_1 = 8,
+	OMAP_L3_SDMA_WR_2 = 7,
+	/* SDMA RD has 4 IDs */
+	OMAP_L3_SDMA_RD_1 = 6,
+	OMAP_L3_SDMA_RD_2 = 5,
+	OMAP_L3_SDMA_RD_3 = 4,
+	OMAP_L3_SDMA_RD_4 = 3,
+	/* HSUSB OTG has 1 ID */
+	OMAP_L3_USBOTG = 2,
+	/* HSUSB HOST has 1 ID */
+	OMAP_L3_USBHOST = 1,
+};
+
+enum omap3_l3_code {
+	OMAP_L3_CODE_NOERROR = 0,
+	OMAP_L3_CODE_UNSUP_CMD = 1,
+	OMAP_L3_CODE_ADDR_HOLE = 2,
+	OMAP_L3_CODE_PROTECT_VIOLATION = 3,
+	OMAP_L3_CODE_IN_BAND_ERR = 4,
+	/* codes 5 and 6 are reserved */
+	OMAP_L3_CODE_REQ_TOUT_NOT_ACCEPT = 7,
+	OMAP_L3_CODE_REQ_TOUT_NO_RESP = 8,
+	/* codes 9 - 15 are also reserved */
+};
+
+struct omap3_l3 {
+	struct device *dev;
+	struct clk *ick;
+
+	/* memory base*/
+	void __iomem *rt;
+
+	int debug_irq;
+	int app_irq;
+
+	/* true when an inband functional error occurs */
+	unsigned inband:1;
+};
+
+/* offsets for l3 agents in order with the Flag status register */
+static unsigned int omap3_l3_app_bases[] = {
+	/* MPU IA */
+	0x1400,
+	0x1400,
+	0x1400,
+	/* RESERVED */
+	0,
+	0,
+	0,
+	/* IVA 2.2 IA */
+	0x1800,
+	0x1800,
+	0x1800,
+	/* SGX IA */
+	0x1c00,
+	0x1c00,
+	/* RESERVED */
+	0,
+	/* CAMERA IA */
+	0x5800,
+	0x5800,
+	0x5800,
+	/* DISPLAY IA */
+	0x5400,
+	0x5400,
+	/* RESERVED */
+	0,
+	/*SDMA RD IA */
+	0x4c00,
+	0x4c00,
+	/* RESERVED */
+	0,
+	/* SDMA WR IA */
+	0x5000,
+	0x5000,
+	/* RESERVED */
+	0,
+	/* USB OTG IA */
+	0x4400,
+	0x4400,
+	0x4400,
+	/* USB HOST IA */
+	0x4000,
+	0x4000,
+	/* RESERVED */
+	0,
+	0,
+	0,
+	0,
+	/* SAD2D IA */
+	0x3000,
+	0x3000,
+	0x3000,
+	/* RESERVED */
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	/* SMA TA */
+	0x2000,
+	/* GPMC TA */
+	0x2400,
+	/* OCM RAM TA */
+	0x2800,
+	/* OCM ROM TA */
+	0x2C00,
+	/* L4 CORE TA */
+	0x6800,
+	/* L4 PER TA */
+	0x6c00,
+	/* IVA 2.2 TA */
+	0x6000,
+	/* SGX TA */
+	0x6400,
+	/* L4 EMU TA */
+	0x7000,
+	/* GPMC TA */
+	0x2400,
+	/* L4 CORE TA */
+	0x6800,
+	/* L4 PER TA */
+	0x6c00,
+	/* L4 EMU TA */
+	0x7000,
+	/* MAD2D TA */
+	0x3400,
+	/* RESERVED */
+	0,
+	0,
+};
+
+static unsigned int omap3_l3_debug_bases[] = {
+	/* MPU DATA IA */
+	0x1400,
+	/* RESERVED */
+	0,
+	0,
+	/* DAP IA */
+	0x5c00,
+	0x5c00,
+	/* RESERVED */
+	0,
+	/* IVA 2.2 IA */
+	0x1800,
+	/* REST RESERVED */
+};
+
+static u32 *omap3_l3_bases[] = {
+	omap3_l3_app_bases,
+	omap3_l3_debug_bases,
+};
+
+/*
+ * REVISIT define __raw_readll/__raw_writell here, but move them to
+ * <asm/io.h> at some point
+ */
+#define __raw_writell(v, a)	(__chk_io_ptr(a), \
+				*(volatile u64 __force *)(a) = (v))
+#define __raw_readll(a)		(__chk_io_ptr(a), \
+				*(volatile u64 __force *)(a))
+
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/bus/qcom-ebi2.c b/src/kernel/linux/v4.14/drivers/bus/qcom-ebi2.c
new file mode 100644
index 0000000..a644424
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/qcom-ebi2.c
@@ -0,0 +1,408 @@
+/*
+ * Qualcomm External Bus Interface 2 (EBI2) driver
+ * an older version of the Qualcomm Parallel Interface Controller (QPIC)
+ *
+ * Copyright (C) 2016 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * See the device tree bindings for this block for more details on the
+ * hardware.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+
+/*
+ * CS0, CS1, CS4 and CS5 are two bits wide, CS2 and CS3 are one bit.
+ */
+#define EBI2_CS0_ENABLE_MASK (BIT(0)|BIT(1))
+#define EBI2_CS1_ENABLE_MASK (BIT(2)|BIT(3))
+#define EBI2_CS2_ENABLE_MASK BIT(4)
+#define EBI2_CS3_ENABLE_MASK BIT(5)
+#define EBI2_CS4_ENABLE_MASK (BIT(6)|BIT(7))
+#define EBI2_CS5_ENABLE_MASK (BIT(8)|BIT(9))
+#define EBI2_CSN_MASK GENMASK(9, 0)
+
+#define EBI2_XMEM_CFG 0x0000 /* Power management etc */
+
+/*
+ * SLOW CSn CFG
+ *
+ * Bits 31-28: RECOVERY recovery cycles (0 = 1, 1 = 2 etc) this is the time the
+ *             memory continues to drive the data bus after OE is de-asserted.
+ *             Inserted when reading one CS and switching to another CS or read
+ *             followed by write on the same CS. Valid values 0 thru 15.
+ * Bits 27-24: WR_HOLD write hold cycles, these are extra cycles inserted after
+ *             every write minimum 1. The data out is driven from the time WE is
+ *             asserted until CS is asserted. With a hold of 1, the CS stays
+ *             active for 1 extra cycle etc. Valid values 0 thru 15.
+ * Bits 23-16: WR_DELTA initial latency for write cycles inserted for the first
+ *             write to a page or burst memory
+ * Bits 15-8:  RD_DELTA initial latency for read cycles inserted for the first
+ *             read to a page or burst memory
+ * Bits 7-4:   WR_WAIT number of wait cycles for every write access, 0=1 cycle
+ *             so 1 thru 16 cycles.
+ * Bits 3-0:   RD_WAIT number of wait cycles for every read access, 0=1 cycle
+ *             so 1 thru 16 cycles.
+ */
+#define EBI2_XMEM_CS0_SLOW_CFG 0x0008
+#define EBI2_XMEM_CS1_SLOW_CFG 0x000C
+#define EBI2_XMEM_CS2_SLOW_CFG 0x0010
+#define EBI2_XMEM_CS3_SLOW_CFG 0x0014
+#define EBI2_XMEM_CS4_SLOW_CFG 0x0018
+#define EBI2_XMEM_CS5_SLOW_CFG 0x001C
+
+#define EBI2_XMEM_RECOVERY_SHIFT	28
+#define EBI2_XMEM_WR_HOLD_SHIFT		24
+#define EBI2_XMEM_WR_DELTA_SHIFT	16
+#define EBI2_XMEM_RD_DELTA_SHIFT	8
+#define EBI2_XMEM_WR_WAIT_SHIFT		4
+#define EBI2_XMEM_RD_WAIT_SHIFT		0
+
+/*
+ * FAST CSn CFG
+ * Bits 31-28: ?
+ * Bits 27-24: RD_HOLD: the length in cycles of the first segment of a read
+ *             transfer. For a single read transfer this will be the time
+ *             from CS assertion to OE assertion.
+ * Bits 23-18: ?
+ * Bits 17-16: ADV_OE_RECOVERY, the number of cycles elapsed before an OE
+ *             assertion, with respect to the cycle where ADV is asserted.
+ *             2 means 2 cycles between ADV and OE. Values 0, 1, 2 or 3.
+ * Bit 5:      ADDR_HOLD_ENA: the address is held for an extra cycle to meet
+ *             hold time requirements with ADV assertion.
+ *
+ * The manual mentions "write precharge cycles" and "precharge cycles".
+ * We have not been able to figure out which bit fields these correspond to
+ * in the hardware, or what valid values exist. The current hypothesis is that
+ * this is something just used on the FAST chip selects. There is also a "byte
+ * device enable" flag somewhere for 8bit memories.
+ */
+#define EBI2_XMEM_CS0_FAST_CFG 0x0028
+#define EBI2_XMEM_CS1_FAST_CFG 0x002C
+#define EBI2_XMEM_CS2_FAST_CFG 0x0030
+#define EBI2_XMEM_CS3_FAST_CFG 0x0034
+#define EBI2_XMEM_CS4_FAST_CFG 0x0038
+#define EBI2_XMEM_CS5_FAST_CFG 0x003C
+
+#define EBI2_XMEM_RD_HOLD_SHIFT		24
+#define EBI2_XMEM_ADV_OE_RECOVERY_SHIFT	16
+#define EBI2_XMEM_ADDR_HOLD_ENA_SHIFT	5
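+
+/*
+ * Illustrative helper (not part of the original driver): a minimal sketch
+ * of how the SLOW and FAST CSn CFG words are composed from the bit fields
+ * documented above. The driver itself builds these values from device tree
+ * properties in qcom_ebi2_setup_chipselect() below; the timing constants
+ * here are made-up example values, within the documented maxima.
+ */
+static inline void qcom_ebi2_example_cfg_words(u32 *slowcfg, u32 *fastcfg)
+{
+	/* SLOW CFG: 2 recovery cycles, 1 write hold, 3 write/4 read waits */
+	*slowcfg = (2 << EBI2_XMEM_RECOVERY_SHIFT) |
+		   (1 << EBI2_XMEM_WR_HOLD_SHIFT) |
+		   (3 << EBI2_XMEM_WR_WAIT_SHIFT) |
+		   (4 << EBI2_XMEM_RD_WAIT_SHIFT);
+	/* FAST CFG: 5 read hold cycles, 2 ADV-to-OE cycles, address hold */
+	*fastcfg = (5 << EBI2_XMEM_RD_HOLD_SHIFT) |
+		   (2 << EBI2_XMEM_ADV_OE_RECOVERY_SHIFT) |
+		   BIT(EBI2_XMEM_ADDR_HOLD_ENA_SHIFT);
+}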
+
+/**
+ * struct cs_data - struct with info on a chipselect setting
+ * @enable_mask: mask to enable the chipselect in the EBI2 config
+ * @slow_cfg: offset to XMEMC slow CS config
+ * @fast_cfg: offset to XMEMC fast CS config
+ */
+struct cs_data {
+	u32 enable_mask;
+	u16 slow_cfg;
+	u16 fast_cfg;
+};
+
+static const struct cs_data cs_info[] = {
+	{
+		/* CS0 */
+		.enable_mask = EBI2_CS0_ENABLE_MASK,
+		.slow_cfg = EBI2_XMEM_CS0_SLOW_CFG,
+		.fast_cfg = EBI2_XMEM_CS0_FAST_CFG,
+	},
+	{
+		/* CS1 */
+		.enable_mask = EBI2_CS1_ENABLE_MASK,
+		.slow_cfg = EBI2_XMEM_CS1_SLOW_CFG,
+		.fast_cfg = EBI2_XMEM_CS1_FAST_CFG,
+	},
+	{
+		/* CS2 */
+		.enable_mask = EBI2_CS2_ENABLE_MASK,
+		.slow_cfg = EBI2_XMEM_CS2_SLOW_CFG,
+		.fast_cfg = EBI2_XMEM_CS2_FAST_CFG,
+	},
+	{
+		/* CS3 */
+		.enable_mask = EBI2_CS3_ENABLE_MASK,
+		.slow_cfg = EBI2_XMEM_CS3_SLOW_CFG,
+		.fast_cfg = EBI2_XMEM_CS3_FAST_CFG,
+	},
+	{
+		/* CS4 */
+		.enable_mask = EBI2_CS4_ENABLE_MASK,
+		.slow_cfg = EBI2_XMEM_CS4_SLOW_CFG,
+		.fast_cfg = EBI2_XMEM_CS4_FAST_CFG,
+	},
+	{
+		/* CS5 */
+		.enable_mask = EBI2_CS5_ENABLE_MASK,
+		.slow_cfg = EBI2_XMEM_CS5_SLOW_CFG,
+		.fast_cfg = EBI2_XMEM_CS5_FAST_CFG,
+	},
+};
+
+/**
+ * struct ebi2_xmem_prop - describes an XMEM config property
+ * @prop: the device tree binding name
+ * @max: maximum value for the property
+ * @slowreg: true if this property is in the SLOW CS config register
+ * else it is assumed to be in the FAST config register
+ * @shift: the bit field start in the SLOW or FAST register for this
+ * property
+ */
+struct ebi2_xmem_prop {
+	const char *prop;
+	u32 max;
+	bool slowreg;
+	u16 shift;
+};
+
+static const struct ebi2_xmem_prop xmem_props[] = {
+	{
+		.prop = "qcom,xmem-recovery-cycles",
+		.max = 15,
+		.slowreg = true,
+		.shift = EBI2_XMEM_RECOVERY_SHIFT,
+	},
+	{
+		.prop = "qcom,xmem-write-hold-cycles",
+		.max = 15,
+		.slowreg = true,
+		.shift = EBI2_XMEM_WR_HOLD_SHIFT,
+	},
+	{
+		.prop = "qcom,xmem-write-delta-cycles",
+		.max = 255,
+		.slowreg = true,
+		.shift = EBI2_XMEM_WR_DELTA_SHIFT,
+	},
+	{
+		.prop = "qcom,xmem-read-delta-cycles",
+		.max = 255,
+		.slowreg = true,
+		.shift = EBI2_XMEM_RD_DELTA_SHIFT,
+	},
+	{
+		.prop = "qcom,xmem-write-wait-cycles",
+		.max = 15,
+		.slowreg = true,
+		.shift = EBI2_XMEM_WR_WAIT_SHIFT,
+	},
+	{
+		.prop = "qcom,xmem-read-wait-cycles",
+		.max = 15,
+		.slowreg = true,
+		.shift = EBI2_XMEM_RD_WAIT_SHIFT,
+	},
+	{
+		.prop = "qcom,xmem-address-hold-enable",
+		.max = 1, /* boolean prop */
+		.slowreg = false,
+		.shift = EBI2_XMEM_ADDR_HOLD_ENA_SHIFT,
+	},
+	{
+		.prop = "qcom,xmem-adv-to-oe-recovery-cycles",
+		.max = 3,
+		.slowreg = false,
+		.shift = EBI2_XMEM_ADV_OE_RECOVERY_SHIFT,
+	},
+	{
+		.prop = "qcom,xmem-read-hold-cycles",
+		.max = 15,
+		.slowreg = false,
+		.shift = EBI2_XMEM_RD_HOLD_SHIFT,
+	},
+};
+
+static void qcom_ebi2_setup_chipselect(struct device_node *np,
+				       struct device *dev,
+				       void __iomem *ebi2_base,
+				       void __iomem *ebi2_xmem,
+				       u32 csindex)
+{
+	const struct cs_data *csd;
+	u32 slowcfg, fastcfg;
+	u32 val;
+	int ret;
+	int i;
+
+	csd = &cs_info[csindex];
+	val = readl(ebi2_base);
+	val |= csd->enable_mask;
+	writel(val, ebi2_base);
+	dev_dbg(dev, "enabled CS%u\n", csindex);
+
+	/* Next set up the XMEMC */
+	slowcfg = 0;
+	fastcfg = 0;
+
+	for (i = 0; i < ARRAY_SIZE(xmem_props); i++) {
+		const struct ebi2_xmem_prop *xp = &xmem_props[i];
+
+		/* All are regular u32 values */
+		ret = of_property_read_u32(np, xp->prop, &val);
+		if (ret) {
+			dev_dbg(dev, "could not read %s for CS%d\n",
+				xp->prop, csindex);
+			continue;
+		}
+
+		/* First check boolean props */
+		if (xp->max == 1 && val) {
+			if (xp->slowreg)
+				slowcfg |= BIT(xp->shift);
+			else
+				fastcfg |= BIT(xp->shift);
+			dev_dbg(dev, "set %s flag\n", xp->prop);
+			continue;
+		}
+
+		/* We're dealing with an u32 */
+		if (val > xp->max) {
+			dev_err(dev,
+				"too high value for %s: %u, capped at %u\n",
+				xp->prop, val, xp->max);
+			val = xp->max;
+		}
+		if (xp->slowreg)
+			slowcfg |= (val << xp->shift);
+		else
+			fastcfg |= (val << xp->shift);
+		dev_dbg(dev, "set %s to %u\n", xp->prop, val);
+	}
+
+	dev_info(dev, "CS%u: SLOW CFG 0x%08x, FAST CFG 0x%08x\n",
+		 csindex, slowcfg, fastcfg);
+
+	if (slowcfg)
+		writel(slowcfg, ebi2_xmem + csd->slow_cfg);
+	if (fastcfg)
+		writel(fastcfg, ebi2_xmem + csd->fast_cfg);
+}
+
+static int qcom_ebi2_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *child;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	void __iomem *ebi2_base;
+	void __iomem *ebi2_xmem;
+	struct clk *ebi2xclk;
+	struct clk *ebi2clk;
+	bool have_children = false;
+	u32 val;
+	int ret;
+
+	ebi2xclk = devm_clk_get(dev, "ebi2x");
+	if (IS_ERR(ebi2xclk))
+		return PTR_ERR(ebi2xclk);
+
+	ret = clk_prepare_enable(ebi2xclk);
+	if (ret) {
+		dev_err(dev, "could not enable EBI2X clk (%d)\n", ret);
+		return ret;
+	}
+
+	ebi2clk = devm_clk_get(dev, "ebi2");
+	if (IS_ERR(ebi2clk)) {
+		ret = PTR_ERR(ebi2clk);
+		goto err_disable_2x_clk;
+	}
+
+	ret = clk_prepare_enable(ebi2clk);
+	if (ret) {
+		dev_err(dev, "could not enable EBI2 clk\n");
+		goto err_disable_2x_clk;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ebi2_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ebi2_base)) {
+		ret = PTR_ERR(ebi2_base);
+		goto err_disable_clk;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	ebi2_xmem = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ebi2_xmem)) {
+		ret = PTR_ERR(ebi2_xmem);
+		goto err_disable_clk;
+	}
+
+	/* Allegedly this turns the power save mode off */
+	writel(0UL, ebi2_xmem + EBI2_XMEM_CFG);
+
+	/* Disable all chipselects */
+	val = readl(ebi2_base);
+	val &= ~EBI2_CSN_MASK;
+	writel(val, ebi2_base);
+
+	/* Walk over the child nodes and see what chipselects we use */
+	for_each_available_child_of_node(np, child) {
+		u32 csindex;
+
+		/* Figure out the chipselect */
+		ret = of_property_read_u32(child, "reg", &csindex);
+		if (ret)
+			return ret;
+
+		if (csindex > 5) {
+			dev_err(dev,
+				"invalid chipselect %u, we only support 0-5\n",
+				csindex);
+			continue;
+		}
+
+		qcom_ebi2_setup_chipselect(child,
+					   dev,
+					   ebi2_base,
+					   ebi2_xmem,
+					   csindex);
+
+		/* We have at least one child */
+		have_children = true;
+	}
+
+	if (have_children)
+		return of_platform_default_populate(np, NULL, dev);
+	return 0;
+
+err_disable_clk:
+	clk_disable_unprepare(ebi2clk);
+err_disable_2x_clk:
+	clk_disable_unprepare(ebi2xclk);
+
+	return ret;
+}
+
+static const struct of_device_id qcom_ebi2_of_match[] = {
+	{ .compatible = "qcom,msm8660-ebi2", },
+	{ .compatible = "qcom,apq8060-ebi2", },
+	{ }
+};
+
+static struct platform_driver qcom_ebi2_driver = {
+	.probe = qcom_ebi2_probe,
+	.driver = {
+		.name = "qcom-ebi2",
+		.of_match_table = qcom_ebi2_of_match,
+	},
+};
+module_platform_driver(qcom_ebi2_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm EBI2 driver");
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/drivers/bus/simple-pm-bus.c b/src/kernel/linux/v4.14/drivers/bus/simple-pm-bus.c
new file mode 100644
index 0000000..c5eb46c
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/simple-pm-bus.c
@@ -0,0 +1,58 @@
+/*
+ * Simple Power-Managed Bus Driver
+ *
+ * Copyright (C) 2014-2015 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+
+static int simple_pm_bus_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	pm_runtime_enable(&pdev->dev);
+
+	if (np)
+		of_platform_populate(np, NULL, NULL, &pdev->dev);
+
+	return 0;
+}
+
+static int simple_pm_bus_remove(struct platform_device *pdev)
+{
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id simple_pm_bus_of_match[] = {
+	{ .compatible = "simple-pm-bus", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
+
+static struct platform_driver simple_pm_bus_driver = {
+	.probe = simple_pm_bus_probe,
+	.remove = simple_pm_bus_remove,
+	.driver = {
+		.name = "simple-pm-bus",
+		.of_match_table = simple_pm_bus_of_match,
+	},
+};
+
+module_platform_driver(simple_pm_bus_driver);
+
+MODULE_DESCRIPTION("Simple Power-Managed Bus Driver");
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.14/drivers/bus/sunxi-rsb.c b/src/kernel/linux/v4.14/drivers/bus/sunxi-rsb.c
new file mode 100644
index 0000000..2ca2cc5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/sunxi-rsb.c
@@ -0,0 +1,784 @@
+/*
+ * RSB (Reduced Serial Bus) driver.
+ *
+ * Author: Chen-Yu Tsai <wens@csie.org>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * The RSB controller looks like an SMBus controller which only supports
+ * byte and word data transfers. But, it differs from standard SMBus
+ * protocol on several aspects:
+ * - it uses addresses set at runtime to address slaves. Runtime addresses
+ *   are sent to slaves using their 12bit hardware addresses. Up to 15
+ *   runtime addresses are available.
+ * - it adds a parity bit every 8bits of data and address for read and
+ *   write accesses; this replaces the ack bit
+ * - only one read access is required to read a byte (instead of a write
+ *   followed by a read access in standard SMBus protocol)
+ * - there's no Ack bit after each read access
+ *
+ * This means this bus cannot be used to interface with standard SMBus
+ * devices. Devices known to support this interface include the AXP223,
+ * AXP809, and AXP806 PMICs, and the AC100 audio codec, all from X-Powers.
+ *
+ * A description of the operation and wire protocol can be found in the
+ * RSB section of Allwinner's A80 user manual, which can be found at
+ *
+ *     https://github.com/allwinner-zh/documents/tree/master/A80
+ *
+ * This document is officially released by Allwinner.
+ *
+ * This driver is based on i2c-sun6i-p2wi.c, the P2WI bus driver.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/sunxi-rsb.h>
+#include <linux/types.h>
+
+/* RSB registers */
+#define RSB_CTRL	0x0	/* Global control */
+#define RSB_CCR		0x4	/* Clock control */
+#define RSB_INTE	0x8	/* Interrupt controls */
+#define RSB_INTS	0xc	/* Interrupt status */
+#define RSB_ADDR	0x10	/* Address to send with read/write command */
+#define RSB_DATA	0x1c	/* Data to read/write */
+#define RSB_LCR		0x24	/* Line control */
+#define RSB_DMCR	0x28	/* Device mode (init) control */
+#define RSB_CMD		0x2c	/* RSB Command */
+#define RSB_DAR		0x30	/* Device address / runtime address */
+
+/* CTRL fields */
+#define RSB_CTRL_START_TRANS		BIT(7)
+#define RSB_CTRL_ABORT_TRANS		BIT(6)
+#define RSB_CTRL_GLOBAL_INT_ENB		BIT(1)
+#define RSB_CTRL_SOFT_RST		BIT(0)
+
+/* CLK CTRL fields */
+#define RSB_CCR_SDA_OUT_DELAY(v)	(((v) & 0x7) << 8)
+#define RSB_CCR_MAX_CLK_DIV		0xff
+#define RSB_CCR_CLK_DIV(v)		((v) & RSB_CCR_MAX_CLK_DIV)
+
+/* STATUS fields */
+#define RSB_INTS_TRANS_ERR_ACK		BIT(16)
+#define RSB_INTS_TRANS_ERR_DATA_BIT(v)	(((v) >> 8) & 0xf)
+#define RSB_INTS_TRANS_ERR_DATA		GENMASK(11, 8)
+#define RSB_INTS_LOAD_BSY		BIT(2)
+#define RSB_INTS_TRANS_ERR		BIT(1)
+#define RSB_INTS_TRANS_OVER		BIT(0)
+
+/* LINE CTRL fields*/
+#define RSB_LCR_SCL_STATE		BIT(5)
+#define RSB_LCR_SDA_STATE		BIT(4)
+#define RSB_LCR_SCL_CTL			BIT(3)
+#define RSB_LCR_SCL_CTL_EN		BIT(2)
+#define RSB_LCR_SDA_CTL			BIT(1)
+#define RSB_LCR_SDA_CTL_EN		BIT(0)
+
+/* DEVICE MODE CTRL field values */
+#define RSB_DMCR_DEVICE_START		BIT(31)
+#define RSB_DMCR_MODE_DATA		(0x7c << 16)
+#define RSB_DMCR_MODE_REG		(0x3e << 8)
+#define RSB_DMCR_DEV_ADDR		0x00
+
+/* CMD values */
+#define RSB_CMD_RD8			0x8b
+#define RSB_CMD_RD16			0x9c
+#define RSB_CMD_RD32			0xa6
+#define RSB_CMD_WR8			0x4e
+#define RSB_CMD_WR16			0x59
+#define RSB_CMD_WR32			0x63
+#define RSB_CMD_STRA			0xe8
+
+/* DAR fields */
+#define RSB_DAR_RTA(v)			(((v) & 0xff) << 16)
+#define RSB_DAR_DA(v)			((v) & 0xffff)
+
+#define RSB_MAX_FREQ			20000000
+
+#define RSB_CTRL_NAME			"sunxi-rsb"
+
+struct sunxi_rsb_addr_map {
+	u16 hwaddr;
+	u8 rtaddr;
+};
+
+struct sunxi_rsb {
+	struct device *dev;
+	void __iomem *regs;
+	struct clk *clk;
+	struct reset_control *rstc;
+	struct completion complete;
+	struct mutex lock;
+	unsigned int status;
+};
+
+/* bus / slave device related functions */
+static struct bus_type sunxi_rsb_bus;
+
+static int sunxi_rsb_device_match(struct device *dev, struct device_driver *drv)
+{
+	return of_driver_match_device(dev, drv);
+}
+
+static int sunxi_rsb_device_probe(struct device *dev)
+{
+	const struct sunxi_rsb_driver *drv = to_sunxi_rsb_driver(dev->driver);
+	struct sunxi_rsb_device *rdev = to_sunxi_rsb_device(dev);
+	int ret;
+
+	if (!drv->probe)
+		return -ENODEV;
+
+	if (!rdev->irq) {
+		int irq = -ENOENT;
+
+		if (dev->of_node)
+			irq = of_irq_get(dev->of_node, 0);
+
+		if (irq == -EPROBE_DEFER)
+			return irq;
+		if (irq < 0)
+			irq = 0;
+
+		rdev->irq = irq;
+	}
+
+	ret = of_clk_set_defaults(dev->of_node, false);
+	if (ret < 0)
+		return ret;
+
+	return drv->probe(rdev);
+}
+
+static int sunxi_rsb_device_remove(struct device *dev)
+{
+	const struct sunxi_rsb_driver *drv = to_sunxi_rsb_driver(dev->driver);
+
+	return drv->remove(to_sunxi_rsb_device(dev));
+}
+
+static struct bus_type sunxi_rsb_bus = {
+	.name		= RSB_CTRL_NAME,
+	.match		= sunxi_rsb_device_match,
+	.probe		= sunxi_rsb_device_probe,
+	.remove		= sunxi_rsb_device_remove,
+	.uevent		= of_device_uevent_modalias,
+};
+
+static void sunxi_rsb_dev_release(struct device *dev)
+{
+	struct sunxi_rsb_device *rdev = to_sunxi_rsb_device(dev);
+
+	kfree(rdev);
+}
+
+/**
+ * sunxi_rsb_device_create() - allocate and add an RSB device
+ * @rsb:	RSB controller
+ * @node:	RSB slave device node
+ * @hwaddr:	RSB slave hardware address
+ * @rtaddr:	RSB slave runtime address
+ */
+static struct sunxi_rsb_device *sunxi_rsb_device_create(struct sunxi_rsb *rsb,
+		struct device_node *node, u16 hwaddr, u8 rtaddr)
+{
+	int err;
+	struct sunxi_rsb_device *rdev;
+
+	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+	if (!rdev)
+		return ERR_PTR(-ENOMEM);
+
+	rdev->rsb = rsb;
+	rdev->hwaddr = hwaddr;
+	rdev->rtaddr = rtaddr;
+	rdev->dev.bus = &sunxi_rsb_bus;
+	rdev->dev.parent = rsb->dev;
+	rdev->dev.of_node = node;
+	rdev->dev.release = sunxi_rsb_dev_release;
+
+	dev_set_name(&rdev->dev, "%s-%x", RSB_CTRL_NAME, hwaddr);
+
+	err = device_register(&rdev->dev);
+	if (err < 0) {
+		dev_err(&rdev->dev, "Can't add %s, status %d\n",
+			dev_name(&rdev->dev), err);
+		goto err_device_add;
+	}
+
+	dev_dbg(&rdev->dev, "device %s registered\n", dev_name(&rdev->dev));
+
+	return rdev;
+
+err_device_add:
+	put_device(&rdev->dev);
+
+	return ERR_PTR(err);
+}
+
+/**
+ * sunxi_rsb_device_unregister() - unregister an RSB device
+ * @rdev:	rsb_device to be removed
+ */
+static void sunxi_rsb_device_unregister(struct sunxi_rsb_device *rdev)
+{
+	device_unregister(&rdev->dev);
+}
+
+static int sunxi_rsb_remove_devices(struct device *dev, void *data)
+{
+	struct sunxi_rsb_device *rdev = to_sunxi_rsb_device(dev);
+
+	if (dev->bus == &sunxi_rsb_bus)
+		sunxi_rsb_device_unregister(rdev);
+
+	return 0;
+}
+
+/**
+ * sunxi_rsb_driver_register() - Register device driver with RSB core
+ * @rdrv:	device driver to be associated with slave-device.
+ *
+ * This API will register the client driver with the RSB framework.
+ * It is typically called from the driver's module-init function.
+ */
+int sunxi_rsb_driver_register(struct sunxi_rsb_driver *rdrv)
+{
+	rdrv->driver.bus = &sunxi_rsb_bus;
+	return driver_register(&rdrv->driver);
+}
+EXPORT_SYMBOL_GPL(sunxi_rsb_driver_register);
+
+/* common code that starts a transfer */
+static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
+{
+	if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) {
+		dev_dbg(rsb->dev, "RSB transfer still in progress\n");
+		return -EBUSY;
+	}
+
+	reinit_completion(&rsb->complete);
+
+	writel(RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER,
+	       rsb->regs + RSB_INTE);
+	writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB,
+	       rsb->regs + RSB_CTRL);
+
+	if (!wait_for_completion_io_timeout(&rsb->complete,
+					    msecs_to_jiffies(100))) {
+		dev_dbg(rsb->dev, "RSB timeout\n");
+
+		/* abort the transfer */
+		writel(RSB_CTRL_ABORT_TRANS, rsb->regs + RSB_CTRL);
+
+		/* clear any interrupt flags */
+		writel(readl(rsb->regs + RSB_INTS), rsb->regs + RSB_INTS);
+
+		return -ETIMEDOUT;
+	}
+
+	if (rsb->status & RSB_INTS_LOAD_BSY) {
+		dev_dbg(rsb->dev, "RSB busy\n");
+		return -EBUSY;
+	}
+
+	if (rsb->status & RSB_INTS_TRANS_ERR) {
+		if (rsb->status & RSB_INTS_TRANS_ERR_ACK) {
+			dev_dbg(rsb->dev, "RSB slave nack\n");
+			return -EINVAL;
+		}
+
+		if (rsb->status & RSB_INTS_TRANS_ERR_DATA) {
+			dev_dbg(rsb->dev, "RSB transfer data error\n");
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
+			  u32 *buf, size_t len)
+{
+	u32 cmd;
+	int ret;
+
+	if (!buf)
+		return -EINVAL;
+
+	switch (len) {
+	case 1:
+		cmd = RSB_CMD_RD8;
+		break;
+	case 2:
+		cmd = RSB_CMD_RD16;
+		break;
+	case 4:
+		cmd = RSB_CMD_RD32;
+		break;
+	default:
+		dev_err(rsb->dev, "Invalid access width: %zd\n", len);
+		return -EINVAL;
+	}
+
+	mutex_lock(&rsb->lock);
+
+	writel(addr, rsb->regs + RSB_ADDR);
+	writel(RSB_DAR_RTA(rtaddr), rsb->regs + RSB_DAR);
+	writel(cmd, rsb->regs + RSB_CMD);
+
+	ret = _sunxi_rsb_run_xfer(rsb);
+	if (ret)
+		goto unlock;
+
+	*buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0);
+
+unlock:
+	mutex_unlock(&rsb->lock);
+
+	return ret;
+}
+
+static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
+			   const u32 *buf, size_t len)
+{
+	u32 cmd;
+	int ret;
+
+	if (!buf)
+		return -EINVAL;
+
+	switch (len) {
+	case 1:
+		cmd = RSB_CMD_WR8;
+		break;
+	case 2:
+		cmd = RSB_CMD_WR16;
+		break;
+	case 4:
+		cmd = RSB_CMD_WR32;
+		break;
+	default:
+		dev_err(rsb->dev, "Invalid access width: %zd\n", len);
+		return -EINVAL;
+	}
+
+	mutex_lock(&rsb->lock);
+
+	writel(addr, rsb->regs + RSB_ADDR);
+	writel(RSB_DAR_RTA(rtaddr), rsb->regs + RSB_DAR);
+	writel(*buf, rsb->regs + RSB_DATA);
+	writel(cmd, rsb->regs + RSB_CMD);
+	ret = _sunxi_rsb_run_xfer(rsb);
+
+	mutex_unlock(&rsb->lock);
+
+	return ret;
+}
+
+/* RSB regmap functions */
+struct sunxi_rsb_ctx {
+	struct sunxi_rsb_device *rdev;
+	int size;
+};
+
+static int regmap_sunxi_rsb_reg_read(void *context, unsigned int reg,
+				     unsigned int *val)
+{
+	struct sunxi_rsb_ctx *ctx = context;
+	struct sunxi_rsb_device *rdev = ctx->rdev;
+
+	if (reg > 0xff)
+		return -EINVAL;
+
+	return sunxi_rsb_read(rdev->rsb, rdev->rtaddr, reg, val, ctx->size);
+}
+
+static int regmap_sunxi_rsb_reg_write(void *context, unsigned int reg,
+				      unsigned int val)
+{
+	struct sunxi_rsb_ctx *ctx = context;
+	struct sunxi_rsb_device *rdev = ctx->rdev;
+
+	return sunxi_rsb_write(rdev->rsb, rdev->rtaddr, reg, &val, ctx->size);
+}
+
+static void regmap_sunxi_rsb_free_ctx(void *context)
+{
+	struct sunxi_rsb_ctx *ctx = context;
+
+	kfree(ctx);
+}
+
+static struct regmap_bus regmap_sunxi_rsb = {
+	.reg_write = regmap_sunxi_rsb_reg_write,
+	.reg_read = regmap_sunxi_rsb_reg_read,
+	.free_context = regmap_sunxi_rsb_free_ctx,
+	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static struct sunxi_rsb_ctx *regmap_sunxi_rsb_init_ctx(struct sunxi_rsb_device *rdev,
+		const struct regmap_config *config)
+{
+	struct sunxi_rsb_ctx *ctx;
+
+	switch (config->val_bits) {
+	case 8:
+	case 16:
+	case 32:
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	ctx->rdev = rdev;
+	ctx->size = config->val_bits / 8;
+
+	return ctx;
+}
+
+struct regmap *__devm_regmap_init_sunxi_rsb(struct sunxi_rsb_device *rdev,
+					    const struct regmap_config *config,
+					    struct lock_class_key *lock_key,
+					    const char *lock_name)
+{
+	struct sunxi_rsb_ctx *ctx = regmap_sunxi_rsb_init_ctx(rdev, config);
+
+	if (IS_ERR(ctx))
+		return ERR_CAST(ctx);
+
+	return __devm_regmap_init(&rdev->dev, &regmap_sunxi_rsb, ctx, config,
+				  lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sunxi_rsb);
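+
+/*
+ * Illustrative only: a minimal sketch (modelled on existing RSB slave
+ * drivers such as the AXP PMIC MFD driver) of how a client uses the
+ * regmap helper exported above. The names and config values below are
+ * examples, not taken from this file.
+ */
+#if 0	/* example, not compiled */
+static const struct regmap_config example_rsb_regmap_config = {
+	.reg_bits = 8,		/* RSB register addresses are 8 bits wide */
+	.val_bits = 8,		/* must be 8, 16 or 32, see init_ctx above */
+	.max_register = 0xff,
+};
+
+static int example_rsb_probe(struct sunxi_rsb_device *rdev)
+{
+	struct regmap *regmap;
+
+	regmap = devm_regmap_init_sunxi_rsb(rdev,
+					    &example_rsb_regmap_config);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	/* regmap_read()/regmap_write() now go over the RSB bus */
+	return 0;
+}
+#endif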
+
+/* RSB controller driver functions */
+static irqreturn_t sunxi_rsb_irq(int irq, void *dev_id)
+{
+	struct sunxi_rsb *rsb = dev_id;
+	u32 status;
+
+	status = readl(rsb->regs + RSB_INTS);
+	rsb->status = status;
+
+	/* Clear interrupts */
+	status &= (RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR |
+		   RSB_INTS_TRANS_OVER);
+	writel(status, rsb->regs + RSB_INTS);
+
+	complete(&rsb->complete);
+
+	return IRQ_HANDLED;
+}
+
+static int sunxi_rsb_init_device_mode(struct sunxi_rsb *rsb)
+{
+	int ret = 0;
+	u32 reg;
+
+	/* send init sequence */
+	writel(RSB_DMCR_DEVICE_START | RSB_DMCR_MODE_DATA |
+	       RSB_DMCR_MODE_REG | RSB_DMCR_DEV_ADDR, rsb->regs + RSB_DMCR);
+
+	readl_poll_timeout(rsb->regs + RSB_DMCR, reg,
+			   !(reg & RSB_DMCR_DEVICE_START), 100, 250000);
+	if (reg & RSB_DMCR_DEVICE_START)
+		ret = -ETIMEDOUT;
+
+	/* clear interrupt status bits */
+	writel(readl(rsb->regs + RSB_INTS), rsb->regs + RSB_INTS);
+
+	return ret;
+}
+
+/*
+ * There are 15 valid runtime addresses, though Allwinner typically
+ * skips the first, for unknown reasons, and uses the following three.
+ *
+ * 0x17, 0x2d, 0x3a, 0x4e, 0x59, 0x63, 0x74, 0x8b,
+ * 0x9c, 0xa6, 0xb1, 0xc5, 0xd2, 0xe8, 0xff
+ *
+ * No designs with 2 RSB slave devices sharing identical hardware
+ * addresses on the same bus have been seen in the wild. All designs
+ * use 0x2d for the primary PMIC, 0x3a for the secondary PMIC if
+ * there is one, and 0x4e for peripheral ICs.
+ *
+ * The hardware does not seem to support re-setting runtime addresses.
+ * Attempts to do so result in the slave devices returning a NACK.
+ * Hence we just hardcode the mapping here, like Allwinner does.
+ */
+
+static const struct sunxi_rsb_addr_map sunxi_rsb_addr_maps[] = {
+	{ 0x3a3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */
+	{ 0x745, 0x3a }, /* Secondary PMIC: AXP806, ... */
+	{ 0xe89, 0x4e }, /* Peripheral IC: AC100, ... */
+};
+
+static u8 sunxi_rsb_get_rtaddr(u16 hwaddr)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sunxi_rsb_addr_maps); i++)
+		if (hwaddr == sunxi_rsb_addr_maps[i].hwaddr)
+			return sunxi_rsb_addr_maps[i].rtaddr;
+
+	return 0; /* 0 is an invalid runtime address */
+}
+
+static int of_rsb_register_devices(struct sunxi_rsb *rsb)
+{
+	struct device *dev = rsb->dev;
+	struct device_node *child, *np = dev->of_node;
+	u32 hwaddr;
+	u8 rtaddr;
+	int ret;
+
+	if (!np)
+		return -EINVAL;
+
+	/* Runtime addresses for all slaves should be set first */
+	for_each_available_child_of_node(np, child) {
+		dev_dbg(dev, "setting child %pOF runtime address\n",
+			child);
+
+		ret = of_property_read_u32(child, "reg", &hwaddr);
+		if (ret) {
+			dev_err(dev, "%pOF: invalid 'reg' property: %d\n",
+				child, ret);
+			continue;
+		}
+
+		rtaddr = sunxi_rsb_get_rtaddr(hwaddr);
+		if (!rtaddr) {
+			dev_err(dev, "%pOF: unknown hardware device address\n",
+				child);
+			continue;
+		}
+
+		/*
+		 * Since no devices have been registered yet, we are the
+		 * only ones using the bus, so we can skip locking it.
+		 */
+
+		/* setup command parameters */
+		writel(RSB_CMD_STRA, rsb->regs + RSB_CMD);
+		writel(RSB_DAR_RTA(rtaddr) | RSB_DAR_DA(hwaddr),
+		       rsb->regs + RSB_DAR);
+
+		/* send command */
+		ret = _sunxi_rsb_run_xfer(rsb);
+		if (ret)
+			dev_warn(dev, "%pOF: set runtime address failed: %d\n",
+				 child, ret);
+	}
+
+	/* Then we start adding devices and probing them */
+	for_each_available_child_of_node(np, child) {
+		struct sunxi_rsb_device *rdev;
+
+		dev_dbg(dev, "adding child %pOF\n", child);
+
+		ret = of_property_read_u32(child, "reg", &hwaddr);
+		if (ret)
+			continue;
+
+		rtaddr = sunxi_rsb_get_rtaddr(hwaddr);
+		if (!rtaddr)
+			continue;
+
+		rdev = sunxi_rsb_device_create(rsb, child, hwaddr, rtaddr);
+		if (IS_ERR(rdev))
+			dev_err(dev, "failed to add child device %pOF: %ld\n",
+				child, PTR_ERR(rdev));
+	}
+
+	return 0;
+}
+
+static const struct of_device_id sunxi_rsb_of_match_table[] = {
+	{ .compatible = "allwinner,sun8i-a23-rsb" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
+
+static int sunxi_rsb_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct resource *r;
+	struct sunxi_rsb *rsb;
+	unsigned long p_clk_freq;
+	u32 clk_delay, clk_freq = 3000000;
+	int clk_div, irq, ret;
+	u32 reg;
+
+	of_property_read_u32(np, "clock-frequency", &clk_freq);
+	if (clk_freq > RSB_MAX_FREQ) {
+		dev_err(dev,
+			"clock-frequency (%u Hz) is too high (max = 20MHz)\n",
+			clk_freq);
+		return -EINVAL;
+	}
+
+	rsb = devm_kzalloc(dev, sizeof(*rsb), GFP_KERNEL);
+	if (!rsb)
+		return -ENOMEM;
+
+	rsb->dev = dev;
+	platform_set_drvdata(pdev, rsb);
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rsb->regs = devm_ioremap_resource(dev, r);
+	if (IS_ERR(rsb->regs))
+		return PTR_ERR(rsb->regs);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "failed to retrieve irq: %d\n", irq);
+		return irq;
+	}
+
+	rsb->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(rsb->clk)) {
+		ret = PTR_ERR(rsb->clk);
+		dev_err(dev, "failed to retrieve clk: %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(rsb->clk);
+	if (ret) {
+		dev_err(dev, "failed to enable clk: %d\n", ret);
+		return ret;
+	}
+
+	p_clk_freq = clk_get_rate(rsb->clk);
+
+	rsb->rstc = devm_reset_control_get(dev, NULL);
+	if (IS_ERR(rsb->rstc)) {
+		ret = PTR_ERR(rsb->rstc);
+		dev_err(dev, "failed to retrieve reset controller: %d\n", ret);
+		goto err_clk_disable;
+	}
+
+	ret = reset_control_deassert(rsb->rstc);
+	if (ret) {
+		dev_err(dev, "failed to deassert reset line: %d\n", ret);
+		goto err_clk_disable;
+	}
+
+	init_completion(&rsb->complete);
+	mutex_init(&rsb->lock);
+
+	/* soft-reset the controller; the reset bit self-clears when done */
+	writel(RSB_CTRL_SOFT_RST, rsb->regs + RSB_CTRL);
+	readl_poll_timeout(rsb->regs + RSB_CTRL, reg,
+			   !(reg & RSB_CTRL_SOFT_RST), 1000, 100000);
+
+	/*
+	 * Clock frequency and delay calculation code is from
+	 * Allwinner U-boot sources.
+	 *
+	 * From A83 user manual:
+	 * bus clock frequency = parent clock frequency / (2 * (divider + 1))
+	 */
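+	/*
+	 * Worked example (parent rate assumed for illustration): with a
+	 * 24 MHz parent clock and the default 3 MHz bus clock,
+	 * clk_div = 24000000 / 3000000 / 2 = 4; RSB_CCR_CLK_DIV is then
+	 * programmed with 3, giving 24000000 / (2 * (3 + 1)) = 3 MHz.
+	 */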
+	clk_div = p_clk_freq / clk_freq / 2;
+	if (!clk_div)
+		clk_div = 1;
+	else if (clk_div > RSB_CCR_MAX_CLK_DIV + 1)
+		clk_div = RSB_CCR_MAX_CLK_DIV + 1;
+
+	clk_delay = clk_div >> 1;
+	if (!clk_delay)
+		clk_delay = 1;
+
+	dev_info(dev, "RSB running at %lu Hz\n", p_clk_freq / clk_div / 2);
+	writel(RSB_CCR_SDA_OUT_DELAY(clk_delay) | RSB_CCR_CLK_DIV(clk_div - 1),
+	       rsb->regs + RSB_CCR);
+
+	ret = devm_request_irq(dev, irq, sunxi_rsb_irq, 0, RSB_CTRL_NAME, rsb);
+	if (ret) {
+		dev_err(dev, "can't register interrupt handler irq %d: %d\n",
+			irq, ret);
+		goto err_reset_assert;
+	}
+
+	/* initialize all devices on the bus into RSB mode */
+	ret = sunxi_rsb_init_device_mode(rsb);
+	if (ret)
+		dev_warn(dev, "Initialize device mode failed: %d\n", ret);
+
+	of_rsb_register_devices(rsb);
+
+	return 0;
+
+err_reset_assert:
+	reset_control_assert(rsb->rstc);
+
+err_clk_disable:
+	clk_disable_unprepare(rsb->clk);
+
+	return ret;
+}
+
+static int sunxi_rsb_remove(struct platform_device *pdev)
+{
+	struct sunxi_rsb *rsb = platform_get_drvdata(pdev);
+
+	device_for_each_child(rsb->dev, NULL, sunxi_rsb_remove_devices);
+	reset_control_assert(rsb->rstc);
+	clk_disable_unprepare(rsb->clk);
+
+	return 0;
+}
+
+static struct platform_driver sunxi_rsb_driver = {
+	.probe = sunxi_rsb_probe,
+	.remove	= sunxi_rsb_remove,
+	.driver	= {
+		.name = RSB_CTRL_NAME,
+		.of_match_table = sunxi_rsb_of_match_table,
+	},
+};
+
+static int __init sunxi_rsb_init(void)
+{
+	int ret;
+
+	ret = bus_register(&sunxi_rsb_bus);
+	if (ret) {
+		pr_err("failed to register sunxi sunxi_rsb bus: %d\n", ret);
+		return ret;
+	}
+
+	return platform_driver_register(&sunxi_rsb_driver);
+}
+module_init(sunxi_rsb_init);
+
+static void __exit sunxi_rsb_exit(void)
+{
+	platform_driver_unregister(&sunxi_rsb_driver);
+	bus_unregister(&sunxi_rsb_bus);
+}
+module_exit(sunxi_rsb_exit);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner sunXi Reduced Serial Bus controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.14/drivers/bus/tegra-aconnect.c b/src/kernel/linux/v4.14/drivers/bus/tegra-aconnect.c
new file mode 100644
index 0000000..084ae28
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/tegra-aconnect.c
@@ -0,0 +1,94 @@
+/*
+ * Tegra ACONNECT Bus Driver
+ *
+ * Copyright (C) 2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+
+static int tegra_aconnect_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	if (!pdev->dev.of_node)
+		return -EINVAL;
+
+	ret = pm_clk_create(&pdev->dev);
+	if (ret)
+		return ret;
+
+	ret = of_pm_clk_add_clk(&pdev->dev, "ape");
+	if (ret)
+		goto clk_destroy;
+
+	ret = of_pm_clk_add_clk(&pdev->dev, "apb2ape");
+	if (ret)
+		goto clk_destroy;
+
+	pm_runtime_enable(&pdev->dev);
+
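+	/*
+	 * The devices populated below are powered through runtime PM:
+	 * resuming a child resumes this bus device, whose runtime PM ops
+	 * re-enable the "ape" and "apb2ape" clocks via pm_clk_resume().
+	 */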
+	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+	dev_info(&pdev->dev, "Tegra ACONNECT bus registered\n");
+
+	return 0;
+
+clk_destroy:
+	pm_clk_destroy(&pdev->dev);
+
+	return ret;
+}
+
+static int tegra_aconnect_remove(struct platform_device *pdev)
+{
+	pm_runtime_disable(&pdev->dev);
+
+	pm_clk_destroy(&pdev->dev);
+
+	return 0;
+}
+
+static int tegra_aconnect_runtime_resume(struct device *dev)
+{
+	return pm_clk_resume(dev);
+}
+
+static int tegra_aconnect_runtime_suspend(struct device *dev)
+{
+	return pm_clk_suspend(dev);
+}
+
+static const struct dev_pm_ops tegra_aconnect_pm_ops = {
+	SET_RUNTIME_PM_OPS(tegra_aconnect_runtime_suspend,
+			   tegra_aconnect_runtime_resume, NULL)
+};
+
+static const struct of_device_id tegra_aconnect_of_match[] = {
+	{ .compatible = "nvidia,tegra210-aconnect", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, tegra_aconnect_of_match);
+
+static struct platform_driver tegra_aconnect_driver = {
+	.probe = tegra_aconnect_probe,
+	.remove = tegra_aconnect_remove,
+	.driver = {
+		.name = "tegra-aconnect",
+		.of_match_table = tegra_aconnect_of_match,
+		.pm = &tegra_aconnect_pm_ops,
+	},
+};
+module_platform_driver(tegra_aconnect_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra ACONNECT Bus Driver");
+MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.14/drivers/bus/tegra-gmi.c b/src/kernel/linux/v4.14/drivers/bus/tegra-gmi.c
new file mode 100644
index 0000000..a657078
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/tegra-gmi.c
@@ -0,0 +1,284 @@
+/*
+ * Driver for NVIDIA Generic Memory Interface
+ *
+ * Copyright (C) 2016 Host Mobility AB. All rights reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+
+#define TEGRA_GMI_CONFIG		0x00
+#define TEGRA_GMI_CONFIG_GO		BIT(31)
+#define TEGRA_GMI_BUS_WIDTH_32BIT	BIT(30)
+#define TEGRA_GMI_MUX_MODE		BIT(28)
+#define TEGRA_GMI_RDY_BEFORE_DATA	BIT(24)
+#define TEGRA_GMI_RDY_ACTIVE_HIGH	BIT(23)
+#define TEGRA_GMI_ADV_ACTIVE_HIGH	BIT(22)
+#define TEGRA_GMI_OE_ACTIVE_HIGH	BIT(21)
+#define TEGRA_GMI_CS_ACTIVE_HIGH	BIT(20)
+#define TEGRA_GMI_CS_SELECT(x)		(((x) & 0x7) << 4)
+
+#define TEGRA_GMI_TIMING0		0x10
+#define TEGRA_GMI_MUXED_WIDTH(x)	(((x) & 0xf) << 12)
+#define TEGRA_GMI_HOLD_WIDTH(x)		(((x) & 0xf) << 8)
+#define TEGRA_GMI_ADV_WIDTH(x)		(((x) & 0xf) << 4)
+#define TEGRA_GMI_CE_WIDTH(x)		((x) & 0xf)
+
+#define TEGRA_GMI_TIMING1		0x14
+#define TEGRA_GMI_WE_WIDTH(x)		(((x) & 0xff) << 16)
+#define TEGRA_GMI_OE_WIDTH(x)		(((x) & 0xff) << 8)
+#define TEGRA_GMI_WAIT_WIDTH(x)		((x) & 0xff)
+
+#define TEGRA_GMI_MAX_CHIP_SELECT	8
+
+struct tegra_gmi {
+	struct device *dev;
+	void __iomem *base;
+	struct clk *clk;
+	struct reset_control *rst;
+
+	u32 snor_config;
+	u32 snor_timing0;
+	u32 snor_timing1;
+};
+
+static int tegra_gmi_enable(struct tegra_gmi *gmi)
+{
+	int err;
+
+	err = clk_prepare_enable(gmi->clk);
+	if (err < 0) {
+		dev_err(gmi->dev, "failed to enable clock: %d\n", err);
+		return err;
+	}
+
+	reset_control_assert(gmi->rst);
+	usleep_range(2000, 4000);
+	reset_control_deassert(gmi->rst);
+
+	writel(gmi->snor_timing0, gmi->base + TEGRA_GMI_TIMING0);
+	writel(gmi->snor_timing1, gmi->base + TEGRA_GMI_TIMING1);
+
+	gmi->snor_config |= TEGRA_GMI_CONFIG_GO;
+	writel(gmi->snor_config, gmi->base + TEGRA_GMI_CONFIG);
+
+	return 0;
+}
+
+static void tegra_gmi_disable(struct tegra_gmi *gmi)
+{
+	u32 config;
+
+	/* stop GMI operation */
+	config = readl(gmi->base + TEGRA_GMI_CONFIG);
+	config &= ~TEGRA_GMI_CONFIG_GO;
+	writel(config, gmi->base + TEGRA_GMI_CONFIG);
+
+	reset_control_assert(gmi->rst);
+	clk_disable_unprepare(gmi->clk);
+}
+
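+/*
+ * Illustrative child layout (values assumed for illustration): the chip
+ * select is taken from the second cell of the child's 4-cell "ranges"
+ * entry (the CS cell of the parent address), falling back to the first
+ * cell of "reg" when "ranges" is absent or empty:
+ *
+ *	bus@4,0 {
+ *		compatible = "simple-bus";
+ *		reg = <4 0 0x40100>;
+ *		ranges = <0 4 0 0x40100>;
+ *	};
+ */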
+static int tegra_gmi_parse_dt(struct tegra_gmi *gmi)
+{
+	struct device_node *child;
+	u32 property, ranges[4];
+	int err;
+
+	child = of_get_next_available_child(gmi->dev->of_node, NULL);
+	if (!child) {
+		dev_err(gmi->dev, "no child nodes found\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * We currently support only one child device due to the lack of
+	 * chip-select address decoding, which means we drive only one
+	 * chip-select line from the GMI controller.
+	 */
+	if (of_get_child_count(gmi->dev->of_node) > 1)
+		dev_warn(gmi->dev, "only one child device is supported\n");
+
+	if (of_property_read_bool(child, "nvidia,snor-data-width-32bit"))
+		gmi->snor_config |= TEGRA_GMI_BUS_WIDTH_32BIT;
+
+	if (of_property_read_bool(child, "nvidia,snor-mux-mode"))
+		gmi->snor_config |= TEGRA_GMI_MUX_MODE;
+
+	if (of_property_read_bool(child, "nvidia,snor-rdy-active-before-data"))
+		gmi->snor_config |= TEGRA_GMI_RDY_BEFORE_DATA;
+
+	if (of_property_read_bool(child, "nvidia,snor-rdy-active-high"))
+		gmi->snor_config |= TEGRA_GMI_RDY_ACTIVE_HIGH;
+
+	if (of_property_read_bool(child, "nvidia,snor-adv-active-high"))
+		gmi->snor_config |= TEGRA_GMI_ADV_ACTIVE_HIGH;
+
+	if (of_property_read_bool(child, "nvidia,snor-oe-active-high"))
+		gmi->snor_config |= TEGRA_GMI_OE_ACTIVE_HIGH;
+
+	if (of_property_read_bool(child, "nvidia,snor-cs-active-high"))
+		gmi->snor_config |= TEGRA_GMI_CS_ACTIVE_HIGH;
+
+	/* Decode the CS# */
+	err = of_property_read_u32_array(child, "ranges", ranges, 4);
+	if (err < 0) {
+		/* Invalid binding */
+		if (err == -EOVERFLOW) {
+			dev_err(gmi->dev,
+				"failed to decode CS: invalid ranges length\n");
+			goto error_cs;
+		}
+
+		/*
+		 * Reaching here means the child node's "ranges" property is
+		 * empty or missing entirely.  Attempt to decode the CS# from
+		 * the "reg" property instead.
+		 */
+		err = of_property_read_u32(child, "reg", &property);
+		if (err < 0) {
+			dev_err(gmi->dev,
+				"failed to decode CS: no reg property found\n");
+			goto error_cs;
+		}
+	} else {
+		property = ranges[1];
+	}
+
+	/* Valid chip selects are CS0-CS7 */
+	if (property >= TEGRA_GMI_MAX_CHIP_SELECT) {
+		dev_err(gmi->dev, "invalid chip select: %u\n", property);
+		err = -EINVAL;
+		goto error_cs;
+	}
+
+	gmi->snor_config |= TEGRA_GMI_CS_SELECT(property);
+
+	/* The fallback values applied below are the hardware's reset values */
+	if (!of_property_read_u32(child, "nvidia,snor-muxed-width", &property))
+		gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(property);
+	else
+		gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(1);
+
+	if (!of_property_read_u32(child, "nvidia,snor-hold-width", &property))
+		gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(property);
+	else
+		gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(1);
+
+	if (!of_property_read_u32(child, "nvidia,snor-adv-width", &property))
+		gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(property);
+	else
+		gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(1);
+
+	if (!of_property_read_u32(child, "nvidia,snor-ce-width", &property))
+		gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(property);
+	else
+		gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(4);
+
+	if (!of_property_read_u32(child, "nvidia,snor-we-width", &property))
+		gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(property);
+	else
+		gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(1);
+
+	if (!of_property_read_u32(child, "nvidia,snor-oe-width", &property))
+		gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(property);
+	else
+		gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(1);
+
+	if (!of_property_read_u32(child, "nvidia,snor-wait-width", &property))
+		gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(property);
+	else
+		gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(3);
+
+error_cs:
+	of_node_put(child);
+	return err;
+}
+
+static int tegra_gmi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct tegra_gmi *gmi;
+	struct resource *res;
+	int err;
+
+	gmi = devm_kzalloc(dev, sizeof(*gmi), GFP_KERNEL);
+	if (!gmi)
+		return -ENOMEM;
+
+	gmi->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	gmi->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(gmi->base))
+		return PTR_ERR(gmi->base);
+
+	gmi->clk = devm_clk_get(dev, "gmi");
+	if (IS_ERR(gmi->clk)) {
+		dev_err(dev, "can not get clock\n");
+		return PTR_ERR(gmi->clk);
+	}
+
+	gmi->rst = devm_reset_control_get(dev, "gmi");
+	if (IS_ERR(gmi->rst)) {
+		dev_err(dev, "can not get reset\n");
+		return PTR_ERR(gmi->rst);
+	}
+
+	err = tegra_gmi_parse_dt(gmi);
+	if (err)
+		return err;
+
+	err = tegra_gmi_enable(gmi);
+	if (err < 0)
+		return err;
+
+	err = of_platform_default_populate(dev->of_node, NULL, dev);
+	if (err < 0) {
+		dev_err(dev, "fail to create devices.\n");
+		tegra_gmi_disable(gmi);
+		return err;
+	}
+
+	platform_set_drvdata(pdev, gmi);
+
+	return 0;
+}
+
+static int tegra_gmi_remove(struct platform_device *pdev)
+{
+	struct tegra_gmi *gmi = platform_get_drvdata(pdev);
+
+	of_platform_depopulate(gmi->dev);
+	tegra_gmi_disable(gmi);
+
+	return 0;
+}
+
+static const struct of_device_id tegra_gmi_id_table[] = {
+	{ .compatible = "nvidia,tegra20-gmi", },
+	{ .compatible = "nvidia,tegra30-gmi", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, tegra_gmi_id_table);
+
+static struct platform_driver tegra_gmi_driver = {
+	.probe = tegra_gmi_probe,
+	.remove = tegra_gmi_remove,
+	.driver = {
+		.name		= "tegra-gmi",
+		.of_match_table	= tegra_gmi_id_table,
+	},
+};
+module_platform_driver(tegra_gmi_driver);
+
+MODULE_AUTHOR("Mirza Krak <mirza.krak@gmail.com");
+MODULE_DESCRIPTION("NVIDIA Tegra GMI Bus Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/src/kernel/linux/v4.14/drivers/bus/uniphier-system-bus.c b/src/kernel/linux/v4.14/drivers/bus/uniphier-system-bus.c
new file mode 100644
index 0000000..f76be6b
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/uniphier-system-bus.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+/* System Bus Controller registers */
+#define UNIPHIER_SBC_BASE	0x100	/* base address of bank0 space */
+#define    UNIPHIER_SBC_BASE_BE		BIT(0)	/* bank_enable */
+#define UNIPHIER_SBC_CTRL0	0x200	/* timing parameter 0 of bank0 */
+#define UNIPHIER_SBC_CTRL1	0x204	/* timing parameter 1 of bank0 */
+#define UNIPHIER_SBC_CTRL2	0x208	/* timing parameter 2 of bank0 */
+#define UNIPHIER_SBC_CTRL3	0x20c	/* timing parameter 3 of bank0 */
+#define UNIPHIER_SBC_CTRL4	0x300	/* timing parameter 4 of bank0 */
+
+#define UNIPHIER_SBC_STRIDE	0x10	/* register stride to next bank */
+#define UNIPHIER_SBC_NR_BANKS	8	/* number of banks (chip select) */
+#define UNIPHIER_SBC_BASE_DUMMY	0xffffffff	/* data to squash bank 0, 1 */
+
+struct uniphier_system_bus_bank {
+	u32 base;
+	u32 end;
+};
+
+struct uniphier_system_bus_priv {
+	struct device *dev;
+	void __iomem *membase;
+	struct uniphier_system_bus_bank bank[UNIPHIER_SBC_NR_BANKS];
+};
+
+static int uniphier_system_bus_add_bank(struct uniphier_system_bus_priv *priv,
+					int bank, u32 addr, u64 paddr, u32 size)
+{
+	u64 end, mask;
+
+	dev_dbg(priv->dev,
+		"range found: bank = %d, addr = %08x, paddr = %08llx, size = %08x\n",
+		bank, addr, paddr, size);
+
+	if (bank >= ARRAY_SIZE(priv->bank)) {
+		dev_err(priv->dev, "unsupported bank number %d\n", bank);
+		return -EINVAL;
+	}
+
+	if (priv->bank[bank].base || priv->bank[bank].end) {
+		dev_err(priv->dev,
+			"range for bank %d has already been specified\n", bank);
+		return -EINVAL;
+	}
+
+	if (paddr > U32_MAX) {
+		dev_err(priv->dev, "base address %llx is too high\n", paddr);
+		return -EINVAL;
+	}
+
+	end = paddr + size;
+
+	if (addr > paddr) {
+		dev_err(priv->dev,
+			"base %08x cannot be mapped to %08llx of parent\n",
+			addr, paddr);
+		return -EINVAL;
+	}
+	paddr -= addr;
+
+	paddr = round_down(paddr, 0x00020000);
+	end = round_up(end, 0x00020000);
+
+	if (end > U32_MAX) {
+		dev_err(priv->dev, "end address %08llx is too high\n", end);
+		return -EINVAL;
+	}
+	mask = paddr ^ (end - 1);
+	mask = roundup_pow_of_two(mask);
+
+	paddr = round_down(paddr, mask);
+	end = round_up(end, mask);
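+	/*
+	 * Worked example (values assumed for illustration): paddr =
+	 * 0x41000000 and end = 0x43000000 give mask = 0x03ffffff, which
+	 * rounds up to 0x04000000; the bank is then widened to the
+	 * aligned window [0x40000000, 0x44000000).
+	 */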
+
+	priv->bank[bank].base = paddr;
+	priv->bank[bank].end = end;
+
+	dev_dbg(priv->dev, "range added: bank = %d, addr = %08x, end = %08x\n",
+		bank, priv->bank[bank].base, priv->bank[bank].end);
+
+	return 0;
+}
+
+static int uniphier_system_bus_check_overlap(
+				const struct uniphier_system_bus_priv *priv)
+{
+	int i, j;
+
+	for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
+		for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
+			if (priv->bank[i].end > priv->bank[j].base &&
+			    priv->bank[i].base < priv->bank[j].end) {
+				dev_err(priv->dev,
+					"region overlap between bank%d and bank%d\n",
+					i, j);
+				return -EINVAL;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void uniphier_system_bus_check_boot_swap(
+					struct uniphier_system_bus_priv *priv)
+{
+	void __iomem *base_reg = priv->membase + UNIPHIER_SBC_BASE;
+	int is_swapped;
+
+	is_swapped = !(readl(base_reg) & UNIPHIER_SBC_BASE_BE);
+
+	dev_dbg(priv->dev, "Boot Swap: %s\n", is_swapped ? "on" : "off");
+
+	/*
+	 * If BOOT_SWAP was asserted on power-on-reset, the CS0 and CS1 are
+	 * swapped.  In this case, bank0 and bank1 should be swapped as well.
+	 */
+	if (is_swapped)
+		swap(priv->bank[0], priv->bank[1]);
+}
+
+static void uniphier_system_bus_set_reg(
+				const struct uniphier_system_bus_priv *priv)
+{
+	void __iomem *base_reg = priv->membase + UNIPHIER_SBC_BASE;
+	u32 base, end, mask, val;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
+		base = priv->bank[i].base;
+		end = priv->bank[i].end;
+
+		if (base == end) {
+			/*
+			 * If SBC_BASE0 or SBC_BASE1 is set to zero, any access
+			 * to the system bus space is routed to bank 0 (if boot
+			 * swap is off) or bank 1 (if boot swap is on), so CPUs
+			 * cannot reach bank 2 or later.  In other words, bank
+			 * 0/1 cannot be disabled even if its bank_enable bit
+			 * is cleared.  This seems odd, but it is how this
+			 * hardware works.  As a workaround, dummy data
+			 * (0xffffffff) should be set when bank 0/1 is unused.
+			 * Bank 2 and later can simply be disabled by clearing
+			 * the bank_enable bit.
+			 */
+			if (i < 2)
+				val = UNIPHIER_SBC_BASE_DUMMY;
+			else
+				val = 0;
+		} else {
+			mask = base ^ (end - 1);
+
+			val = base & 0xfffe0000;
+			val |= (~mask >> 16) & 0xfffe;
+			val |= UNIPHIER_SBC_BASE_BE;
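+			/*
+			 * Worked example (values assumed for illustration):
+			 * base = 0x40000000 and end = 0x44000000 give
+			 * mask = 0x03ffffff, so val = 0x40000000 | 0xfc00 |
+			 * UNIPHIER_SBC_BASE_BE = 0x4000fc01.
+			 */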
+		}
+		dev_dbg(priv->dev, "SBC_BASE[%d] = 0x%08x\n", i, val);
+
+		writel(val, base_reg + UNIPHIER_SBC_STRIDE * i);
+	}
+}
+
+static int uniphier_system_bus_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct uniphier_system_bus_priv *priv;
+	struct resource *regs;
+	const __be32 *ranges;
+	u32 cells, addr, size;
+	u64 paddr;
+	int pna, bank, rlen, rone, ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->membase = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(priv->membase))
+		return PTR_ERR(priv->membase);
+
+	priv->dev = dev;
+
+	pna = of_n_addr_cells(dev->of_node);
+
+	ret = of_property_read_u32(dev->of_node, "#address-cells", &cells);
+	if (ret) {
+		dev_err(dev, "failed to get #address-cells\n");
+		return ret;
+	}
+	if (cells != 2) {
+		dev_err(dev, "#address-cells must be 2\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(dev->of_node, "#size-cells", &cells);
+	if (ret) {
+		dev_err(dev, "failed to get #size-cells\n");
+		return ret;
+	}
+	if (cells != 1) {
+		dev_err(dev, "#size-cells must be 1\n");
+		return -EINVAL;
+	}
+
+	ranges = of_get_property(dev->of_node, "ranges", &rlen);
+	if (!ranges) {
+		dev_err(dev, "failed to get ranges property\n");
+		return -ENOENT;
+	}
+
+	rlen /= sizeof(*ranges);
+	rone = pna + 2;
+
+	for (; rlen >= rone; rlen -= rone) {
+		bank = be32_to_cpup(ranges++);
+		addr = be32_to_cpup(ranges++);
+		paddr = of_translate_address(dev->of_node, ranges);
+		if (paddr == OF_BAD_ADDR)
+			return -EINVAL;
+		ranges += pna;
+		size = be32_to_cpup(ranges++);
+
+		ret = uniphier_system_bus_add_bank(priv, bank, addr,
+						   paddr, size);
+		if (ret)
+			return ret;
+	}
+
+	ret = uniphier_system_bus_check_overlap(priv);
+	if (ret)
+		return ret;
+
+	uniphier_system_bus_check_boot_swap(priv);
+
+	uniphier_system_bus_set_reg(priv);
+
+	platform_set_drvdata(pdev, priv);
+
+	/* The bus is now configured; populate the platform devices below it */
+	return of_platform_default_populate(dev->of_node, NULL, dev);
+}
+
+static int __maybe_unused uniphier_system_bus_resume(struct device *dev)
+{
+	uniphier_system_bus_set_reg(dev_get_drvdata(dev));
+
+	return 0;
+}
+
+static const struct dev_pm_ops uniphier_system_bus_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(NULL, uniphier_system_bus_resume)
+};
+
+static const struct of_device_id uniphier_system_bus_match[] = {
+	{ .compatible = "socionext,uniphier-system-bus" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_system_bus_match);
+
+static struct platform_driver uniphier_system_bus_driver = {
+	.probe		= uniphier_system_bus_probe,
+	.driver = {
+		.name	= "uniphier-system-bus",
+		.of_match_table = uniphier_system_bus_match,
+		.pm = &uniphier_system_bus_pm_ops,
+	},
+};
+module_platform_driver(uniphier_system_bus_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier System Bus driver");
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/drivers/bus/vexpress-config.c b/src/kernel/linux/v4.14/drivers/bus/vexpress-config.c
new file mode 100644
index 0000000..493e7b9
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/bus/vexpress-config.c
@@ -0,0 +1,210 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/vexpress.h>
+
+
+struct vexpress_config_bridge {
+	struct vexpress_config_bridge_ops *ops;
+	void *context;
+};
+
+
+static DEFINE_MUTEX(vexpress_config_mutex);
+static struct class *vexpress_config_class;
+static u32 vexpress_config_site_master = VEXPRESS_SITE_MASTER;
+
+
+void vexpress_config_set_master(u32 site)
+{
+	vexpress_config_site_master = site;
+}
+
+u32 vexpress_config_get_master(void)
+{
+	return vexpress_config_site_master;
+}
+
+void vexpress_config_lock(void *arg)
+{
+	mutex_lock(&vexpress_config_mutex);
+}
+
+void vexpress_config_unlock(void *arg)
+{
+	mutex_unlock(&vexpress_config_mutex);
+}
+
+
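+/*
+ * Look up an "arm,vexpress,*" property, walking up the tree from the
+ * given node so that a value set on an ancestor applies to all of its
+ * children; *val is left at 0 when no node in the chain defines it.
+ */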
+static void vexpress_config_find_prop(struct device_node *node,
+		const char *name, u32 *val)
+{
+	/* Default value */
+	*val = 0;
+
+	of_node_get(node);
+	while (node) {
+		if (of_property_read_u32(node, name, val) == 0) {
+			of_node_put(node);
+			return;
+		}
+		node = of_get_next_parent(node);
+	}
+}
+
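+/*
+ * Resolve a node's site/position/DCC topology, translating the "master
+ * site" placeholder into the real site; fails if no master site has
+ * been set via vexpress_config_set_master() yet.
+ */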
+int vexpress_config_get_topo(struct device_node *node, u32 *site,
+		u32 *position, u32 *dcc)
+{
+	vexpress_config_find_prop(node, "arm,vexpress,site", site);
+	if (*site == VEXPRESS_SITE_MASTER)
+		*site = vexpress_config_site_master;
+	if (WARN_ON(vexpress_config_site_master == VEXPRESS_SITE_MASTER))
+		return -EINVAL;
+	vexpress_config_find_prop(node, "arm,vexpress,position", position);
+	vexpress_config_find_prop(node, "arm,vexpress,dcc", dcc);
+
+	return 0;
+}
+
+
+static void vexpress_config_devres_release(struct device *dev, void *res)
+{
+	struct vexpress_config_bridge *bridge = dev_get_drvdata(dev->parent);
+	struct regmap *regmap = res;
+
+	bridge->ops->regmap_exit(regmap, bridge->context);
+}
+
+struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
+{
+	struct vexpress_config_bridge *bridge;
+	struct regmap *regmap;
+	struct regmap **res;
+
+	if (WARN_ON(dev->parent->class != vexpress_config_class))
+		return ERR_PTR(-ENODEV);
+
+	bridge = dev_get_drvdata(dev->parent);
+	if (WARN_ON(!bridge))
+		return ERR_PTR(-EINVAL);
+
+	res = devres_alloc(vexpress_config_devres_release, sizeof(*res),
+			GFP_KERNEL);
+	if (!res)
+		return ERR_PTR(-ENOMEM);
+
+	regmap = (bridge->ops->regmap_init)(dev, bridge->context);
+	if (IS_ERR(regmap)) {
+		devres_free(res);
+		return regmap;
+	}
+
+	*res = regmap;
+	devres_add(dev, res);
+
+	return regmap;
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_vexpress_config);
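+
+/*
+ * Minimal usage sketch (caller names assumed for illustration): a child
+ * device of a registered bridge obtains its config regmap with
+ *
+ *	regmap = devm_regmap_init_vexpress_config(&pdev->dev);
+ *	if (IS_ERR(regmap))
+ *		return PTR_ERR(regmap);
+ *
+ * and the regmap is torn down automatically via devres when that
+ * device is unbound.
+ */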
+
+struct device *vexpress_config_bridge_register(struct device *parent,
+		struct vexpress_config_bridge_ops *ops, void *context)
+{
+	struct device *dev;
+	struct vexpress_config_bridge *bridge;
+
+	if (!vexpress_config_class) {
+		vexpress_config_class = class_create(THIS_MODULE,
+				"vexpress-config");
+		if (IS_ERR(vexpress_config_class))
+			return ERR_CAST(vexpress_config_class);
+	}
+
+	dev = device_create(vexpress_config_class, parent, 0,
+			NULL, "%s.bridge", dev_name(parent));
+
+	if (IS_ERR(dev))
+		return dev;
+
+	bridge = devm_kmalloc(dev, sizeof(*bridge), GFP_KERNEL);
+	if (!bridge) {
+		/* device_unregister() drops the device_create() reference */
+		device_unregister(dev);
+		return ERR_PTR(-ENOMEM);
+	}
+	bridge->ops = ops;
+	bridge->context = context;
+
+	dev_set_drvdata(dev, bridge);
+
+	dev_dbg(parent, "Registered bridge '%s', parent node %p\n",
+			dev_name(dev), parent->of_node);
+
+	return dev;
+}
+
+
+static int vexpress_config_node_match(struct device *dev, const void *data)
+{
+	const struct device_node *node = data;
+
+	dev_dbg(dev, "Parent node %p, looking for %p\n",
+			dev->parent->of_node, node);
+
+	return dev->parent->of_node == node;
+}
+
+static int vexpress_config_populate(struct device_node *node)
+{
+	struct device_node *bridge;
+	struct device *parent;
+	int ret;
+
+	bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
+	if (!bridge)
+		return -EINVAL;
+
+	parent = class_find_device(vexpress_config_class, NULL, bridge,
+			vexpress_config_node_match);
+	of_node_put(bridge);
+	if (WARN_ON(!parent))
+		return -ENODEV;
+
+	ret = of_platform_populate(node, NULL, NULL, parent);
+
+	put_device(parent);
+
+	return ret;
+}
+
+static int __init vexpress_config_init(void)
+{
+	int err = 0;
+	struct device_node *node;
+
+	/* Need the config devices early, before the "normal" devices... */
+	for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
+		err = vexpress_config_populate(node);
+		if (err) {
+			of_node_put(node);
+			break;
+		}
+	}
+
+	return err;
+}
+postcore_initcall(vexpress_config_init);
+