[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/Kconfig b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/Kconfig
new file mode 100644
index 0000000..204a167
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/Kconfig
@@ -0,0 +1,90 @@
+# SPDX-License-Identifier: GPL-2.0
+if CPU_CAVIUM_OCTEON
+
+config CAVIUM_CN63XXP1
+	bool "Enable CN63XXP1 errata workarounds"
+	default "n"
+	help
+	  The CN63XXP1 chip requires build-time workarounds to
+	  function reliably; select this option to enable them.  These
+	  workarounds cause a slight decrease in performance on
+	  non-CN63XXP1 hardware, so it is recommended to select "n"
+	  unless it is known that the workarounds are needed.
+
+config CAVIUM_OCTEON_CVMSEG_SIZE
+	int "Number of L1 cache lines reserved for CVMSEG memory"
+	range 0 54
+	default 1
+	help
+	  CVMSEG LM is a segment that accesses portions of the dcache as a
+	  local memory; the larger CVMSEG is, the smaller the cache is.
+	  This selects the size of CVMSEG LM, which is in cache blocks. The
+	  legal range is from zero to 54 cache blocks (i.e. with 128-byte
+	  cache blocks, CVMSEG LM is between zero and 6912 bytes).
+
+endif # CPU_CAVIUM_OCTEON
+
+if CAVIUM_OCTEON_SOC
+
+config CAVIUM_OCTEON_LOCK_L2
+	bool "Lock often used kernel code in the L2"
+	default "y"
+	help
+	  Enable locking parts of the kernel into the L2 cache.
+
+config CAVIUM_OCTEON_LOCK_L2_TLB
+	bool "Lock the TLB handler in L2"
+	depends on CAVIUM_OCTEON_LOCK_L2
+	default "y"
+	help
+	  Lock the low level TLB fast path into L2.
+
+config CAVIUM_OCTEON_LOCK_L2_EXCEPTION
+	bool "Lock the exception handler in L2"
+	depends on CAVIUM_OCTEON_LOCK_L2
+	default "y"
+	help
+	  Lock the low level exception handler into L2.
+
+config CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
+	bool "Lock the interrupt handler in L2"
+	depends on CAVIUM_OCTEON_LOCK_L2
+	default "y"
+	help
+	  Lock the low level interrupt handler into L2.
+
+config CAVIUM_OCTEON_LOCK_L2_INTERRUPT
+	bool "Lock the 2nd level interrupt handler in L2"
+	depends on CAVIUM_OCTEON_LOCK_L2
+	default "y"
+	help
+	  Lock the 2nd level interrupt handler in L2.
+
+config CAVIUM_OCTEON_LOCK_L2_MEMCPY
+	bool "Lock memcpy() in L2"
+	depends on CAVIUM_OCTEON_LOCK_L2
+	default "y"
+	help
+	  Lock the kernel's implementation of memcpy() into L2.
+
+config IOMMU_HELPER
+	bool
+
+config NEED_SG_DMA_LENGTH
+	bool
+
+config SWIOTLB
+	def_bool y
+	select IOMMU_HELPER
+	select NEED_SG_DMA_LENGTH
+
+config OCTEON_ILM
+	tristate "Module to measure interrupt latency using Octeon CIU Timer"
+	help
+	  This driver is a module to measure interrupt latency using the
+	  CIU Timers on Octeon.
+
+	  To compile this driver as a module, choose M here.  The module
+	  will be called octeon-ilm.
+
+endif # CAVIUM_OCTEON_SOC
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/Makefile b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/Makefile
new file mode 100644
index 0000000..7c02e54
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/Makefile
@@ -0,0 +1,21 @@
+#
+# Makefile for the Cavium Octeon specific kernel interface routines
+# under Linux.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2005-2009 Cavium Networks
+#
+
+obj-y := cpu.o setup.o octeon-platform.o octeon-irq.o csrc-octeon.o
+obj-y += dma-octeon.o
+obj-y += octeon-memcpy.o
+obj-y += executive/
+obj-y += crypto/
+
+obj-$(CONFIG_MTD)		      += flash_setup.o
+obj-$(CONFIG_SMP)		      += smp.o
+obj-$(CONFIG_OCTEON_ILM)	      += oct_ilm.o
+obj-$(CONFIG_USB)		      += octeon-usb.o
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/Platform b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/Platform
new file mode 100644
index 0000000..45be853
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/Platform
@@ -0,0 +1,7 @@
+#
+# Cavium Octeon
+#
+platform-$(CONFIG_CAVIUM_OCTEON_SOC)	+= cavium-octeon/
+cflags-$(CONFIG_CAVIUM_OCTEON_SOC)	+=				\
+		-I$(srctree)/arch/mips/include/asm/mach-cavium-octeon
+load-$(CONFIG_CAVIUM_OCTEON_SOC)	+= 0xffffffff81100000
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/cpu.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/cpu.c
new file mode 100644
index 0000000..036d56c
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/cpu.c
@@ -0,0 +1,50 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Wind River Systems,
+ *   written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/notifier.h>
+#include <linux/prefetch.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/cop2.h>
+#include <asm/current.h>
+#include <asm/mipsregs.h>
+#include <asm/page.h>
+#include <asm/octeon/octeon.h>
+
+static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action,
+	void *data)
+{
+	unsigned long flags;
+	unsigned int status;
+
+	switch (action) {
+	case CU2_EXCEPTION:
+		prefetch(&current->thread.cp2);
+		local_irq_save(flags);
+		KSTK_STATUS(current) |= ST0_CU2;
+		status = read_c0_status();
+		write_c0_status(status | ST0_CU2);
+		octeon_cop2_restore(&(current->thread.cp2));
+		write_c0_status(status & ~ST0_CU2);
+		local_irq_restore(flags);
+
+		return NOTIFY_BAD;	/* Don't call default notifier */
+	}
+
+	return NOTIFY_OK;		/* Let default notifier send signals */
+}
+
+static int __init cnmips_cu2_setup(void)
+{
+	return cu2_notifier(cnmips_cu2_call, 0);
+}
+early_initcall(cnmips_cu2_setup);
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/Makefile b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/Makefile
new file mode 100644
index 0000000..db26c73
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# OCTEON-specific crypto modules.
+#
+
+obj-y += octeon-crypto.o
+
+obj-$(CONFIG_CRYPTO_MD5_OCTEON)		+= octeon-md5.o
+obj-$(CONFIG_CRYPTO_SHA1_OCTEON)	+= octeon-sha1.o
+obj-$(CONFIG_CRYPTO_SHA256_OCTEON)	+= octeon-sha256.o
+obj-$(CONFIG_CRYPTO_SHA512_OCTEON)	+= octeon-sha512.o
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-crypto.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-crypto.c
new file mode 100644
index 0000000..cfb4a14
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-crypto.c
@@ -0,0 +1,79 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2012 Cavium Networks
+ */
+
+#include <asm/cop2.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/sched/task_stack.h>
+
+#include "octeon-crypto.h"
+
+/**
+ * Enable access to Octeon's COP2 crypto hardware for kernel use. Wrap any
+ * crypto operations in calls to octeon_crypto_enable/disable in order to make
+ * sure the state of COP2 isn't corrupted if userspace is also performing
+ * hardware crypto operations. Allocate the state parameter on the stack.
+ * Returns with preemption disabled.
+ *
+ * @state: Pointer to state structure to store current COP2 state in.
+ *
+ * Returns: Flags to be passed to octeon_crypto_disable()
+ */
+unsigned long octeon_crypto_enable(struct octeon_cop2_state *state)
+{
+	int status;
+	unsigned long flags;
+
+	preempt_disable();
+	local_irq_save(flags);
+	status = read_c0_status();
+	write_c0_status(status | ST0_CU2);
+	if (KSTK_STATUS(current) & ST0_CU2) {
+		octeon_cop2_save(&(current->thread.cp2));
+		KSTK_STATUS(current) &= ~ST0_CU2;
+		status &= ~ST0_CU2;
+	} else if (status & ST0_CU2) {
+		octeon_cop2_save(state);
+	}
+	local_irq_restore(flags);
+	return status & ST0_CU2;
+}
+EXPORT_SYMBOL_GPL(octeon_crypto_enable);
+
+/**
+ * Disable access to Octeon's COP2 crypto hardware in the kernel. This must be
+ * called after octeon_crypto_enable() and before any context switch or
+ * return to userspace.
+ *
+ * @state:	Pointer to COP2 state to restore
+ * @flags:	Return value from octeon_crypto_enable()
+ */
+void octeon_crypto_disable(struct octeon_cop2_state *state,
+			   unsigned long crypto_flags)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (crypto_flags & ST0_CU2)
+		octeon_cop2_restore(state);
+	else
+		write_c0_status(read_c0_status() & ~ST0_CU2);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+EXPORT_SYMBOL_GPL(octeon_crypto_disable);
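+
+/*
+ * Usage sketch (illustrative only): wrap COP2 use in an enable/disable
+ * pair so user-space COP2 state is preserved, as the octeon-md5/sha
+ * drivers do:
+ *
+ *	struct octeon_cop2_state state;
+ *	unsigned long flags = octeon_crypto_enable(&state);
+ *	... issue dmtc2/dmfc2 crypto operations ...
+ *	octeon_crypto_disable(&state, flags);
+ */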
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-crypto.h b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-crypto.h
new file mode 100644
index 0000000..7315cc3
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-crypto.h
@@ -0,0 +1,156 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012-2013 Cavium Inc., All Rights Reserved.
+ *
+ * MD5/SHA1/SHA256/SHA512 instruction definitions added by
+ * Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ */
+#ifndef __LINUX_OCTEON_CRYPTO_H
+#define __LINUX_OCTEON_CRYPTO_H
+
+#include <linux/sched.h>
+#include <asm/mipsregs.h>
+
+#define OCTEON_CR_OPCODE_PRIORITY 300
+
+extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state);
+extern void octeon_crypto_disable(struct octeon_cop2_state *state,
+				  unsigned long flags);
+
+/*
+ * Macros needed to implement MD5/SHA1/SHA256:
+ */
+
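+/*
+ * Note: the index argument of these macros is pasted into the instruction
+ * encoding via STR(), so it must be a compile-time literal constant, not
+ * a variable.
+ */
+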
+/*
+ * The index can be 0-1 (MD5) or 0-2 (SHA1), 0-3 (SHA256).
+ */
+#define write_octeon_64bit_hash_dword(value, index)	\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x0048+" STR(index)		\
+	:						\
+	: [rt] "d" (cpu_to_be64(value)));		\
+} while (0)
+
+/*
+ * The index can be 0-1 (MD5) or 0-2 (SHA1), 0-3 (SHA256).
+ */
+#define read_octeon_64bit_hash_dword(index)		\
+({							\
+	u64 __value;					\
+							\
+	__asm__ __volatile__ (				\
+	"dmfc2 %[rt],0x0048+" STR(index)		\
+	: [rt] "=d" (__value)				\
+	: );						\
+							\
+	be64_to_cpu(__value);				\
+})
+
+/*
+ * The index can be 0-6.
+ */
+#define write_octeon_64bit_block_dword(value, index)	\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x0040+" STR(index)		\
+	:						\
+	: [rt] "d" (cpu_to_be64(value)));		\
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_md5_start(value)				\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x4047"				\
+	:						\
+	: [rt] "d" (cpu_to_be64(value)));		\
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_sha1_start(value)			\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x4057"				\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_sha256_start(value)			\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x404f"				\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * Macros needed to implement SHA512:
+ */
+
+/*
+ * The index can be 0-7.
+ */
+#define write_octeon_64bit_hash_sha512(value, index)	\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x0250+" STR(index)		\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * The index can be 0-7.
+ */
+#define read_octeon_64bit_hash_sha512(index)		\
+({							\
+	u64 __value;					\
+							\
+	__asm__ __volatile__ (				\
+	"dmfc2 %[rt],0x0250+" STR(index)		\
+	: [rt] "=d" (__value)				\
+	: );						\
+							\
+	__value;					\
+})
+
+/*
+ * The index can be 0-14.
+ */
+#define write_octeon_64bit_block_sha512(value, index)	\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x0240+" STR(index)		\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * The value is the final block word (64-bit).
+ */
+#define octeon_sha512_start(value)			\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x424f"				\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+#endif /* __LINUX_OCTEON_CRYPTO_H */
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-md5.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-md5.c
new file mode 100644
index 0000000..af4c712
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-md5.c
@@ -0,0 +1,209 @@
+/*
+ * Cryptographic API.
+ *
+ * MD5 Message Digest Algorithm (RFC1321).
+ *
+ * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ * Based on crypto/md5.c, which is:
+ *
+ * Derived from cryptoapi implementation, originally based on the
+ * public domain implementation written by Colin Plumb in 1993.
+ *
+ * Copyright (c) Cryptoapi developers.
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <crypto/md5.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+#include <linux/cryptohash.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
+
+#include "octeon-crypto.h"
+
+/*
+ * We pass everything as 64-bit. OCTEON can handle misaligned data.
+ */
+
+static void octeon_md5_store_hash(struct md5_state *ctx)
+{
+	u64 *hash = (u64 *)ctx->hash;
+
+	write_octeon_64bit_hash_dword(hash[0], 0);
+	write_octeon_64bit_hash_dword(hash[1], 1);
+}
+
+static void octeon_md5_read_hash(struct md5_state *ctx)
+{
+	u64 *hash = (u64 *)ctx->hash;
+
+	hash[0] = read_octeon_64bit_hash_dword(0);
+	hash[1] = read_octeon_64bit_hash_dword(1);
+}
+
+static void octeon_md5_transform(const void *_block)
+{
+	const u64 *block = _block;
+
+	write_octeon_64bit_block_dword(block[0], 0);
+	write_octeon_64bit_block_dword(block[1], 1);
+	write_octeon_64bit_block_dword(block[2], 2);
+	write_octeon_64bit_block_dword(block[3], 3);
+	write_octeon_64bit_block_dword(block[4], 4);
+	write_octeon_64bit_block_dword(block[5], 5);
+	write_octeon_64bit_block_dword(block[6], 6);
+	octeon_md5_start(block[7]);
+}
+
+static int octeon_md5_init(struct shash_desc *desc)
+{
+	struct md5_state *mctx = shash_desc_ctx(desc);
+
+	mctx->hash[0] = cpu_to_le32(MD5_H0);
+	mctx->hash[1] = cpu_to_le32(MD5_H1);
+	mctx->hash[2] = cpu_to_le32(MD5_H2);
+	mctx->hash[3] = cpu_to_le32(MD5_H3);
+	mctx->byte_count = 0;
+
+	return 0;
+}
+
+static int octeon_md5_update(struct shash_desc *desc, const u8 *data,
+			     unsigned int len)
+{
+	struct md5_state *mctx = shash_desc_ctx(desc);
+	const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
+	struct octeon_cop2_state state;
+	unsigned long flags;
+
+	mctx->byte_count += len;
+
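+	/* If the input fits in the partial block buffer, just accumulate it. */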
+	if (avail > len) {
+		memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
+		       data, len);
+		return 0;
+	}
+
+	memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data,
+	       avail);
+
+	flags = octeon_crypto_enable(&state);
+	octeon_md5_store_hash(mctx);
+
+	octeon_md5_transform(mctx->block);
+	data += avail;
+	len -= avail;
+
+	while (len >= sizeof(mctx->block)) {
+		octeon_md5_transform(data);
+		data += sizeof(mctx->block);
+		len -= sizeof(mctx->block);
+	}
+
+	octeon_md5_read_hash(mctx);
+	octeon_crypto_disable(&state, flags);
+
+	memcpy(mctx->block, data, len);
+
+	return 0;
+}
+
+static int octeon_md5_final(struct shash_desc *desc, u8 *out)
+{
+	struct md5_state *mctx = shash_desc_ctx(desc);
+	const unsigned int offset = mctx->byte_count & 0x3f;
+	char *p = (char *)mctx->block + offset;
+	int padding = 56 - (offset + 1);
+	struct octeon_cop2_state state;
+	unsigned long flags;
+
+	*p++ = 0x80;
+
+	flags = octeon_crypto_enable(&state);
+	octeon_md5_store_hash(mctx);
+
+	if (padding < 0) {
+		memset(p, 0x00, padding + sizeof(u64));
+		octeon_md5_transform(mctx->block);
+		p = (char *)mctx->block;
+		padding = 56;
+	}
+
+	memset(p, 0, padding);
+	mctx->block[14] = cpu_to_le32(mctx->byte_count << 3);
+	mctx->block[15] = cpu_to_le32(mctx->byte_count >> 29);
+	octeon_md5_transform(mctx->block);
+
+	octeon_md5_read_hash(mctx);
+	octeon_crypto_disable(&state, flags);
+
+	memcpy(out, mctx->hash, sizeof(mctx->hash));
+	memset(mctx, 0, sizeof(*mctx));
+
+	return 0;
+}
+
+static int octeon_md5_export(struct shash_desc *desc, void *out)
+{
+	struct md5_state *ctx = shash_desc_ctx(desc);
+
+	memcpy(out, ctx, sizeof(*ctx));
+	return 0;
+}
+
+static int octeon_md5_import(struct shash_desc *desc, const void *in)
+{
+	struct md5_state *ctx = shash_desc_ctx(desc);
+
+	memcpy(ctx, in, sizeof(*ctx));
+	return 0;
+}
+
+static struct shash_alg alg = {
+	.digestsize	=	MD5_DIGEST_SIZE,
+	.init		=	octeon_md5_init,
+	.update		=	octeon_md5_update,
+	.final		=	octeon_md5_final,
+	.export		=	octeon_md5_export,
+	.import		=	octeon_md5_import,
+	.descsize	=	sizeof(struct md5_state),
+	.statesize	=	sizeof(struct md5_state),
+	.base		=	{
+		.cra_name	=	"md5",
+		.cra_driver_name=	"octeon-md5",
+		.cra_priority	=	OCTEON_CR_OPCODE_PRIORITY,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	MD5_HMAC_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
+};
+
+static int __init md5_mod_init(void)
+{
+	if (!octeon_has_crypto())
+		return -ENOTSUPP;
+	return crypto_register_shash(&alg);
+}
+
+static void __exit md5_mod_fini(void)
+{
+	crypto_unregister_shash(&alg);
+}
+
+module_init(md5_mod_init);
+module_exit(md5_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MD5 Message Digest Algorithm (OCTEON)");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-sha1.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-sha1.c
new file mode 100644
index 0000000..2b74b5b
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-sha1.c
@@ -0,0 +1,246 @@
+/*
+ * Cryptographic API.
+ *
+ * SHA1 Secure Hash Algorithm.
+ *
+ * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ * Based on crypto/sha1_generic.c, which is:
+ *
+ * Copyright (c) Alan Smithee.
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/mm.h>
+#include <crypto/sha.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
+
+#include "octeon-crypto.h"
+
+/*
+ * We pass everything as 64-bit. OCTEON can handle misaligned data.
+ */
+
+static void octeon_sha1_store_hash(struct sha1_state *sctx)
+{
+	u64 *hash = (u64 *)sctx->state;
+	union {
+		u32 word[2];
+		u64 dword;
+	} hash_tail = { { sctx->state[4], } };
+
+	write_octeon_64bit_hash_dword(hash[0], 0);
+	write_octeon_64bit_hash_dword(hash[1], 1);
+	write_octeon_64bit_hash_dword(hash_tail.dword, 2);
+	memzero_explicit(&hash_tail.word[0], sizeof(hash_tail.word[0]));
+}
+
+static void octeon_sha1_read_hash(struct sha1_state *sctx)
+{
+	u64 *hash = (u64 *)sctx->state;
+	union {
+		u32 word[2];
+		u64 dword;
+	} hash_tail;
+
+	hash[0]		= read_octeon_64bit_hash_dword(0);
+	hash[1]		= read_octeon_64bit_hash_dword(1);
+	hash_tail.dword	= read_octeon_64bit_hash_dword(2);
+	sctx->state[4]	= hash_tail.word[0];
+	memzero_explicit(&hash_tail.dword, sizeof(hash_tail.dword));
+}
+
+static void octeon_sha1_transform(const void *_block)
+{
+	const u64 *block = _block;
+
+	write_octeon_64bit_block_dword(block[0], 0);
+	write_octeon_64bit_block_dword(block[1], 1);
+	write_octeon_64bit_block_dword(block[2], 2);
+	write_octeon_64bit_block_dword(block[3], 3);
+	write_octeon_64bit_block_dword(block[4], 4);
+	write_octeon_64bit_block_dword(block[5], 5);
+	write_octeon_64bit_block_dword(block[6], 6);
+	octeon_sha1_start(block[7]);
+}
+
+static int octeon_sha1_init(struct shash_desc *desc)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA1_H0;
+	sctx->state[1] = SHA1_H1;
+	sctx->state[2] = SHA1_H2;
+	sctx->state[3] = SHA1_H3;
+	sctx->state[4] = SHA1_H4;
+	sctx->count = 0;
+
+	return 0;
+}
+
+static void __octeon_sha1_update(struct sha1_state *sctx, const u8 *data,
+				 unsigned int len)
+{
+	unsigned int partial;
+	unsigned int done;
+	const u8 *src;
+
+	partial = sctx->count % SHA1_BLOCK_SIZE;
+	sctx->count += len;
+	done = 0;
+	src = data;
+
+	if ((partial + len) >= SHA1_BLOCK_SIZE) {
+		if (partial) {
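+			/*
+			 * done is negative here so that done +
+			 * SHA1_BLOCK_SIZE below is exactly the number
+			 * of bytes needed to complete the partial block.
+			 */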
+			done = -partial;
+			memcpy(sctx->buffer + partial, data,
+			       done + SHA1_BLOCK_SIZE);
+			src = sctx->buffer;
+		}
+
+		do {
+			octeon_sha1_transform(src);
+			done += SHA1_BLOCK_SIZE;
+			src = data + done;
+		} while (done + SHA1_BLOCK_SIZE <= len);
+
+		partial = 0;
+	}
+	memcpy(sctx->buffer + partial, src, len - done);
+}
+
+static int octeon_sha1_update(struct shash_desc *desc, const u8 *data,
+			unsigned int len)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	struct octeon_cop2_state state;
+	unsigned long flags;
+
+	/*
+	 * Small updates never reach the crypto engine, so the generic sha1 is
+	 * faster because of the heavyweight octeon_crypto_enable() /
+	 * octeon_crypto_disable().
+	 */
+	if ((sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
+		return crypto_sha1_update(desc, data, len);
+
+	flags = octeon_crypto_enable(&state);
+	octeon_sha1_store_hash(sctx);
+
+	__octeon_sha1_update(sctx, data, len);
+
+	octeon_sha1_read_hash(sctx);
+	octeon_crypto_disable(&state, flags);
+
+	return 0;
+}
+
+static int octeon_sha1_final(struct shash_desc *desc, u8 *out)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	static const u8 padding[64] = { 0x80, };
+	struct octeon_cop2_state state;
+	__be32 *dst = (__be32 *)out;
+	unsigned int pad_len;
+	unsigned long flags;
+	unsigned int index;
+	__be64 bits;
+	int i;
+
+	/* Save number of bits. */
+	bits = cpu_to_be64(sctx->count << 3);
+
+	/* Pad out to 56 mod 64. */
+	index = sctx->count & 0x3f;
+	pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
+
+	flags = octeon_crypto_enable(&state);
+	octeon_sha1_store_hash(sctx);
+
+	__octeon_sha1_update(sctx, padding, pad_len);
+
+	/* Append length (before padding). */
+	__octeon_sha1_update(sctx, (const u8 *)&bits, sizeof(bits));
+
+	octeon_sha1_read_hash(sctx);
+	octeon_crypto_disable(&state, flags);
+
+	/* Store state in digest */
+	for (i = 0; i < 5; i++)
+		dst[i] = cpu_to_be32(sctx->state[i]);
+
+	/* Zeroize sensitive information. */
+	memset(sctx, 0, sizeof(*sctx));
+
+	return 0;
+}
+
+static int octeon_sha1_export(struct shash_desc *desc, void *out)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+
+	memcpy(out, sctx, sizeof(*sctx));
+	return 0;
+}
+
+static int octeon_sha1_import(struct shash_desc *desc, const void *in)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+
+	memcpy(sctx, in, sizeof(*sctx));
+	return 0;
+}
+
+static struct shash_alg octeon_sha1_alg = {
+	.digestsize	=	SHA1_DIGEST_SIZE,
+	.init		=	octeon_sha1_init,
+	.update		=	octeon_sha1_update,
+	.final		=	octeon_sha1_final,
+	.export		=	octeon_sha1_export,
+	.import		=	octeon_sha1_import,
+	.descsize	=	sizeof(struct sha1_state),
+	.statesize	=	sizeof(struct sha1_state),
+	.base		=	{
+		.cra_name	=	"sha1",
+		.cra_driver_name=	"octeon-sha1",
+		.cra_priority	=	OCTEON_CR_OPCODE_PRIORITY,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA1_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
+};
+
+static int __init octeon_sha1_mod_init(void)
+{
+	if (!octeon_has_crypto())
+		return -ENOTSUPP;
+	return crypto_register_shash(&octeon_sha1_alg);
+}
+
+static void __exit octeon_sha1_mod_fini(void)
+{
+	crypto_unregister_shash(&octeon_sha1_alg);
+}
+
+module_init(octeon_sha1_mod_init);
+module_exit(octeon_sha1_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (OCTEON)");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-sha256.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-sha256.c
new file mode 100644
index 0000000..97e96fe
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-sha256.c
@@ -0,0 +1,280 @@
+/*
+ * Cryptographic API.
+ *
+ * SHA-224 and SHA-256 Secure Hash Algorithm.
+ *
+ * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ * Based on crypto/sha256_generic.c, which is:
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/mm.h>
+#include <crypto/sha.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
+
+#include "octeon-crypto.h"
+
+/*
+ * We pass everything as 64-bit. OCTEON can handle misaligned data.
+ */
+
+static void octeon_sha256_store_hash(struct sha256_state *sctx)
+{
+	u64 *hash = (u64 *)sctx->state;
+
+	write_octeon_64bit_hash_dword(hash[0], 0);
+	write_octeon_64bit_hash_dword(hash[1], 1);
+	write_octeon_64bit_hash_dword(hash[2], 2);
+	write_octeon_64bit_hash_dword(hash[3], 3);
+}
+
+static void octeon_sha256_read_hash(struct sha256_state *sctx)
+{
+	u64 *hash = (u64 *)sctx->state;
+
+	hash[0] = read_octeon_64bit_hash_dword(0);
+	hash[1] = read_octeon_64bit_hash_dword(1);
+	hash[2] = read_octeon_64bit_hash_dword(2);
+	hash[3] = read_octeon_64bit_hash_dword(3);
+}
+
+static void octeon_sha256_transform(const void *_block)
+{
+	const u64 *block = _block;
+
+	write_octeon_64bit_block_dword(block[0], 0);
+	write_octeon_64bit_block_dword(block[1], 1);
+	write_octeon_64bit_block_dword(block[2], 2);
+	write_octeon_64bit_block_dword(block[3], 3);
+	write_octeon_64bit_block_dword(block[4], 4);
+	write_octeon_64bit_block_dword(block[5], 5);
+	write_octeon_64bit_block_dword(block[6], 6);
+	octeon_sha256_start(block[7]);
+}
+
+static int octeon_sha224_init(struct shash_desc *desc)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA224_H0;
+	sctx->state[1] = SHA224_H1;
+	sctx->state[2] = SHA224_H2;
+	sctx->state[3] = SHA224_H3;
+	sctx->state[4] = SHA224_H4;
+	sctx->state[5] = SHA224_H5;
+	sctx->state[6] = SHA224_H6;
+	sctx->state[7] = SHA224_H7;
+	sctx->count = 0;
+
+	return 0;
+}
+
+static int octeon_sha256_init(struct shash_desc *desc)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA256_H0;
+	sctx->state[1] = SHA256_H1;
+	sctx->state[2] = SHA256_H2;
+	sctx->state[3] = SHA256_H3;
+	sctx->state[4] = SHA256_H4;
+	sctx->state[5] = SHA256_H5;
+	sctx->state[6] = SHA256_H6;
+	sctx->state[7] = SHA256_H7;
+	sctx->count = 0;
+
+	return 0;
+}
+
+static void __octeon_sha256_update(struct sha256_state *sctx, const u8 *data,
+				   unsigned int len)
+{
+	unsigned int partial;
+	unsigned int done;
+	const u8 *src;
+
+	partial = sctx->count % SHA256_BLOCK_SIZE;
+	sctx->count += len;
+	done = 0;
+	src = data;
+
+	if ((partial + len) >= SHA256_BLOCK_SIZE) {
+		if (partial) {
+			done = -partial;
+			memcpy(sctx->buf + partial, data,
+			       done + SHA256_BLOCK_SIZE);
+			src = sctx->buf;
+		}
+
+		do {
+			octeon_sha256_transform(src);
+			done += SHA256_BLOCK_SIZE;
+			src = data + done;
+		} while (done + SHA256_BLOCK_SIZE <= len);
+
+		partial = 0;
+	}
+	memcpy(sctx->buf + partial, src, len - done);
+}
+
+static int octeon_sha256_update(struct shash_desc *desc, const u8 *data,
+				unsigned int len)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	struct octeon_cop2_state state;
+	unsigned long flags;
+
+	/*
+	 * Small updates never reach the crypto engine, so the generic sha256 is
+	 * faster because of the heavyweight octeon_crypto_enable() /
+	 * octeon_crypto_disable().
+	 */
+	if ((sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
+		return crypto_sha256_update(desc, data, len);
+
+	flags = octeon_crypto_enable(&state);
+	octeon_sha256_store_hash(sctx);
+
+	__octeon_sha256_update(sctx, data, len);
+
+	octeon_sha256_read_hash(sctx);
+	octeon_crypto_disable(&state, flags);
+
+	return 0;
+}
+
+static int octeon_sha256_final(struct shash_desc *desc, u8 *out)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	static const u8 padding[64] = { 0x80, };
+	struct octeon_cop2_state state;
+	__be32 *dst = (__be32 *)out;
+	unsigned int pad_len;
+	unsigned long flags;
+	unsigned int index;
+	__be64 bits;
+	int i;
+
+	/* Save number of bits. */
+	bits = cpu_to_be64(sctx->count << 3);
+
+	/* Pad out to 56 mod 64. */
+	index = sctx->count & 0x3f;
+	pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
+
+	flags = octeon_crypto_enable(&state);
+	octeon_sha256_store_hash(sctx);
+
+	__octeon_sha256_update(sctx, padding, pad_len);
+
+	/* Append length (before padding). */
+	__octeon_sha256_update(sctx, (const u8 *)&bits, sizeof(bits));
+
+	octeon_sha256_read_hash(sctx);
+	octeon_crypto_disable(&state, flags);
+
+	/* Store state in digest */
+	for (i = 0; i < 8; i++)
+		dst[i] = cpu_to_be32(sctx->state[i]);
+
+	/* Zeroize sensitive information. */
+	memset(sctx, 0, sizeof(*sctx));
+
+	return 0;
+}
+
+static int octeon_sha224_final(struct shash_desc *desc, u8 *hash)
+{
+	u8 D[SHA256_DIGEST_SIZE];
+
+	octeon_sha256_final(desc, D);
+
+	memcpy(hash, D, SHA224_DIGEST_SIZE);
+	memzero_explicit(D, SHA256_DIGEST_SIZE);
+
+	return 0;
+}
+
+static int octeon_sha256_export(struct shash_desc *desc, void *out)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	memcpy(out, sctx, sizeof(*sctx));
+	return 0;
+}
+
+static int octeon_sha256_import(struct shash_desc *desc, const void *in)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	memcpy(sctx, in, sizeof(*sctx));
+	return 0;
+}
+
+static struct shash_alg octeon_sha256_algs[2] = { {
+	.digestsize	=	SHA256_DIGEST_SIZE,
+	.init		=	octeon_sha256_init,
+	.update		=	octeon_sha256_update,
+	.final		=	octeon_sha256_final,
+	.export		=	octeon_sha256_export,
+	.import		=	octeon_sha256_import,
+	.descsize	=	sizeof(struct sha256_state),
+	.statesize	=	sizeof(struct sha256_state),
+	.base		=	{
+		.cra_name	=	"sha256",
+		.cra_driver_name=	"octeon-sha256",
+		.cra_priority	=	OCTEON_CR_OPCODE_PRIORITY,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA256_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
+}, {
+	.digestsize	=	SHA224_DIGEST_SIZE,
+	.init		=	octeon_sha224_init,
+	.update		=	octeon_sha256_update,
+	.final		=	octeon_sha224_final,
+	.descsize	=	sizeof(struct sha256_state),
+	.base		=	{
+		.cra_name	=	"sha224",
+		.cra_driver_name=	"octeon-sha224",
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA224_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
+} };
+
+static int __init octeon_sha256_mod_init(void)
+{
+	if (!octeon_has_crypto())
+		return -ENOTSUPP;
+	return crypto_register_shashes(octeon_sha256_algs,
+				       ARRAY_SIZE(octeon_sha256_algs));
+}
+
+static void __exit octeon_sha256_mod_fini(void)
+{
+	crypto_unregister_shashes(octeon_sha256_algs,
+				  ARRAY_SIZE(octeon_sha256_algs));
+}
+
+module_init(octeon_sha256_mod_init);
+module_exit(octeon_sha256_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm (OCTEON)");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-sha512.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-sha512.c
new file mode 100644
index 0000000..d5fb3c6
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/crypto/octeon-sha512.c
@@ -0,0 +1,277 @@
+/*
+ * Cryptographic API.
+ *
+ * SHA-512 and SHA-384 Secure Hash Algorithm.
+ *
+ * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ * Based on crypto/sha512_generic.c, which is:
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2003 Kyle McMartin <kyle@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ */
+
+#include <linux/mm.h>
+#include <crypto/sha.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
+
+#include "octeon-crypto.h"
+
+/*
+ * We pass everything as 64-bit. OCTEON can handle misaligned data.
+ */
+
+static void octeon_sha512_store_hash(struct sha512_state *sctx)
+{
+	write_octeon_64bit_hash_sha512(sctx->state[0], 0);
+	write_octeon_64bit_hash_sha512(sctx->state[1], 1);
+	write_octeon_64bit_hash_sha512(sctx->state[2], 2);
+	write_octeon_64bit_hash_sha512(sctx->state[3], 3);
+	write_octeon_64bit_hash_sha512(sctx->state[4], 4);
+	write_octeon_64bit_hash_sha512(sctx->state[5], 5);
+	write_octeon_64bit_hash_sha512(sctx->state[6], 6);
+	write_octeon_64bit_hash_sha512(sctx->state[7], 7);
+}
+
+static void octeon_sha512_read_hash(struct sha512_state *sctx)
+{
+	sctx->state[0] = read_octeon_64bit_hash_sha512(0);
+	sctx->state[1] = read_octeon_64bit_hash_sha512(1);
+	sctx->state[2] = read_octeon_64bit_hash_sha512(2);
+	sctx->state[3] = read_octeon_64bit_hash_sha512(3);
+	sctx->state[4] = read_octeon_64bit_hash_sha512(4);
+	sctx->state[5] = read_octeon_64bit_hash_sha512(5);
+	sctx->state[6] = read_octeon_64bit_hash_sha512(6);
+	sctx->state[7] = read_octeon_64bit_hash_sha512(7);
+}
+
+static void octeon_sha512_transform(const void *_block)
+{
+	const u64 *block = _block;
+
+	write_octeon_64bit_block_sha512(block[0], 0);
+	write_octeon_64bit_block_sha512(block[1], 1);
+	write_octeon_64bit_block_sha512(block[2], 2);
+	write_octeon_64bit_block_sha512(block[3], 3);
+	write_octeon_64bit_block_sha512(block[4], 4);
+	write_octeon_64bit_block_sha512(block[5], 5);
+	write_octeon_64bit_block_sha512(block[6], 6);
+	write_octeon_64bit_block_sha512(block[7], 7);
+	write_octeon_64bit_block_sha512(block[8], 8);
+	write_octeon_64bit_block_sha512(block[9], 9);
+	write_octeon_64bit_block_sha512(block[10], 10);
+	write_octeon_64bit_block_sha512(block[11], 11);
+	write_octeon_64bit_block_sha512(block[12], 12);
+	write_octeon_64bit_block_sha512(block[13], 13);
+	write_octeon_64bit_block_sha512(block[14], 14);
+	octeon_sha512_start(block[15]);
+}
+
+static int octeon_sha512_init(struct shash_desc *desc)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA512_H0;
+	sctx->state[1] = SHA512_H1;
+	sctx->state[2] = SHA512_H2;
+	sctx->state[3] = SHA512_H3;
+	sctx->state[4] = SHA512_H4;
+	sctx->state[5] = SHA512_H5;
+	sctx->state[6] = SHA512_H6;
+	sctx->state[7] = SHA512_H7;
+	sctx->count[0] = sctx->count[1] = 0;
+
+	return 0;
+}
+
+static int octeon_sha384_init(struct shash_desc *desc)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA384_H0;
+	sctx->state[1] = SHA384_H1;
+	sctx->state[2] = SHA384_H2;
+	sctx->state[3] = SHA384_H3;
+	sctx->state[4] = SHA384_H4;
+	sctx->state[5] = SHA384_H5;
+	sctx->state[6] = SHA384_H6;
+	sctx->state[7] = SHA384_H7;
+	sctx->count[0] = sctx->count[1] = 0;
+
+	return 0;
+}
+
+static void __octeon_sha512_update(struct sha512_state *sctx, const u8 *data,
+				   unsigned int len)
+{
+	unsigned int part_len;
+	unsigned int index;
+	unsigned int i;
+
+	/* Compute number of bytes mod 128. */
+	index = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+	/* Update number of bytes. */
+	if ((sctx->count[0] += len) < len)
+		sctx->count[1]++;
+
+	part_len = SHA512_BLOCK_SIZE - index;
+
+	/* Transform as many times as possible. */
+	if (len >= part_len) {
+		memcpy(&sctx->buf[index], data, part_len);
+		octeon_sha512_transform(sctx->buf);
+
+		for (i = part_len; i + SHA512_BLOCK_SIZE <= len;
+			i += SHA512_BLOCK_SIZE)
+			octeon_sha512_transform(&data[i]);
+
+		index = 0;
+	} else {
+		i = 0;
+	}
+
+	/* Buffer remaining input. */
+	memcpy(&sctx->buf[index], &data[i], len - i);
+}
+
+static int octeon_sha512_update(struct shash_desc *desc, const u8 *data,
+				unsigned int len)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+	struct octeon_cop2_state state;
+	unsigned long flags;
+
+	/*
+	 * Small updates never reach the crypto engine, so the generic sha512 is
+	 * faster because of the heavyweight octeon_crypto_enable() /
+	 * octeon_crypto_disable().
+	 */
+	if ((sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
+		return crypto_sha512_update(desc, data, len);
+
+	flags = octeon_crypto_enable(&state);
+	octeon_sha512_store_hash(sctx);
+
+	__octeon_sha512_update(sctx, data, len);
+
+	octeon_sha512_read_hash(sctx);
+	octeon_crypto_disable(&state, flags);
+
+	return 0;
+}
+
+static int octeon_sha512_final(struct shash_desc *desc, u8 *hash)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+	static const u8 padding[128] = { 0x80, };
+	struct octeon_cop2_state state;
+	__be64 *dst = (__be64 *)hash;
+	unsigned int pad_len;
+	unsigned long flags;
+	unsigned int index;
+	__be64 bits[2];
+	int i;
+
+	/* Save number of bits. */
+	bits[1] = cpu_to_be64(sctx->count[0] << 3);
+	bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
+
+	/* Pad out to 112 mod 128. */
+	index = sctx->count[0] & 0x7f;
+	pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
+
+	flags = octeon_crypto_enable(&state);
+	octeon_sha512_store_hash(sctx);
+
+	__octeon_sha512_update(sctx, padding, pad_len);
+
+	/* Append length (before padding). */
+	__octeon_sha512_update(sctx, (const u8 *)bits, sizeof(bits));
+
+	octeon_sha512_read_hash(sctx);
+	octeon_crypto_disable(&state, flags);
+
+	/* Store state in digest. */
+	for (i = 0; i < 8; i++)
+		dst[i] = cpu_to_be64(sctx->state[i]);
+
+	/* Zeroize sensitive information. */
+	memset(sctx, 0, sizeof(struct sha512_state));
+
+	return 0;
+}
+
+static int octeon_sha384_final(struct shash_desc *desc, u8 *hash)
+{
+	u8 D[64];
+
+	octeon_sha512_final(desc, D);
+
+	memcpy(hash, D, 48);
+	memzero_explicit(D, 64);
+
+	return 0;
+}
+
+static struct shash_alg octeon_sha512_algs[2] = { {
+	.digestsize	=	SHA512_DIGEST_SIZE,
+	.init		=	octeon_sha512_init,
+	.update		=	octeon_sha512_update,
+	.final		=	octeon_sha512_final,
+	.descsize	=	sizeof(struct sha512_state),
+	.base		=	{
+		.cra_name	=	"sha512",
+		.cra_driver_name=	"octeon-sha512",
+		.cra_priority	=	OCTEON_CR_OPCODE_PRIORITY,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA512_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
+}, {
+	.digestsize	=	SHA384_DIGEST_SIZE,
+	.init		=	octeon_sha384_init,
+	.update		=	octeon_sha512_update,
+	.final		=	octeon_sha384_final,
+	.descsize	=	sizeof(struct sha512_state),
+	.base		=	{
+		.cra_name	=	"sha384",
+		.cra_driver_name=	"octeon-sha384",
+		.cra_priority	=	OCTEON_CR_OPCODE_PRIORITY,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA384_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
+} };
+
+static int __init octeon_sha512_mod_init(void)
+{
+	if (!octeon_has_crypto())
+		return -ENOTSUPP;
+	return crypto_register_shashes(octeon_sha512_algs,
+				       ARRAY_SIZE(octeon_sha512_algs));
+}
+
+static void __exit octeon_sha512_mod_fini(void)
+{
+	crypto_unregister_shashes(octeon_sha512_algs,
+				  ARRAY_SIZE(octeon_sha512_algs));
+}
+
+module_init(octeon_sha512_mod_init);
+module_exit(octeon_sha512_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms (OCTEON)");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/csrc-octeon.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/csrc-octeon.c
new file mode 100644
index 0000000..39f153f
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/csrc-octeon.c
@@ -0,0 +1,223 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 by Ralf Baechle
+ * Copyright (C) 2009, 2012 Cavium, Inc.
+ */
+#include <linux/clocksource.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+
+#include <asm/cpu-info.h>
+#include <asm/cpu-type.h>
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-ipd-defs.h>
+#include <asm/octeon/cvmx-mio-defs.h>
+#include <asm/octeon/cvmx-rst-defs.h>
+#include <asm/octeon/cvmx-fpa-defs.h>
+
+static u64 f;
+static u64 rdiv;
+static u64 sdiv;
+static u64 octeon_udelay_factor;
+static u64 octeon_ndelay_factor;
+
+void __init octeon_setup_delays(void)
+{
+	octeon_udelay_factor = octeon_get_clock_rate() / 1000000;
+	/*
+	 * For __ndelay we divide by 2^16, so the factor is multiplied
+	 * by the same amount.
+	 */
+	octeon_ndelay_factor = (octeon_udelay_factor * 0x10000ull) / 1000ull;
+
+	preset_lpj = octeon_get_clock_rate() / HZ;
+
+	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
+		union cvmx_mio_rst_boot rst_boot;
+
+		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
+		rdiv = rst_boot.s.c_mul;	/* CPU clock */
+		sdiv = rst_boot.s.pnr_mul;	/* I/O clock */
+		f = (0x8000000000000000ull / sdiv) * 2;
+	} else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) {
+		union cvmx_rst_boot rst_boot;
+
+		rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
+		rdiv = rst_boot.s.c_mul;	/* CPU clock */
+		sdiv = rst_boot.s.pnr_mul;	/* I/O clock */
+		f = (0x8000000000000000ull / sdiv) * 2;
+	}
+
+}
+
+/*
+ * Set the current core's cvmcount counter to the value of the
+ * IPD_CLK_COUNT.  We do this on all cores as they are brought
+ * on-line.  This allows for a read from a local cpu register to
+ * access a synchronized counter.
+ *
+ * On CPU_CAVIUM_OCTEON2 the IPD_CLK_COUNT is scaled by rdiv/sdiv.
+ */
+void octeon_init_cvmcount(void)
+{
+	u64 clk_reg;
+	unsigned long flags;
+	unsigned loops = 2;
+
+	clk_reg = octeon_has_feature(OCTEON_FEATURE_FPA3) ?
+		CVMX_FPA_CLK_COUNT : CVMX_IPD_CLK_COUNT;
+
+	/* Clobber loops so GCC will not unroll the following while loop. */
+	asm("" : "+r" (loops));
+
+	local_irq_save(flags);
+	/*
+	 * Loop several times so we are executing from the cache,
+	 * which should give more deterministic timing.
+	 */
+	while (loops--) {
+		u64 clk_count = cvmx_read_csr(clk_reg);
+		if (rdiv != 0) {
+			clk_count *= rdiv;
+			if (f != 0) {
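+				/*
+				 * f is ~2^64/sdiv, so taking the high
+				 * half of the 128-bit product divides
+				 * by sdiv: a net scaling of rdiv/sdiv.
+				 */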
+				asm("dmultu\t%[cnt],%[f]\n\t"
+				    "mfhi\t%[cnt]"
+				    : [cnt] "+r" (clk_count)
+				    : [f] "r" (f)
+				    : "hi", "lo");
+			}
+		}
+		write_c0_cvmcount(clk_count);
+	}
+	local_irq_restore(flags);
+}
+
+static u64 octeon_cvmcount_read(struct clocksource *cs)
+{
+	return read_c0_cvmcount();
+}
+
+static struct clocksource clocksource_mips = {
+	.name		= "OCTEON_CVMCOUNT",
+	.read		= octeon_cvmcount_read,
+	.mask		= CLOCKSOURCE_MASK(64),
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+unsigned long long notrace sched_clock(void)
+{
+	/* 64-bit arithmetic can overflow, so use 128-bit.  */
+	u64 t1, t2, t3;
+	unsigned long long rv;
+	u64 mult = clocksource_mips.mult;
+	u64 shift = clocksource_mips.shift;
+	u64 cnt = read_c0_cvmcount();
+
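+	/*
+	 * Compute (cnt * mult) >> shift on the full 128-bit product:
+	 * dmultu leaves the 128-bit result in hi/lo, and the shift
+	 * sequence below merges the two halves into 64 bits.
+	 */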
+	asm (
+		"dmultu\t%[cnt],%[mult]\n\t"
+		"nor\t%[t1],$0,%[shift]\n\t"
+		"mfhi\t%[t2]\n\t"
+		"mflo\t%[t3]\n\t"
+		"dsll\t%[t2],%[t2],1\n\t"
+		"dsrlv\t%[rv],%[t3],%[shift]\n\t"
+		"dsllv\t%[t1],%[t2],%[t1]\n\t"
+		"or\t%[rv],%[t1],%[rv]\n\t"
+		: [rv] "=&r" (rv), [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3)
+		: [cnt] "r" (cnt), [mult] "r" (mult), [shift] "r" (shift)
+		: "hi", "lo");
+	return rv;
+}
+
+void __init plat_time_init(void)
+{
+	clocksource_mips.rating = 300;
+	clocksource_register_hz(&clocksource_mips, octeon_get_clock_rate());
+}
+
+void __udelay(unsigned long us)
+{
+	u64 cur, end, inc;
+
+	cur = read_c0_cvmcount();
+
+	inc = us * octeon_udelay_factor;
+	end = cur + inc;
+
+	while (end > cur)
+		cur = read_c0_cvmcount();
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long ns)
+{
+	u64 cur, end, inc;
+
+	cur = read_c0_cvmcount();
+
+	inc = ((ns * octeon_ndelay_factor) >> 16);
+	end = cur + inc;
+
+	while (end > cur)
+		cur = read_c0_cvmcount();
+}
+EXPORT_SYMBOL(__ndelay);
+
+void __delay(unsigned long loops)
+{
+	u64 cur, end;
+
+	cur = read_c0_cvmcount();
+	end = cur + loops;
+
+	while (end > cur)
+		cur = read_c0_cvmcount();
+}
+EXPORT_SYMBOL(__delay);
+
+
+/**
+ * octeon_io_clk_delay - wait for a given number of io clock cycles to pass.
+ *
+ * We scale the wait by the clock ratio, and then wait for the
+ * corresponding number of core clocks.
+ *
+ * @count: The number of clocks to wait.
+ */
+void octeon_io_clk_delay(unsigned long count)
+{
+	u64 cur, end;
+
+	cur = read_c0_cvmcount();
+	if (rdiv != 0) {
+		end = count * rdiv;
+		if (f != 0) {
+			asm("dmultu\t%[cnt],%[f]\n\t"
+				"mfhi\t%[cnt]"
+				: [cnt] "+r" (end)
+				: [f] "r" (f)
+				: "hi", "lo");
+		}
+		end = cur + end;
+	} else {
+		end = cur + count;
+	}
+	while (end > cur)
+		cur = read_c0_cvmcount();
+}
+EXPORT_SYMBOL(octeon_io_clk_delay);
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/dma-octeon.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/dma-octeon.c
new file mode 100644
index 0000000..c64bd87
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/dma-octeon.c
@@ -0,0 +1,363 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
+ * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
+ * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
+ * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
+ * IP32 changes by Ilya.
+ * Copyright (C) 2010 Cavium Networks, Inc.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/bootmem.h>
+#include <linux/export.h>
+#include <linux/swiotlb.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#include <asm/bootinfo.h>
+
+#include <asm/octeon/octeon.h>
+
+#ifdef CONFIG_PCI
+#include <asm/octeon/pci-octeon.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-pci-defs.h>
+
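+/*
+ * Translate physical addresses that fall inside the PCIe BAR1 window to
+ * the corresponding root-complex (RC) addresses; everything else is
+ * mapped 1:1.
+ */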
+static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
+{
+	if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
+		return paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
+	else
+		return paddr;
+}
+
+static phys_addr_t octeon_hole_dma_to_phys(dma_addr_t daddr)
+{
+	if (daddr >= CVMX_PCIE_BAR1_RC_BASE)
+		return daddr + CVMX_PCIE_BAR1_PHYS_BASE - CVMX_PCIE_BAR1_RC_BASE;
+	else
+		return daddr;
+}
+
+static dma_addr_t octeon_gen1_phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+		paddr -= 0x400000000ull;
+	return octeon_hole_phys_to_dma(paddr);
+}
+
+static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	daddr = octeon_hole_dma_to_phys(daddr);
+
+	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+		daddr += 0x400000000ull;
+
+	return daddr;
+}
+
+static dma_addr_t octeon_gen2_phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	return octeon_hole_phys_to_dma(paddr);
+}
+
+static phys_addr_t octeon_gen2_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	return octeon_hole_dma_to_phys(daddr);
+}
+
+static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+		paddr -= 0x400000000ull;
+
+	/* Anything in the BAR1 hole or above goes via BAR2 */
+	if (paddr >= 0xf0000000ull)
+		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
+
+	return paddr;
+}
+
+static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
+		daddr -= OCTEON_BAR2_PCI_ADDRESS;
+
+	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+		daddr += 0x400000000ull;
+	return daddr;
+}
+
+static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
+					   phys_addr_t paddr)
+{
+	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+		paddr -= 0x400000000ull;
+
+	/* Anything not in the BAR1 range goes via BAR2 */
+	if (paddr >= octeon_bar1_pci_phys && paddr < octeon_bar1_pci_phys + 0x8000000ull)
+		paddr = paddr - octeon_bar1_pci_phys;
+	else
+		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
+
+	return paddr;
+}
+
+static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
+					    dma_addr_t daddr)
+{
+	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
+		daddr -= OCTEON_BAR2_PCI_ADDRESS;
+	else
+		daddr += octeon_bar1_pci_phys;
+
+	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+		daddr += 0x400000000ull;
+	return daddr;
+}
+
+#endif /* CONFIG_PCI */
+
+static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
+	unsigned long offset, size_t size, enum dma_data_direction direction,
+	unsigned long attrs)
+{
+	dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
+					    direction, attrs);
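+	/* Make sure prior CPU writes are visible to the device before DMA. */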
+	mb();
+
+	return daddr;
+}
+
+static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
+	int nents, enum dma_data_direction direction, unsigned long attrs)
+{
+	int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
+	mb();
+	return r;
+}
+
+static void octeon_dma_sync_single_for_device(struct device *dev,
+	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
+{
+	swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
+	mb();
+}
+
+static void octeon_dma_sync_sg_for_device(struct device *dev,
+	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
+{
+	swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
+	mb();
+}
+
+static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
+	dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+	void *ret;
+
+	/* ignore region specifiers */
+	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+	if (IS_ENABLED(CONFIG_ZONE_DMA) && dev == NULL)
+		gfp |= __GFP_DMA;
+	else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
+		 dev->coherent_dma_mask <= DMA_BIT_MASK(24))
+		gfp |= __GFP_DMA;
+	else if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
+		 dev->coherent_dma_mask <= DMA_BIT_MASK(32))
+		gfp |= __GFP_DMA32;
+
+	/* Don't invoke OOM killer */
+	gfp |= __GFP_NORETRY;
+
+	ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+
+	mb();
+
+	return ret;
+}
+
+static void octeon_dma_free_coherent(struct device *dev, size_t size,
+	void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
+{
+	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+}
+
+static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	return paddr;
+}
+
+static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	return daddr;
+}
+
+struct octeon_dma_map_ops {
+	const struct dma_map_ops dma_map_ops;
+	dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
+	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
+};
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
+						      struct octeon_dma_map_ops,
+						      dma_map_ops);
+
+	return ops->phys_to_dma(dev, paddr);
+}
+EXPORT_SYMBOL(phys_to_dma);
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
+						      struct octeon_dma_map_ops,
+						      dma_map_ops);
+
+	return ops->dma_to_phys(dev, daddr);
+}
+EXPORT_SYMBOL(dma_to_phys);
+
+static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
+	.dma_map_ops = {
+		.alloc = octeon_dma_alloc_coherent,
+		.free = octeon_dma_free_coherent,
+		.map_page = octeon_dma_map_page,
+		.unmap_page = swiotlb_unmap_page,
+		.map_sg = octeon_dma_map_sg,
+		.unmap_sg = swiotlb_unmap_sg_attrs,
+		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+		.sync_single_for_device = octeon_dma_sync_single_for_device,
+		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
+		.mapping_error = swiotlb_dma_mapping_error,
+		.dma_supported = swiotlb_dma_supported
+	},
+	.phys_to_dma = octeon_unity_phys_to_dma,
+	.dma_to_phys = octeon_unity_dma_to_phys
+};
+
+char *octeon_swiotlb;
+
+void __init plat_swiotlb_setup(void)
+{
+	int i;
+	phys_addr_t max_addr;
+	phys_addr_t addr_size;
+	size_t swiotlbsize;
+	unsigned long swiotlb_nslabs;
+
+	max_addr = 0;
+	addr_size = 0;
+
+	for (i = 0; i < boot_mem_map.nr_map; i++) {
+		struct boot_mem_map_entry *e = &boot_mem_map.map[i];
+		if (e->type != BOOT_MEM_RAM && e->type != BOOT_MEM_INIT_RAM)
+			continue;
+
+		/* These addresses map low for PCI. */
+		if (e->addr > 0x410000000ull && !OCTEON_IS_OCTEON2())
+			continue;
+
+		addr_size += e->size;
+
+		if (max_addr < e->addr + e->size)
+			max_addr = e->addr + e->size;
+
+	}
+
+	swiotlbsize = PAGE_SIZE;
+
+#ifdef CONFIG_PCI
+	/*
+	 * For OCTEON_DMA_BAR_TYPE_SMALL, size the iotlb at 1/4 memory
+	 * size to a maximum of 64MB
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+	    || OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
+		swiotlbsize = addr_size / 4;
+		if (swiotlbsize > 64 * (1<<20))
+			swiotlbsize = 64 * (1<<20);
+	} else if (max_addr > 0xf0000000ul) {
+		/*
+		 * Otherwise only allocate a big iotlb if there is
+		 * memory past the BAR1 hole.
+		 */
+		swiotlbsize = 64 * (1<<20);
+	}
+#endif
+#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
+	/* OCTEON II ohci is only 32-bit. */
+	if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
+		swiotlbsize = 64 * (1<<20);
+#endif
+	swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
+	swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
+	swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
+
+	octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize);
+
+	if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
+		panic("Cannot allocate SWIOTLB buffer");
+
+	mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
+}
+
+#ifdef CONFIG_PCI
+static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
+	.dma_map_ops = {
+		.alloc = octeon_dma_alloc_coherent,
+		.free = octeon_dma_free_coherent,
+		.map_page = octeon_dma_map_page,
+		.unmap_page = swiotlb_unmap_page,
+		.map_sg = octeon_dma_map_sg,
+		.unmap_sg = swiotlb_unmap_sg_attrs,
+		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+		.sync_single_for_device = octeon_dma_sync_single_for_device,
+		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
+		.mapping_error = swiotlb_dma_mapping_error,
+		.dma_supported = swiotlb_dma_supported
+	},
+};
+
+const struct dma_map_ops *octeon_pci_dma_map_ops;
+
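+/*
+ * The PCI ops above reuse the same swiotlb-backed dma_map_ops but
+ * leave phys_to_dma/dma_to_phys unset until octeon_pci_dma_init()
+ * below patches in the translation pair matching the probed DMA BAR
+ * type.
+ */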
+void __init octeon_pci_dma_init(void)
+{
+	switch (octeon_dma_bar_type) {
+	case OCTEON_DMA_BAR_TYPE_PCIE2:
+		_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen2_phys_to_dma;
+		_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen2_dma_to_phys;
+		break;
+	case OCTEON_DMA_BAR_TYPE_PCIE:
+		_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
+		_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
+		break;
+	case OCTEON_DMA_BAR_TYPE_BIG:
+		_octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
+		_octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
+		break;
+	case OCTEON_DMA_BAR_TYPE_SMALL:
+		_octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
+		_octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
+		break;
+	default:
+		BUG();
+	}
+	octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
+}
+#endif /* CONFIG_PCI */
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/Makefile b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/Makefile
new file mode 100644
index 0000000..50b4278
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for the Cavium Octeon specific kernel interface routines
+# under Linux.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2005-2008 Cavium Networks
+#
+
+obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
+obj-y += cvmx-pko.o cvmx-spi.o cvmx-cmd-queue.o \
+	cvmx-helper-board.o cvmx-helper.o cvmx-helper-xaui.o \
+	cvmx-helper-rgmii.o cvmx-helper-sgmii.o cvmx-helper-npi.o \
+	cvmx-helper-loop.o cvmx-helper-spi.o cvmx-helper-util.o \
+	cvmx-interrupt-decodes.o cvmx-interrupt-rsl.o
+
+obj-y += cvmx-helper-errata.o cvmx-helper-jtag.o cvmx-boot-vector.o
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-boot-vector.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-boot-vector.c
new file mode 100644
index 0000000..b7019d2
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-boot-vector.c
@@ -0,0 +1,167 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2017 Cavium, Inc.
+ */
+
+
+/*
+  We install this program at the bootvector:
+------------------------------------
+	.set noreorder
+	.set nomacro
+	.set noat
+reset_vector:
+	dmtc0	$k0, $31, 0	# Save $k0 to DESAVE
+	dmtc0	$k1, $31, 3	# Save $k1 to KScratch2
+
+	mfc0	$k0, $12, 0	# Status
+	mfc0	$k1, $15, 1	# Ebase
+
+	ori	$k0, 0x84	# Enable 64-bit addressing, set
+				# ERL (should already be set)
+	andi	$k1, 0x3ff	# mask out core ID
+
+	mtc0	$k0, $12, 0	# Status
+	sll	$k1, 5
+
+	lui	$k0, 0xbfc0
+	cache	17, 0($0)	# Core-14345, clear L1 Dcache virtual
+				# tags if the core hit an NMI
+
+	ld	$k0, 0x78($k0)	# k0 <- (bfc00078) pointer to the reset vector
+	synci	0($0)		# Invalidate ICache to get coherent
+				# view of target code.
+
+	daddu	$k0, $k0, $k1
+	nop
+
+	ld	$k0, 0($k0)	# k0 <- core specific target address
+	dmfc0	$k1, $31, 3	# Restore $k1 from KScratch2
+
+	beqz	$k0, wait_loop	# Spin in wait loop
+	nop
+
+	jr	$k0
+	nop
+
+	nop			# NOPs needed here to fill delay slots
+	nop			# on endian reversal of previous instructions
+
+wait_loop:
+	wait
+	nop
+
+	b	wait_loop
+	nop
+
+	nop
+	nop
+------------------------------------
+
+0000000000000000 <reset_vector>:
+   0:	40baf800	dmtc0	k0,c0_desave
+   4:	40bbf803	dmtc0	k1,c0_kscratch2
+
+   8:	401a6000	mfc0	k0,c0_status
+   c:	401b7801	mfc0	k1,c0_ebase
+
+  10:	375a0084	ori	k0,k0,0x84
+  14:	337b03ff	andi	k1,k1,0x3ff
+
+  18:	409a6000	mtc0	k0,c0_status
+  1c:	001bd940	sll	k1,k1,0x5
+
+  20:	3c1abfc0	lui	k0,0xbfc0
+  24:	bc110000	cache	0x11,0(zero)
+
+  28:	df5a0078	ld	k0,120(k0)
+  2c:	041f0000	synci	0(zero)
+
+  30:	035bd02d	daddu	k0,k0,k1
+  34:	00000000	nop
+
+  38:	df5a0000	ld	k0,0(k0)
+  3c:	403bf803	dmfc0	k1,c0_kscratch2
+
+  40:	13400005	beqz	k0,58 <wait_loop>
+  44:	00000000	nop
+
+  48:	03400008	jr	k0
+  4c:	00000000	nop
+
+  50:	00000000	nop
+  54:	00000000	nop
+
+0000000000000058 <wait_loop>:
+  58:	42000020	wait
+  5c:	00000000	nop
+
+  60:	1000fffd	b	58 <wait_loop>
+  64:	00000000	nop
+
+  68:	00000000	nop
+  6c:	00000000	nop
+
+ */
+
+#include <asm/octeon/cvmx-boot-vector.h>
+
+static unsigned long long _cvmx_bootvector_data[16] = {
+	0x40baf80040bbf803ull,  /* patch low order 8-bits if no KScratch*/
+	0x401a6000401b7801ull,
+	0x375a0084337b03ffull,
+	0x409a6000001bd940ull,
+	0x3c1abfc0bc110000ull,
+	0xdf5a0078041f0000ull,
+	0x035bd02d00000000ull,
+	0xdf5a0000403bf803ull,  /* patch low order 8-bits if no KScratch*/
+	0x1340000500000000ull,
+	0x0340000800000000ull,
+	0x0000000000000000ull,
+	0x4200002000000000ull,
+	0x1000fffd00000000ull,
+	0x0000000000000000ull,
+	OCTEON_BOOT_MOVEABLE_MAGIC1,
+	0 /* To be filled in with address of vector block*/
+};
+
+/* 2^10 CPUs */
+#define VECTOR_TABLE_SIZE (1024 * sizeof(struct cvmx_boot_vector_element))
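+/*
+ * The reset stub shifts the core ID left by 5 before indexing the
+ * table, so each cvmx_boot_vector_element is expected to be 32 bytes;
+ * a 1024-entry table therefore occupies 32 KB.
+ */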
+
+static void cvmx_boot_vector_init(void *mem)
+{
+	uint64_t kseg0_mem;
+	int i;
+
+	memset(mem, 0, VECTOR_TABLE_SIZE);
+	kseg0_mem = cvmx_ptr_to_phys(mem) | 0x8000000000000000ull;
+
+	for (i = 0; i < 15; i++) {
+		uint64_t v = _cvmx_bootvector_data[i];
+
+		if (OCTEON_IS_OCTEON1PLUS() && (i == 0 || i == 7))
+			v &= 0xffffffff00000000ull; /* KScratch not available. */
+		cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, i * 8);
+		cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, v);
+	}
+	cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, 15 * 8);
+	cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, kseg0_mem);
+	cvmx_write_csr(CVMX_MIO_BOOT_LOC_CFGX(0), 0x81fc0000);
+}
+
+/**
+ * Get a pointer to the per-core table of reset vector pointers
+ *
+ */
+struct cvmx_boot_vector_element *cvmx_boot_vector_get(void)
+{
+	struct cvmx_boot_vector_element *ret;
+
+	ret = cvmx_bootmem_alloc_named_range_once(VECTOR_TABLE_SIZE, 0,
+		(1ull << 32) - 1, 8, "__boot_vector1__", cvmx_boot_vector_init);
+	return ret;
+}
+EXPORT_SYMBOL(cvmx_boot_vector_get);
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
new file mode 100644
index 0000000..94d97eb
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -0,0 +1,780 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Simple allocate only memory allocator.  Used to allocate memory at
+ * application start time.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-spinlock.h>
+#include <asm/octeon/cvmx-bootmem.h>
+
+/*#define DEBUG */
+
+
+static struct cvmx_bootmem_desc *cvmx_bootmem_desc;
+
+/* See header file for descriptions of functions */
+
+/**
+ * This macro returns the size of a member of a structure.
+ * Logically it is the same as "sizeof(s::field)" in C++, but
+ * C lacks the "::" operator.
+ */
+#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field)
+
+/**
+ * This macro returns a member of the
+ * cvmx_bootmem_named_block_desc_t structure. These members can't
+ * be directly addressed as they might be in memory not directly
+ * reachable. In the case where bootmem is compiled with
+ * LINUX_HOST, the structure itself might be located on a remote
+ * Octeon. The argument "field" is the member name of the
+ * cvmx_bootmem_named_block_desc_t to read. Regardless of the type
+ * of the field, the return type is always a uint64_t. The "addr"
+ * parameter is the physical address of the structure.
+ */
+#define CVMX_BOOTMEM_NAMED_GET_FIELD(addr, field)			\
+	__cvmx_bootmem_desc_get(addr,					\
+		offsetof(struct cvmx_bootmem_named_block_desc, field),	\
+		SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field))
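+/*
+ * For example, CVMX_BOOTMEM_NAMED_GET_FIELD(addr, base_addr) reads the
+ * 64-bit base_addr member of the named block descriptor at physical
+ * address "addr" by passing its offset and size to
+ * __cvmx_bootmem_desc_get() below.
+ */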
+
+/**
+ * This function is the implementation of the get macros defined
+ * for individual structure members. The arguments are generated
+ * by the macros in order to read only the needed memory.
+ *
+ * @param base   64bit physical address of the complete structure
+ * @param offset Offset from the beginning of the structure to the member being
+ *               accessed.
+ * @param size   Size of the structure member.
+ *
+ * @return Value of the structure member promoted into a uint64_t.
+ */
+static inline uint64_t __cvmx_bootmem_desc_get(uint64_t base, int offset,
+					       int size)
+{
+	base = (1ull << 63) | (base + offset);
+	switch (size) {
+	case 4:
+		return cvmx_read64_uint32(base);
+	case 8:
+		return cvmx_read64_uint64(base);
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Wrapper functions are provided for reading/writing the size and
+ * next block values as these may not be directly addressable (in 32
+ * bit applications, for instance).  Offsets of data elements in the
+ * bootmem list must match cvmx_bootmem_block_header_t.
+ */
+#define NEXT_OFFSET 0
+#define SIZE_OFFSET 8
+
+static void cvmx_bootmem_phy_set_size(uint64_t addr, uint64_t size)
+{
+	cvmx_write64_uint64((addr + SIZE_OFFSET) | (1ull << 63), size);
+}
+
+static void cvmx_bootmem_phy_set_next(uint64_t addr, uint64_t next)
+{
+	cvmx_write64_uint64((addr + NEXT_OFFSET) | (1ull << 63), next);
+}
+
+static uint64_t cvmx_bootmem_phy_get_size(uint64_t addr)
+{
+	return cvmx_read64_uint64((addr + SIZE_OFFSET) | (1ull << 63));
+}
+
+static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr)
+{
+	return cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63));
+}
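+
+/*
+ * Taken together, the helpers above mean each free block starts with a
+ * 16-byte header: the physical address of the next free block at
+ * offset 0 (zero terminates the list) and the block size in bytes at
+ * offset 8.  OR-ing in (1ull << 63) forms an XKPHYS address so the
+ * header can be accessed without a TLB mapping.
+ */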
+
+void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
+			       uint64_t min_addr, uint64_t max_addr)
+{
+	int64_t address;
+	address =
+	    cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, 0);
+
+	if (address > 0)
+		return cvmx_phys_to_ptr(address);
+	else
+		return NULL;
+}
+
+void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
+				 uint64_t alignment)
+{
+	return cvmx_bootmem_alloc_range(size, alignment, address,
+					address + size);
+}
+
+void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment)
+{
+	return cvmx_bootmem_alloc_range(size, alignment, 0, 0);
+}
+
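+/*
+ * Allocate-once variant: if a named block "name" already exists, its
+ * existing memory is returned; otherwise the block is allocated and
+ * initialized exactly once, via "init" if given, else zeroed.
+ * cvmx_boot_vector_get() uses this with the "__boot_vector1__" name so
+ * only the first caller pays for initialization.
+ */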
+void *cvmx_bootmem_alloc_named_range_once(uint64_t size, uint64_t min_addr,
+					  uint64_t max_addr, uint64_t align,
+					  char *name,
+					  void (*init) (void *))
+{
+	int64_t addr;
+	void *ptr;
+	uint64_t named_block_desc_addr;
+
+	named_block_desc_addr = (uint64_t)
+		cvmx_bootmem_phy_named_block_find(name,
+						  (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+	if (named_block_desc_addr) {
+		addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_desc_addr,
+						    base_addr);
+		return cvmx_phys_to_ptr(addr);
+	}
+
+	addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr,
+						  align, name,
+						  (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+	if (addr < 0)
+		return NULL;
+	ptr = cvmx_phys_to_ptr(addr);
+
+	if (init)
+		init(ptr);
+	else
+		memset(ptr, 0, size);
+
+	return ptr;
+}
+EXPORT_SYMBOL(cvmx_bootmem_alloc_named_range_once);
+
+void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr,
+				     uint64_t max_addr, uint64_t align,
+				     char *name)
+{
+	int64_t addr;
+
+	addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr,
+						  align, name, 0);
+	if (addr >= 0)
+		return cvmx_phys_to_ptr(addr);
+	else
+		return NULL;
+}
+
+void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address,
+				       char *name)
+{
+    return cvmx_bootmem_alloc_named_range(size, address, address + size,
+					  0, name);
+}
+
+void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *name)
+{
+    return cvmx_bootmem_alloc_named_range(size, 0, 0, alignment, name);
+}
+EXPORT_SYMBOL(cvmx_bootmem_alloc_named);
+
+int cvmx_bootmem_free_named(char *name)
+{
+	return cvmx_bootmem_phy_named_block_free(name, 0);
+}
+
+struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name)
+{
+	return cvmx_bootmem_phy_named_block_find(name, 0);
+}
+EXPORT_SYMBOL(cvmx_bootmem_find_named_block);
+
+void cvmx_bootmem_lock(void)
+{
+	cvmx_spinlock_lock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
+}
+
+void cvmx_bootmem_unlock(void)
+{
+	cvmx_spinlock_unlock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
+}
+
+int cvmx_bootmem_init(void *mem_desc_ptr)
+{
+	/* Here we set the global pointer to the bootmem descriptor
+	 * block.  This pointer will be used directly, so we will set
+	 * it up to be directly usable by the application.  It is set
+	 * up as follows for the various runtime/ABI combinations:
+	 *
+	 * Linux 64 bit: Set XKPHYS bit
+	 * Linux 32 bit: use mmap to create mapping, use virtual address
+	 * CVMX 64 bit:	 use physical address directly
+	 * CVMX 32 bit:	 use physical address directly
+	 *
+	 * Note that the CVMX environment assumes the use of 1-1 TLB
+	 * mappings so that the physical addresses can be used
+	 * directly
+	 */
+	if (!cvmx_bootmem_desc) {
+#if   defined(CVMX_ABI_64)
+		/* Set XKPHYS bit */
+		cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
+#else
+		cvmx_bootmem_desc = (struct cvmx_bootmem_desc *) mem_desc_ptr;
+#endif
+	}
+
+	return 0;
+}
+
+/*
+ * The cvmx_bootmem_phy* functions below return 64 bit physical
+ * addresses, and expose more features than the cvmx_bootmem functions
+ * above.  These are required for full memory space access in 32 bit
+ * applications, as well as for using some advanced features.  Most
+ * applications should not need to use these.
+ */
+
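+/*
+ * Worked example of the first-fit logic in cvmx_bootmem_phy_alloc():
+ * a request for 0x100 bytes aligned to 0x400 against a free block at
+ * 0x10200 of size 0x1000 gives usable_base = 0x10400.  Since that is
+ * not the start of the entry, the entry is split into [0x10200,
+ * 0x10400) and [0x10400, 0x11200), and the next loop iteration
+ * allocates 0x100 bytes from the front of the new 0x10400 entry.
+ */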
+int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
+			       uint64_t address_max, uint64_t alignment,
+			       uint32_t flags)
+{
+
+	uint64_t head_addr;
+	uint64_t ent_addr;
+	/* points to previous list entry; 0 means current entry is head of list */
+	uint64_t prev_addr = 0;
+	uint64_t new_ent_addr = 0;
+	uint64_t desired_min_addr;
+
+#ifdef DEBUG
+	cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, "
+		     "min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
+		     (unsigned long long)req_size,
+		     (unsigned long long)address_min,
+		     (unsigned long long)address_max,
+		     (unsigned long long)alignment);
+#endif
+
+	if (cvmx_bootmem_desc->major_version > 3) {
+		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
+			     "version: %d.%d at addr: %p\n",
+			     (int)cvmx_bootmem_desc->major_version,
+			     (int)cvmx_bootmem_desc->minor_version,
+			     cvmx_bootmem_desc);
+		goto error_out;
+	}
+
+	/*
+	 * Do a variety of checks to validate the arguments.  The
+	 * allocator code will later assume that these checks have
+	 * been made.  We validate that the requested constraints are
+	 * not self-contradictory before we look through the list of
+	 * available memory.
+	 */
+
+	/* 0 is not a valid req_size for this allocator */
+	if (!req_size)
+		goto error_out;
+
+	/* Round req_size up to a multiple of the minimum alignment */
+	req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
+		~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
+
+	/*
+	 * Convert !0 address_min and 0 address_max to special case of
+	 * range that specifies an exact memory block to allocate.  Do
+	 * this before other checks and adjustments so that this
+	 * transformation will be validated.
+	 */
+	if (address_min && !address_max)
+		address_max = address_min + req_size;
+	else if (!address_min && !address_max)
+		address_max = ~0ull;  /* If no limits given, use max limits */
+
+
+	/*
+	 * Enforce minimum alignment (this also keeps the minimum free block
+	 * req_size the same as the alignment req_size).
+	 */
+	if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
+		alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;
+
+	/*
+	 * Adjust address minimum based on requested alignment (round
+	 * up to meet alignment).  Do this here so we can reject
+	 * impossible requests up front. (NOP for address_min == 0)
+	 */
+	if (alignment)
+		address_min = ALIGN(address_min, alignment);
+
+	/*
+	 * Reject inconsistent args.  We have adjusted these, so this
+	 * may fail due to our internal changes even if this check
+	 * would pass for the values the user supplied.
+	 */
+	if (req_size > address_max - address_min)
+		goto error_out;
+
+	/* Walk through the list entries - first fit found is returned */
+
+	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+		cvmx_bootmem_lock();
+	head_addr = cvmx_bootmem_desc->head_addr;
+	ent_addr = head_addr;
+	for (; ent_addr;
+	     prev_addr = ent_addr,
+	     ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {
+		uint64_t usable_base, usable_max;
+		uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);
+
+		if (cvmx_bootmem_phy_get_next(ent_addr)
+		    && ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) {
+			cvmx_dprintf("Internal bootmem_alloc() error: ent: "
+				"0x%llx, next: 0x%llx\n",
+				(unsigned long long)ent_addr,
+				(unsigned long long)
+				cvmx_bootmem_phy_get_next(ent_addr));
+			goto error_out;
+		}
+
+		/*
+		 * Determine if this is an entry that can satisfy the
+		 * request; check to make sure the entry is large enough
+		 * to satisfy the request.
+		 */
+		usable_base =
+		    ALIGN(max(address_min, ent_addr), alignment);
+		usable_max = min(address_max, ent_addr + ent_size);
+		/*
+		 * We should be able to allocate block at address
+		 * usable_base.
+		 */
+
+		desired_min_addr = usable_base;
+		/*
+		 * Determine if request can be satisfied from the
+		 * current entry.
+		 */
+		if (!((ent_addr + ent_size) > usable_base
+				&& ent_addr < address_max
+				&& req_size <= usable_max - usable_base))
+			continue;
+		/*
+		 * We have found an entry that has room to satisfy the
+		 * request, so allocate it from this entry.  If
+		 * CVMX_BOOTMEM_FLAG_END_ALLOC is set, then allocate from
+		 * the end of this block rather than the beginning.
+		 */
+		if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC) {
+			desired_min_addr = usable_max - req_size;
+			/*
+			 * Align desired address down to required
+			 * alignment.
+			 */
+			desired_min_addr &= ~(alignment - 1);
+		}
+
+		/* Match at start of entry */
+		if (desired_min_addr == ent_addr) {
+			if (req_size < ent_size) {
+				/*
+				 * big enough to create a new block
+				 * from top portion of block.
+				 */
+				new_ent_addr = ent_addr + req_size;
+				cvmx_bootmem_phy_set_next(new_ent_addr,
+					cvmx_bootmem_phy_get_next(ent_addr));
+				cvmx_bootmem_phy_set_size(new_ent_addr,
+							ent_size -
+							req_size);
+
+				/*
+				 * Adjust next pointer as following
+				 * code uses this.
+				 */
+				cvmx_bootmem_phy_set_next(ent_addr,
+							new_ent_addr);
+			}
+
+			/*
+			 * adjust prev ptr or head to remove this
+			 * entry from list.
+			 */
+			if (prev_addr)
+				cvmx_bootmem_phy_set_next(prev_addr,
+					cvmx_bootmem_phy_get_next(ent_addr));
+			else
+				/*
+				 * head of list being returned, so
+				 * update head ptr.
+				 */
+				cvmx_bootmem_desc->head_addr =
+					cvmx_bootmem_phy_get_next(ent_addr);
+
+			if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+				cvmx_bootmem_unlock();
+			return desired_min_addr;
+		}
+		/*
+		 * The block to be returned doesn't start at the
+		 * beginning of this entry, so split the entry: create
+		 * a new entry from the high portion so that it starts
+		 * at the desired address, shrink this entry to cover
+		 * the low portion, and loop again to do the actual
+		 * allocation from the new entry.
+		 */
+		new_ent_addr = desired_min_addr;
+		cvmx_bootmem_phy_set_next(new_ent_addr,
+					cvmx_bootmem_phy_get_next
+					(ent_addr));
+		cvmx_bootmem_phy_set_size(new_ent_addr,
+					cvmx_bootmem_phy_get_size
+					(ent_addr) -
+					(desired_min_addr -
+						ent_addr));
+		cvmx_bootmem_phy_set_size(ent_addr,
+					desired_min_addr - ent_addr);
+		cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
+		/* Loop again to handle actual alloc from new block */
+	}
+error_out:
+	/* We didn't find anything, so return error */
+	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+		cvmx_bootmem_unlock();
+	return -1;
+}
+
+int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
+{
+	uint64_t cur_addr;
+	uint64_t prev_addr = 0; /* zero is invalid */
+	int retval = 0;
+
+#ifdef DEBUG
+	cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n",
+		     (unsigned long long)phy_addr, (unsigned long long)size);
+#endif
+	if (cvmx_bootmem_desc->major_version > 3) {
+		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
+			     "version: %d.%d at addr: %p\n",
+			     (int)cvmx_bootmem_desc->major_version,
+			     (int)cvmx_bootmem_desc->minor_version,
+			     cvmx_bootmem_desc);
+		return 0;
+	}
+
+	/* 0 is not a valid size for this allocator */
+	if (!size)
+		return 0;
+
+	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+		cvmx_bootmem_lock();
+	cur_addr = cvmx_bootmem_desc->head_addr;
+	if (cur_addr == 0 || phy_addr < cur_addr) {
+		/* add at front of list - special case with changing head ptr */
+		if (cur_addr && phy_addr + size > cur_addr)
+			goto bootmem_free_done; /* error, overlapping section */
+		else if (phy_addr + size == cur_addr) {
+			/* Add to front of existing first block */
+			cvmx_bootmem_phy_set_next(phy_addr,
+						  cvmx_bootmem_phy_get_next
+						  (cur_addr));
+			cvmx_bootmem_phy_set_size(phy_addr,
+						  cvmx_bootmem_phy_get_size
+						  (cur_addr) + size);
+			cvmx_bootmem_desc->head_addr = phy_addr;
+
+		} else {
+			/* New block before first block.  OK if cur_addr is 0 */
+			cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
+			cvmx_bootmem_phy_set_size(phy_addr, size);
+			cvmx_bootmem_desc->head_addr = phy_addr;
+		}
+		retval = 1;
+		goto bootmem_free_done;
+	}
+
+	/* Find place in list to add block */
+	while (cur_addr && phy_addr > cur_addr) {
+		prev_addr = cur_addr;
+		cur_addr = cvmx_bootmem_phy_get_next(cur_addr);
+	}
+
+	if (!cur_addr) {
+		/*
+		 * We have reached the end of the list, add on to end,
+		 * checking to see if we need to combine with last
+		 * block
+		 */
+		if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
+		    phy_addr) {
+			cvmx_bootmem_phy_set_size(prev_addr,
+						  cvmx_bootmem_phy_get_size
+						  (prev_addr) + size);
+		} else {
+			cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+			cvmx_bootmem_phy_set_size(phy_addr, size);
+			cvmx_bootmem_phy_set_next(phy_addr, 0);
+		}
+		retval = 1;
+		goto bootmem_free_done;
+	} else {
+		/*
+		 * insert between prev and cur nodes, checking for
+		 * merge with either/both.
+		 */
+		if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
+		    phy_addr) {
+			/* Merge with previous */
+			cvmx_bootmem_phy_set_size(prev_addr,
+						  cvmx_bootmem_phy_get_size
+						  (prev_addr) + size);
+			if (phy_addr + size == cur_addr) {
+				/* Also merge with current */
+				cvmx_bootmem_phy_set_size(prev_addr,
+					cvmx_bootmem_phy_get_size(cur_addr) +
+					cvmx_bootmem_phy_get_size(prev_addr));
+				cvmx_bootmem_phy_set_next(prev_addr,
+					cvmx_bootmem_phy_get_next(cur_addr));
+			}
+			retval = 1;
+			goto bootmem_free_done;
+		} else if (phy_addr + size == cur_addr) {
+			/* Merge with current */
+			cvmx_bootmem_phy_set_size(phy_addr,
+						  cvmx_bootmem_phy_get_size
+						  (cur_addr) + size);
+			cvmx_bootmem_phy_set_next(phy_addr,
+						  cvmx_bootmem_phy_get_next
+						  (cur_addr));
+			cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+			retval = 1;
+			goto bootmem_free_done;
+		}
+
+		/* It is a standalone block, add in between prev and cur */
+		cvmx_bootmem_phy_set_size(phy_addr, size);
+		cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
+		cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+
+	}
+	retval = 1;
+
+bootmem_free_done:
+	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+		cvmx_bootmem_unlock();
+	return retval;
+
+}
+
+struct cvmx_bootmem_named_block_desc *
+	cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags)
+{
+	unsigned int i;
+	struct cvmx_bootmem_named_block_desc *named_block_array_ptr;
+
+#ifdef DEBUG
+	cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name);
+#endif
+	/*
+	 * Lock the structure to make sure that it is not being
+	 * changed while we are examining it.
+	 */
+	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+		cvmx_bootmem_lock();
+
+	/* Use XKPHYS for 64 bit linux */
+	named_block_array_ptr = (struct cvmx_bootmem_named_block_desc *)
+	    cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr);
+
+#ifdef DEBUG
+	cvmx_dprintf
+	    ("cvmx_bootmem_phy_named_block_find: named_block_array_ptr: %p\n",
+	     named_block_array_ptr);
+#endif
+	if (cvmx_bootmem_desc->major_version == 3) {
+		for (i = 0;
+		     i < cvmx_bootmem_desc->named_block_num_blocks; i++) {
+			if ((name && named_block_array_ptr[i].size
+			     && !strncmp(name, named_block_array_ptr[i].name,
+					 cvmx_bootmem_desc->named_block_name_len
+					 - 1))
+			    || (!name && !named_block_array_ptr[i].size)) {
+				if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+					cvmx_bootmem_unlock();
+
+				return &(named_block_array_ptr[i]);
+			}
+		}
+	} else {
+		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
+			     "version: %d.%d at addr: %p\n",
+			     (int)cvmx_bootmem_desc->major_version,
+			     (int)cvmx_bootmem_desc->minor_version,
+			     cvmx_bootmem_desc);
+	}
+	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+		cvmx_bootmem_unlock();
+
+	return NULL;
+}
+
+int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
+{
+	struct cvmx_bootmem_named_block_desc *named_block_ptr;
+
+	if (cvmx_bootmem_desc->major_version != 3) {
+		cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
+			     "%d.%d at addr: %p\n",
+			     (int)cvmx_bootmem_desc->major_version,
+			     (int)cvmx_bootmem_desc->minor_version,
+			     cvmx_bootmem_desc);
+		return 0;
+	}
+#ifdef DEBUG
+	cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s\n", name);
+#endif
+
+	/*
+	 * Take lock here, as name lookup/block free/name free need to
+	 * be atomic.
+	 */
+	cvmx_bootmem_lock();
+
+	named_block_ptr =
+	    cvmx_bootmem_phy_named_block_find(name,
+					      CVMX_BOOTMEM_FLAG_NO_LOCKING);
+	if (named_block_ptr) {
+#ifdef DEBUG
+		cvmx_dprintf("cvmx_bootmem_phy_named_block_free: "
+			     "%s, base: 0x%llx, size: 0x%llx\n",
+			     name,
+			     (unsigned long long)named_block_ptr->base_addr,
+			     (unsigned long long)named_block_ptr->size);
+#endif
+		__cvmx_bootmem_phy_free(named_block_ptr->base_addr,
+					named_block_ptr->size,
+					CVMX_BOOTMEM_FLAG_NO_LOCKING);
+		named_block_ptr->size = 0;
+		/* Set size to zero to indicate block not used. */
+	}
+
+	cvmx_bootmem_unlock();
+	return named_block_ptr != NULL; /* 0 on failure, 1 on success */
+}
+
+int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
+					   uint64_t max_addr,
+					   uint64_t alignment,
+					   char *name,
+					   uint32_t flags)
+{
+	int64_t addr_allocated;
+	struct cvmx_bootmem_named_block_desc *named_block_desc_ptr;
+
+#ifdef DEBUG
+	cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: "
+		     "0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n",
+		     (unsigned long long)size,
+		     (unsigned long long)min_addr,
+		     (unsigned long long)max_addr,
+		     (unsigned long long)alignment,
+		     name);
+#endif
+	if (cvmx_bootmem_desc->major_version != 3) {
+		cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
+			     "%d.%d at addr: %p\n",
+			     (int)cvmx_bootmem_desc->major_version,
+			     (int)cvmx_bootmem_desc->minor_version,
+			     cvmx_bootmem_desc);
+		return -1;
+	}
+
+	/*
+	 * Take lock here, as name lookup/block alloc/name add need to
+	 * be atomic.
+	 */
+	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+		cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+
+	/* Get pointer to first available named block descriptor */
+	named_block_desc_ptr =
+		cvmx_bootmem_phy_named_block_find(NULL,
+						  flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+	/*
+	 * Check to see if name already in use, return error if name
+	 * not available or no more room for blocks.
+	 */
+	if (cvmx_bootmem_phy_named_block_find(name,
+					      flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) || !named_block_desc_ptr) {
+		if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+			cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+		return -1;
+	}
+
+
+	/*
+	 * Round size up to a multiple of the minimum alignment.  We need
+	 * the actual size allocated to allow for blocks to be
+	 * coalesced when they are freed. The alloc routine does the
+	 * same rounding up on all allocations.
+	 */
+	size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE);
+
+	addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr,
+						alignment,
+						flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+	if (addr_allocated >= 0) {
+		named_block_desc_ptr->base_addr = addr_allocated;
+		named_block_desc_ptr->size = size;
+		strncpy(named_block_desc_ptr->name, name,
+			cvmx_bootmem_desc->named_block_name_len);
+		named_block_desc_ptr->name[cvmx_bootmem_desc->named_block_name_len - 1] = 0;
+	}
+
+	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+		cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+	return addr_allocated;
+}
+
+struct cvmx_bootmem_desc *cvmx_bootmem_get_desc(void)
+{
+	return cvmx_bootmem_desc;
+}
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
new file mode 100644
index 0000000..3839feb
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
@@ -0,0 +1,307 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Support functions for managing command queues used for
+ * various hardware blocks.
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-fpa.h>
+#include <asm/octeon/cvmx-cmd-queue.h>
+
+#include <asm/octeon/cvmx-npei-defs.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/cvmx-pko-defs.h>
+
+/**
+ * The application uses this pointer to access the global queue
+ * state. It points to a bootmem named block.
+ */
+__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
+EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);
+
+/**
+ * Initialize the Global queue state pointer.
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
+{
+	char *alloc_name = "cvmx_cmd_queues";
+#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
+	extern uint64_t octeon_reserve32_memory;
+#endif
+
+	if (likely(__cvmx_cmd_queue_state_ptr))
+		return CVMX_CMD_QUEUE_SUCCESS;
+
+#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
+	if (octeon_reserve32_memory)
+		__cvmx_cmd_queue_state_ptr =
+		    cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
+						   octeon_reserve32_memory,
+						   octeon_reserve32_memory +
+						   (CONFIG_CAVIUM_RESERVE32 <<
+						    20) - 1, 128, alloc_name);
+	else
+#endif
+		__cvmx_cmd_queue_state_ptr =
+		    cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
+					    128,
+					    alloc_name);
+	if (__cvmx_cmd_queue_state_ptr)
+		memset(__cvmx_cmd_queue_state_ptr, 0,
+		       sizeof(*__cvmx_cmd_queue_state_ptr));
+	else {
+		struct cvmx_bootmem_named_block_desc *block_desc =
+		    cvmx_bootmem_find_named_block(alloc_name);
+		if (block_desc)
+			__cvmx_cmd_queue_state_ptr =
+			    cvmx_phys_to_ptr(block_desc->base_addr);
+		else {
+			cvmx_dprintf
+			    ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
+			     alloc_name);
+			return CVMX_CMD_QUEUE_NO_MEMORY;
+		}
+	}
+	return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Initialize a command queue for use. The initial FPA buffer is
+ * allocated and the hardware unit is configured to point to the
+ * new command queue.
+ *
+ * @queue_id:  Hardware command queue to initialize.
+ * @max_depth: Maximum outstanding commands that can be queued.
+ * @fpa_pool:  FPA pool the command queues should come from.
+ * @pool_size: Size of each buffer in the FPA pool (bytes)
+ *
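+ * A typical caller is the PKO setup path, which initializes each
+ * queue along these lines (values are illustrative SDK defaults, not
+ * fixed by this API):
+ *
+ *	cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO(queue),
+ *				  CVMX_PKO_MAX_QUEUE_DEPTH,
+ *				  CVMX_FPA_OUTPUT_BUFFER_POOL,
+ *				  CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
+ *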
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
+						  int max_depth, int fpa_pool,
+						  int pool_size)
+{
+	__cvmx_cmd_queue_state_t *qstate;
+	cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
+	if (result != CVMX_CMD_QUEUE_SUCCESS)
+		return result;
+
+	qstate = __cvmx_cmd_queue_get_state(queue_id);
+	if (qstate == NULL)
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+	/*
+	 * We artificially limit max_depth to 1<<20 words. It is an
+	 * arbitrary limit.
+	 */
+	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
+		if ((max_depth < 0) || (max_depth > 1 << 20))
+			return CVMX_CMD_QUEUE_INVALID_PARAM;
+	} else if (max_depth != 0)
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+	if ((fpa_pool < 0) || (fpa_pool > 7))
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+	if ((pool_size < 128) || (pool_size > 65536))
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+	/* See if someone else has already initialized the queue */
+	if (qstate->base_ptr_div128) {
+		if (max_depth != (int)qstate->max_depth) {
+			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+				"Queue already initialized with different "
+				"max_depth (%d).\n",
+			     (int)qstate->max_depth);
+			return CVMX_CMD_QUEUE_INVALID_PARAM;
+		}
+		if (fpa_pool != qstate->fpa_pool) {
+			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+				"Queue already initialized with different "
+				"FPA pool (%u).\n",
+			     qstate->fpa_pool);
+			return CVMX_CMD_QUEUE_INVALID_PARAM;
+		}
+		if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
+			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+				"Queue already initialized with different "
+				"FPA pool size (%u).\n",
+			     (qstate->pool_size_m1 + 1) << 3);
+			return CVMX_CMD_QUEUE_INVALID_PARAM;
+		}
+		CVMX_SYNCWS;
+		return CVMX_CMD_QUEUE_ALREADY_SETUP;
+	} else {
+		union cvmx_fpa_ctl_status status;
+		void *buffer;
+
+		status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
+		if (!status.s.enb) {
+			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+				     "FPA is not enabled.\n");
+			return CVMX_CMD_QUEUE_NO_MEMORY;
+		}
+		buffer = cvmx_fpa_alloc(fpa_pool);
+		if (buffer == NULL) {
+			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+				     "Unable to allocate initial buffer.\n");
+			return CVMX_CMD_QUEUE_NO_MEMORY;
+		}
+
+		memset(qstate, 0, sizeof(*qstate));
+		qstate->max_depth = max_depth;
+		qstate->fpa_pool = fpa_pool;
+		qstate->pool_size_m1 = (pool_size >> 3) - 1;
+		qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
+		/*
+		 * We zeroed the now serving field so we need to also
+		 * zero the ticket.
+		 */
+		__cvmx_cmd_queue_state_ptr->
+		    ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
+		CVMX_SYNCWS;
+		return CVMX_CMD_QUEUE_SUCCESS;
+	}
+}
+
+/**
+ * Shut down a queue and free its command buffers to the FPA. The
+ * hardware connected to the queue must be stopped before this
+ * function is called.
+ *
+ * @queue_id: Queue to shutdown
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
+{
+	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+	if (qptr == NULL) {
+		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
+			     "get queue information.\n");
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+	}
+
+	if (cvmx_cmd_queue_length(queue_id) > 0) {
+		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
+			     "has data in it.\n");
+		return CVMX_CMD_QUEUE_FULL;
+	}
+
+	__cvmx_cmd_queue_lock(queue_id, qptr);
+	if (qptr->base_ptr_div128) {
+		cvmx_fpa_free(cvmx_phys_to_ptr
+			      ((uint64_t) qptr->base_ptr_div128 << 7),
+			      qptr->fpa_pool, 0);
+		qptr->base_ptr_div128 = 0;
+	}
+	__cvmx_cmd_queue_unlock(qptr);
+
+	return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Return the number of command words pending in the queue. This
+ * function may be relatively slow for some hardware units.
+ *
+ * @queue_id: Hardware command queue to query
+ *
+ * Returns Number of outstanding commands
+ */
+int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
+{
+	if (CVMX_ENABLE_PARAMETER_CHECKING) {
+		if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
+			return CVMX_CMD_QUEUE_INVALID_PARAM;
+	}
+
+	/*
+	 * The cast is here so gcc will check that all values in the
+	 * cvmx_cmd_queue_id_t enumeration are handled.
+	 */
+	switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
+	case CVMX_CMD_QUEUE_PKO_BASE:
+		/*
+		 * FIXME: Need atomic lock on
+		 * CVMX_PKO_REG_READ_IDX. Right now we are normally
+		 * called with the queue lock, so that is a SLIGHT
+		 * amount of protection.
+		 */
+		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
+		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+			union cvmx_pko_mem_debug9 debug9;
+			debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
+			return debug9.cn38xx.doorbell;
+		} else {
+			union cvmx_pko_mem_debug8 debug8;
+			debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
+			return debug8.cn50xx.doorbell;
+		}
+	case CVMX_CMD_QUEUE_ZIP:
+	case CVMX_CMD_QUEUE_DFA:
+	case CVMX_CMD_QUEUE_RAID:
+		/* FIXME: Implement other lengths */
+		return 0;
+	case CVMX_CMD_QUEUE_DMA_BASE:
+		{
+			union cvmx_npei_dmax_counts dmax_counts;
+			dmax_counts.u64 =
+			    cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
+					  (queue_id & 0x7));
+			return dmax_counts.s.dbell;
+		}
+	case CVMX_CMD_QUEUE_END:
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+	}
+	return CVMX_CMD_QUEUE_INVALID_PARAM;
+}
+
+/**
+ * Return the command buffer to be written to. The purpose of this
+ * function is to allow CVMX routines access to the low level buffer
+ * for initial hardware setup. User applications should not call this
+ * function directly.
+ *
+ * @queue_id: Command queue to query
+ *
+ * Returns Command buffer or NULL on failure
+ */
+void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
+{
+	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+	if (qptr && qptr->base_ptr_div128)
+		return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
+	else
+		return NULL;
+}
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
new file mode 100644
index 0000000..ab8362e
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -0,0 +1,424 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Helper functions to abstract board specific data about
+ * network ports from the rest of the cvmx-helper files.
+ */
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-bootinfo.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-util.h>
+#include <asm/octeon/cvmx-helper-board.h>
+
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-asxx-defs.h>
+
+/**
+ * Return the MII PHY address associated with the given IPD
+ * port. A result of -1 means there isn't a MII capable PHY
+ * connected to this port. On chips supporting multiple MII
+ * busses the bus number is encoded in bits <15:8>.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @ipd_port: Octeon IPD port to get the MII address for.
+ *
+ * Returns MII PHY address and bus number or -1.
+ */
+int cvmx_helper_board_get_mii_address(int ipd_port)
+{
+	switch (cvmx_sysinfo_get()->board_type) {
+	case CVMX_BOARD_TYPE_SIM:
+		/* Simulator doesn't have MII */
+		return -1;
+	case CVMX_BOARD_TYPE_EBT3000:
+	case CVMX_BOARD_TYPE_EBT5800:
+	case CVMX_BOARD_TYPE_THUNDER:
+	case CVMX_BOARD_TYPE_NICPRO2:
+		/* Interface 0 is SPI4, interface 1 is RGMII */
+		if ((ipd_port >= 16) && (ipd_port < 20))
+			return ipd_port - 16;
+		else
+			return -1;
+	case CVMX_BOARD_TYPE_KODAMA:
+	case CVMX_BOARD_TYPE_EBH3100:
+	case CVMX_BOARD_TYPE_HIKARI:
+	case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
+	case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+	case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
+		/*
+		 * Port 0 is WAN connected to a PHY, Port 1 is GMII
+		 * connected to a switch
+		 */
+		if (ipd_port == 0)
+			return 4;
+		else if (ipd_port == 1)
+			return 9;
+		else
+			return -1;
+	case CVMX_BOARD_TYPE_NAC38:
+		/* Board has 8 RGMII ports; PHYs are 0-7 */
+		if ((ipd_port >= 0) && (ipd_port < 4))
+			return ipd_port;
+		else if ((ipd_port >= 16) && (ipd_port < 20))
+			return ipd_port - 16 + 4;
+		else
+			return -1;
+	case CVMX_BOARD_TYPE_EBH3000:
+		/* Board has dual SPI4 and no PHYs */
+		return -1;
+	case CVMX_BOARD_TYPE_EBH5200:
+	case CVMX_BOARD_TYPE_EBH5201:
+	case CVMX_BOARD_TYPE_EBT5200:
+		/* Board has 2 management ports */
+		if ((ipd_port >= CVMX_HELPER_BOARD_MGMT_IPD_PORT) &&
+		    (ipd_port < (CVMX_HELPER_BOARD_MGMT_IPD_PORT + 2)))
+			return ipd_port - CVMX_HELPER_BOARD_MGMT_IPD_PORT;
+		/*
+		 * Board has 4 SGMII ports. The PHYs start right after the MII
+		 * ports MII0 = 0, MII1 = 1, SGMII = 2-5.
+		 */
+		if ((ipd_port >= 0) && (ipd_port < 4))
+			return ipd_port + 2;
+		else
+			return -1;
+	case CVMX_BOARD_TYPE_EBH5600:
+	case CVMX_BOARD_TYPE_EBH5601:
+	case CVMX_BOARD_TYPE_EBH5610:
+		/* Board has 1 management port */
+		if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
+			return 0;
+		/*
+		 * Board has 8 SGMII ports. 4 connect out, two connect
+		 * to a switch, and 2 loop to each other
+		 */
+		if ((ipd_port >= 0) && (ipd_port < 4))
+			return ipd_port + 1;
+		else
+			return -1;
+	case CVMX_BOARD_TYPE_CUST_NB5:
+		if (ipd_port == 2)
+			return 4;
+		else
+			return -1;
+	case CVMX_BOARD_TYPE_NIC_XLE_4G:
+		/* Board has 4 SGMII ports connected to QLM3 (interface 1) */
+		if ((ipd_port >= 16) && (ipd_port < 20))
+			return ipd_port - 16 + 1;
+		else
+			return -1;
+	case CVMX_BOARD_TYPE_NIC_XLE_10G:
+	case CVMX_BOARD_TYPE_NIC10E:
+		return -1;
+	case CVMX_BOARD_TYPE_NIC4E:
+		if (ipd_port >= 0 && ipd_port <= 3)
+			return (ipd_port + 0x1f) & 0x1f;
+		else
+			return -1;
+	case CVMX_BOARD_TYPE_NIC2E:
+		if (ipd_port >= 0 && ipd_port <= 1)
+			return ipd_port + 1;
+		else
+			return -1;
+	case CVMX_BOARD_TYPE_BBGW_REF:
+		/*
+		 * No PHYs are connected to Octeon, everything is
+		 * through switch.
+		 */
+		return -1;
+
+	case CVMX_BOARD_TYPE_CUST_WSX16:
+		if (ipd_port >= 0 && ipd_port <= 3)
+			return ipd_port;
+		else if (ipd_port >= 16 && ipd_port <= 19)
+			return ipd_port - 16 + 4;
+		else
+			return -1;
+	case CVMX_BOARD_TYPE_UBNT_E100:
+		if (ipd_port >= 0 && ipd_port <= 2)
+			return 7 - ipd_port;
+		else
+			return -1;
+	case CVMX_BOARD_TYPE_KONTRON_S1901:
+		if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
+			return 1;
+		else
+			return -1;
+
+	}
+
+	/* Some unknown board. Somebody forgot to update this function... */
+	cvmx_dprintf
+	    ("cvmx_helper_board_get_mii_address: Unknown board type %d\n",
+	     cvmx_sysinfo_get()->board_type);
+	return -1;
+}
+
+/**
+ * This function is the board specific method of determining an
+ * ethernet port's link speed. Most Octeon boards have Marvell PHYs
+ * and are handled by the fall through case. This function must be
+ * updated for boards that don't have the normal Marvell PHYs.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @ipd_port: IPD input port associated with the port we want to get link
+ *		   status for.
+ *
+ * Returns The ports link status. If the link isn't fully resolved, this must
+ *	   return zero.
+ */
+cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
+{
+	cvmx_helper_link_info_t result;
+
+	/* Unless we fix it later, all links are defaulted to down */
+	result.u64 = 0;
+
+	/*
+	 * This switch statement should handle all ports that either don't use
+	 * Marvell PHYS, or don't support in-band status.
+	 */
+	switch (cvmx_sysinfo_get()->board_type) {
+	case CVMX_BOARD_TYPE_SIM:
+		/* The simulator gives you a simulated 1Gbps full duplex link */
+		result.s.link_up = 1;
+		result.s.full_duplex = 1;
+		result.s.speed = 1000;
+		return result;
+	case CVMX_BOARD_TYPE_EBH3100:
+	case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
+	case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+	case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
+		/* Port 1 on these boards is always Gigabit */
+		if (ipd_port == 1) {
+			result.s.link_up = 1;
+			result.s.full_duplex = 1;
+			result.s.speed = 1000;
+			return result;
+		}
+		/* Fall through to the generic code below */
+		break;
+	case CVMX_BOARD_TYPE_CUST_NB5:
+		/* Port 1 on these boards is always Gigabit */
+		if (ipd_port == 1) {
+			result.s.link_up = 1;
+			result.s.full_duplex = 1;
+			result.s.speed = 1000;
+			return result;
+		}
+		break;
+	case CVMX_BOARD_TYPE_BBGW_REF:
+		/* Ports 0 and 1 connect to a switch; port 2 is not hooked up */
+		if (ipd_port == 2) {
+			/* Port 2 is not hooked up */
+			result.u64 = 0;
+			return result;
+		} else {
+			/* Ports 0 and 1 connect to the switch */
+			result.s.link_up = 1;
+			result.s.full_duplex = 1;
+			result.s.speed = 1000;
+			return result;
+		}
+		break;
+	}
+
+	if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
+		   || OCTEON_IS_MODEL(OCTEON_CN58XX)
+		   || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+		/*
+		 * We don't have a PHY address, so attempt to use
+		 * in-band status. It is really important that boards
+		 * not supporting in-band status never get
+		 * here. Reading broken in-band status tends to do bad
+		 * things
+		 */
+		union cvmx_gmxx_rxx_rx_inbnd inband_status;
+		int interface = cvmx_helper_get_interface_num(ipd_port);
+		int index = cvmx_helper_get_interface_index_num(ipd_port);
+		inband_status.u64 =
+		    cvmx_read_csr(CVMX_GMXX_RXX_RX_INBND(index, interface));
+
+		result.s.link_up = inband_status.s.status;
+		result.s.full_duplex = inband_status.s.duplex;
+		switch (inband_status.s.speed) {
+		case 0: /* 10 Mbps */
+			result.s.speed = 10;
+			break;
+		case 1: /* 100 Mbps */
+			result.s.speed = 100;
+			break;
+		case 2: /* 1 Gbps */
+			result.s.speed = 1000;
+			break;
+		case 3: /* Illegal */
+			result.u64 = 0;
+			break;
+		}
+	} else {
+		/*
+		 * We don't have a PHY address and we don't have
+		 * in-band status. There is no way to determine the
+		 * link speed. Return down assuming this port isn't
+		 * wired
+		 */
+		result.u64 = 0;
+	}
+
+	/* If link is down, return all fields as zero. */
+	if (!result.s.link_up)
+		result.u64 = 0;
+
+	return result;
+}
+
+/**
+ * This function is called by cvmx_helper_interface_probe() after it
+ * determines the number of ports Octeon can support on a specific
+ * interface. This function is the per board location to override
+ * this value. It is called with the number of ports Octeon might
+ * support and should return the number of actual ports on the
+ * board.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @interface: Interface to probe
+ * @supported_ports:
+ *		    Number of ports Octeon supports.
+ *
+ * Returns Number of ports the actual board supports. Many times this will
+ *	   simply be "supported_ports".
+ */
+int __cvmx_helper_board_interface_probe(int interface, int supported_ports)
+{
+	switch (cvmx_sysinfo_get()->board_type) {
+	case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+		if (interface == 0)
+			return 2;
+		break;
+	case CVMX_BOARD_TYPE_BBGW_REF:
+		if (interface == 0)
+			return 2;
+		break;
+	case CVMX_BOARD_TYPE_NIC_XLE_4G:
+		if (interface == 0)
+			return 0;
+		break;
+		/* The 2nd interface on the EBH5600 is connected to the Marvell
+		   switch, which we don't support. Disable the ports connected
+		   to it. */
+	case CVMX_BOARD_TYPE_EBH5600:
+		if (interface == 1)
+			return 0;
+		break;
+	}
+	return supported_ports;
+}
+
+/**
+ * Enable packet input/output from the hardware. This function is
+ * called by cvmx_helper_packet_hardware_enable() to
+ * perform board specific initialization. For most boards
+ * nothing is needed.
+ *
+ * @interface: Interface to enable
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_board_hardware_enable(int interface)
+{
+	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5) {
+		if (interface == 0) {
+			/* Different config for switch port */
+			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0);
+			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0);
+			/*
+			 * Boards with gigabit WAN ports need a
+			 * different setting that is compatible with
+			 * 100 Mbit settings
+			 */
+			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface),
+				       0xc);
+			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface),
+				       0xc);
+		}
+	} else if (cvmx_sysinfo_get()->board_type ==
+			CVMX_BOARD_TYPE_UBNT_E100) {
+		cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface), 0);
+		cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface), 0x10);
+		cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0);
+		cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0x10);
+		cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(2, interface), 0);
+		cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(2, interface), 0x10);
+	}
+	return 0;
+}
+
+/**
+ * Get the clock type used for the USB block based on board type.
+ * Used by the USB code for auto configuration of clock type.
+ *
+ * Return USB clock type enumeration
+ */
+enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(void)
+{
+	switch (cvmx_sysinfo_get()->board_type) {
+	case CVMX_BOARD_TYPE_BBGW_REF:
+	case CVMX_BOARD_TYPE_LANAI2_A:
+	case CVMX_BOARD_TYPE_LANAI2_U:
+	case CVMX_BOARD_TYPE_LANAI2_G:
+	case CVMX_BOARD_TYPE_NIC10E_66:
+	case CVMX_BOARD_TYPE_UBNT_E100:
+		return USB_CLOCK_TYPE_CRYSTAL_12;
+	case CVMX_BOARD_TYPE_NIC10E:
+		return USB_CLOCK_TYPE_REF_12;
+	default:
+		break;
+	}
+	/* Most boards except NIC10e use a 12MHz crystal */
+	if (OCTEON_IS_OCTEON2())
+		return USB_CLOCK_TYPE_CRYSTAL_12;
+	return USB_CLOCK_TYPE_REF_48;
+}
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c
new file mode 100644
index 0000000..4b26fed
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c
@@ -0,0 +1,73 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Fixes and workaround for Octeon chip errata. This file
+ * contains functions called by cvmx-helper to workaround known
+ * chip errata. For the most part, code doesn't need to call
+ * these functions directly.
+ *
+ */
+#include <linux/export.h>
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-helper-jtag.h>
+
+/**
+ * Due to errata G-720, the 2nd order CDR circuit on CN52XX pass
+ * 1 doesn't work properly. The following code disables 2nd order
+ * CDR for the specified QLM.
+ *
+ * @qlm:    QLM to disable 2nd order CDR for.
+ */
+void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm)
+{
+	int lane;
+	cvmx_helper_qlm_jtag_init();
+	/* We need to load all four lanes of the QLM, a total of 1072 bits */
+	for (lane = 0; lane < 4; lane++) {
+		/*
+		 * Each lane has 268 bits. We need to set
+		 * cfg_cdr_incx<67:64> = 3 and cfg_cdr_secord<77> =
+		 * 1. All other bits are zero. Bits go in LSB first,
+		 * so start off with the zeros for bits <63:0>.
+		 */
+		cvmx_helper_qlm_jtag_shift_zeros(qlm, 63 - 0 + 1);
+		/* cfg_cdr_incx<67:64>=3 */
+		cvmx_helper_qlm_jtag_shift(qlm, 67 - 64 + 1, 3);
+		/* Zeros for bits <76:68> */
+		cvmx_helper_qlm_jtag_shift_zeros(qlm, 76 - 68 + 1);
+		/* cfg_cdr_secord<77>=1 */
+		cvmx_helper_qlm_jtag_shift(qlm, 77 - 77 + 1, 1);
+		/* Zeros for bits <267:78> */
+		cvmx_helper_qlm_jtag_shift_zeros(qlm, 267 - 78 + 1);
+	}
+	cvmx_helper_qlm_jtag_update(qlm);
+}
+EXPORT_SYMBOL(__cvmx_helper_errata_qlm_disable_2nd_order_cdr);
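+
+/*
+ * Usage sketch (hypothetical call site; the real callers live in the
+ * QLM bringup code, not in this file):
+ *
+ *	if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
+ *		__cvmx_helper_errata_qlm_disable_2nd_order_cdr(qlm);
+ */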
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
new file mode 100644
index 0000000..607b4e6
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
@@ -0,0 +1,144 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Helper utilities for qlm_jtag.
+ *
+ */
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-helper-jtag.h>
+
+
+/**
+ * Initialize the internal QLM JTAG logic to allow programming
+ * of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions.
+ * These functions should only be used at the direction of Cavium
+ * Networks. Programming incorrect values into the JTAG chain
+ * can cause chip damage.
+ */
+void cvmx_helper_qlm_jtag_init(void)
+{
+	union cvmx_ciu_qlm_jtgc jtgc;
+	uint32_t clock_div = 0;
+	uint32_t divisor = cvmx_sysinfo_get()->cpu_clock_hz / (25 * 1000000);
+	divisor = (divisor - 1) >> 2;
+	/* Convert the divisor into a power of 2 shift */
+	while (divisor) {
+		clock_div++;
+		divisor = divisor >> 1;
+	}
+
+	/*
+	 * Clock divider for QLM JTAG operations.  eclk is divided by
+	 * 2^(CLK_DIV + 2)
+	 */
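+	/*
+	 * Worked example (assuming a hypothetical 600 MHz eclk):
+	 * divisor = 600000000 / 25000000 = 24, then (24 - 1) >> 2 = 5,
+	 * and the loop above turns 5 into clock_div = 3, giving a JTAG
+	 * clock of 600 MHz / 2^(3 + 2) = 18.75 MHz, safely below the
+	 * 25 MHz target.
+	 */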
+	jtgc.u64 = 0;
+	jtgc.s.clk_div = clock_div;
+	jtgc.s.mux_sel = 0;
+	if (OCTEON_IS_MODEL(OCTEON_CN52XX))
+		jtgc.s.bypass = 0x3;
+	else
+		jtgc.s.bypass = 0xf;
+	cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64);
+	cvmx_read_csr(CVMX_CIU_QLM_JTGC);
+}
+
+/**
+ * Write up to 32 bits into the QLM JTAG chain. Bits are shifted
+ * into the MSB and out the LSB, so you should shift in the low
+ * order bits followed by the high order bits. The JTAG chain is
+ * 4 * 268 bits long, or 1072 bits in total.
+ *
+ * @qlm:    QLM to shift value into
+ * @bits:   Number of bits to shift in (1-32).
+ * @data:   Data to shift in. Bit 0 enters the chain first, followed by
+ *		 bit 1, etc.
+ *
+ * Returns The low order bits that were shifted out of the JTAG
+ *	   chain.
+ */
+uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data)
+{
+	union cvmx_ciu_qlm_jtgd jtgd;
+	jtgd.u64 = 0;
+	jtgd.s.shift = 1;
+	jtgd.s.shft_cnt = bits - 1;
+	jtgd.s.shft_reg = data;
+	if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
+		jtgd.s.select = 1 << qlm;
+	cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
+	do {
+		jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
+	} while (jtgd.s.shift);
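+	/*
+	 * The bits that fell out of the chain land left-justified in
+	 * shft_reg, so shifting right by (32 - bits) returns them in
+	 * the low order bits, e.g. a 4-bit shift comes back in bits
+	 * <3:0>.
+	 */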
+	return jtgd.s.shft_reg >> (32 - bits);
+}
+
+/**
+ * Shift long sequences of zeros into the QLM JTAG chain. It is
+ * common to need to shift more than 32 bits of zeros into the
+ * chain. This function is a convenience wrapper around
+ * cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of
+ * zeros at a time.
+ *
+ * @qlm:    QLM to shift zeros into
+ * @bits:   Number of zero bits to shift in
+ */
+void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits)
+{
+	while (bits > 0) {
+		int n = bits;
+		if (n > 32)
+			n = 32;
+		cvmx_helper_qlm_jtag_shift(qlm, n, 0);
+		bits -= n;
+	}
+}
+
+/**
+ * Program the QLM JTAG chain into all lanes of the QLM. You must
+ * have already shifted 268*4, or 1072, bits into the JTAG
+ * chain. Programming invalid values can possibly cause chip damage.
+ *
+ * @qlm:    QLM to program
+ */
+void cvmx_helper_qlm_jtag_update(int qlm)
+{
+	union cvmx_ciu_qlm_jtgd jtgd;
+
+	/* Update the new data */
+	jtgd.u64 = 0;
+	jtgd.s.update = 1;
+	if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
+		jtgd.s.select = 1 << qlm;
+	cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
+	do {
+		jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
+	} while (jtgd.s.update);
+}
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-loop.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-loop.c
new file mode 100644
index 0000000..bfbd461
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-loop.c
@@ -0,0 +1,85 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for LOOP initialization, configuration,
+ * and monitoring.
+ */
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-pip-defs.h>
+
+/**
+ * Probe a LOOP interface and determine the number of ports
+ * connected to it. The LOOP interface should still be down
+ * after this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_loop_probe(int interface)
+{
+	union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
+	int num_ports = 4;
+	int port;
+
+	/* We need to disable length checking so packets < 64 bytes and jumbo
+	   frames don't get errors */
+	for (port = 0; port < num_ports; port++) {
+		union cvmx_pip_prt_cfgx port_cfg;
+		int ipd_port = cvmx_helper_get_ipd_port(interface, port);
+		port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+		port_cfg.s.maxerr_en = 0;
+		port_cfg.s.minerr_en = 0;
+		cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);
+	}
+
+	/* Disable FCS stripping for loopback ports */
+	ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
+	ipd_sub_port_fcs.s.port_bit2 = 0;
+	cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
+	return num_ports;
+}
+
+/**
+ * Bring up and enable a LOOP interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_loop_enable(int interface)
+{
+	/* Do nothing. */
+	return 0;
+}
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-npi.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-npi.c
new file mode 100644
index 0000000..cc94cfa
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-npi.c
@@ -0,0 +1,113 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for NPI initialization, configuration,
+ * and monitoring.
+ */
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-helper.h>
+
+#include <asm/octeon/cvmx-pip-defs.h>
+
+/**
+ * Probe an NPI interface and determine the number of ports
+ * connected to it. The NPI interface should still be down
+ * after this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_npi_probe(int interface)
+{
+#if CVMX_PKO_QUEUES_PER_PORT_PCI > 0
+	if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
+		return 4;
+	else if (OCTEON_IS_MODEL(OCTEON_CN56XX)
+		 && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
+		/* The packet engines didn't exist before pass 2 */
+		return 4;
+	else if (OCTEON_IS_MODEL(OCTEON_CN52XX)
+		 && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
+		/* The packet engines didn't exist before pass 2 */
+		return 4;
+#if 0
+	/*
+	 * Technically CN30XX, CN31XX, and CN50XX contain packet
+	 * engines, but nobody ever uses them. Since this is the case,
+	 * we disable them here.
+	 */
+	else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+		 || OCTEON_IS_MODEL(OCTEON_CN50XX))
+		return 2;
+	else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
+		return 1;
+#endif
+#endif
+	return 0;
+}
+
+/**
+ * Bring up and enable an NPI interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_npi_enable(int interface)
+{
+	/*
+	 * On CN50XX, CN52XX, and CN56XX we need to disable length
+	 * checking so packets < 64 bytes and jumbo frames don't get
+	 * errors.
+	 */
+	if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
+	    !OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+		int num_ports = cvmx_helper_ports_on_interface(interface);
+		int port;
+		for (port = 0; port < num_ports; port++) {
+			union cvmx_pip_prt_cfgx port_cfg;
+			int ipd_port =
+			    cvmx_helper_get_ipd_port(interface, port);
+			port_cfg.u64 =
+			    cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+			port_cfg.s.maxerr_en = 0;
+			port_cfg.s.minerr_en = 0;
+			cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port),
+				       port_cfg.u64);
+		}
+	}
+
+	/* Enables are controlled by the remote host, so nothing to do here */
+	return 0;
+}
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
new file mode 100644
index 0000000..d18ed5a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
@@ -0,0 +1,522 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for RGMII/GMII/MII initialization, configuration,
+ * and monitoring.
+ */
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-board.h>
+
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-asxx-defs.h>
+#include <asm/octeon/cvmx-dbg-defs.h>
+
+void __cvmx_interrupt_gmxx_enable(int interface);
+void __cvmx_interrupt_asxx_enable(int block);
+
+/**
+ * Probe RGMII ports and determine the number present
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of RGMII/GMII/MII ports (0-4).
+ */
+int __cvmx_helper_rgmii_probe(int interface)
+{
+	int num_ports = 0;
+	union cvmx_gmxx_inf_mode mode;
+	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+	if (mode.s.type) {
+		if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+			cvmx_dprintf("ERROR: RGMII initialize called in "
+				     "SPI interface\n");
+		} else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+			   || OCTEON_IS_MODEL(OCTEON_CN30XX)
+			   || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+			/*
+			 * On these chips "type" says we're in
+			 * GMII/MII mode. This limits us to 2 ports
+			 */
+			num_ports = 2;
+		} else {
+			cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
+				     __func__);
+		}
+	} else {
+		if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+			num_ports = 4;
+		} else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+			   || OCTEON_IS_MODEL(OCTEON_CN30XX)
+			   || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+			num_ports = 3;
+		} else {
+			cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
+				     __func__);
+		}
+	}
+	return num_ports;
+}
+
+/**
+ * Put an RGMII interface in loopback mode. Internal packets sent
+ * out will be received back again on the same port. Externally
+ * received packets will echo back out.
+ *
+ * @port:   IPD port number to loop.
+ */
+void cvmx_helper_rgmii_internal_loopback(int port)
+{
+	int interface = (port >> 4) & 1;
+	int index = port & 0xf;
+	uint64_t tmp;
+
+	union cvmx_gmxx_prtx_cfg gmx_cfg;
+	gmx_cfg.u64 = 0;
+	gmx_cfg.s.duplex = 1;
+	gmx_cfg.s.slottime = 1;
+	gmx_cfg.s.speed = 1;
+	cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+	cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+	cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+	tmp = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
+	cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), (1 << index) | tmp);
+	tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
+	cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
+	tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
+	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
+	gmx_cfg.s.en = 1;
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+}
+
+/**
+ * Work around the ASX setup errata on CN38XX pass 1
+ *
+ * @interface: Interface to setup
+ * @port:      Port to setup (0..3)
+ * @cpu_clock_hz:
+ *		    Chip frequency in Hertz
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_errata_asx_pass1(int interface, int port,
+					  int cpu_clock_hz)
+{
+	/* Set hi water mark as per errata GMX-4 */
+	if (cpu_clock_hz >= 325000000 && cpu_clock_hz < 375000000)
+		cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 12);
+	else if (cpu_clock_hz >= 375000000 && cpu_clock_hz < 437000000)
+		cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 11);
+	else if (cpu_clock_hz >= 437000000 && cpu_clock_hz < 550000000)
+		cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 10);
+	else if (cpu_clock_hz >= 550000000 && cpu_clock_hz < 687000000)
+		cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 9);
+	else
+		cvmx_dprintf("Illegal clock frequency (%d). "
+			"CVMX_ASXX_TX_HI_WATERX not set\n", cpu_clock_hz);
+	return 0;
+}
+
+/**
+ * Configure all of the ASX, GMX, and PKO registers required
+ * to get RGMII to function on the supplied interface.
+ *
+ * @interface: PKO Interface to configure (0 or 1)
+ *
+ * Returns Zero on success
+ */
+int __cvmx_helper_rgmii_enable(int interface)
+{
+	int num_ports = cvmx_helper_ports_on_interface(interface);
+	int port;
+	struct cvmx_sysinfo *sys_info_ptr = cvmx_sysinfo_get();
+	union cvmx_gmxx_inf_mode mode;
+	union cvmx_asxx_tx_prt_en asx_tx;
+	union cvmx_asxx_rx_prt_en asx_rx;
+
+	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+	if (mode.s.en == 0)
+		return -1;
+	if ((OCTEON_IS_MODEL(OCTEON_CN38XX) ||
+	     OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1)
+		/* Ignore SPI interfaces */
+		return -1;
+
+	/* Configure the ASX registers needed to use the RGMII ports */
+	asx_tx.u64 = 0;
+	asx_tx.s.prt_en = cvmx_build_mask(num_ports);
+	cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);
+
+	asx_rx.u64 = 0;
+	asx_rx.s.prt_en = cvmx_build_mask(num_ports);
+	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);
+
+	/* Configure the GMX registers needed to use the RGMII ports */
+	for (port = 0; port < num_ports; port++) {
+		/* Setting of CVMX_GMXX_TXX_THRESH has been moved to
+		   __cvmx_helper_setup_gmx() */
+
+		if (cvmx_octeon_is_pass1())
+			__cvmx_helper_errata_asx_pass1(interface, port,
+					sys_info_ptr->cpu_clock_hz);
+		else {
+			/*
+			 * Configure more flexible RGMII preamble
+			 * checking. Pass 1 doesn't support this
+			 * feature.
+			 */
+			union cvmx_gmxx_rxx_frm_ctl frm_ctl;
+			frm_ctl.u64 =
+			    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
+					  (port, interface));
+			/* New field, so must be compile time */
+			frm_ctl.s.pre_free = 1;
+			cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface),
+				       frm_ctl.u64);
+		}
+
+		/*
+		 * Each pause frame transmitted will ask for about 10M
+		 * bit times before resume.  If buffer space comes
+		 * available before that time has expired, an XON
+		 * pause frame (0 time) will be transmitted to restart
+		 * the flow.
+		 */
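+		/*
+		 * Rough arithmetic (assuming the usual 512 bit-time
+		 * pause quantum): 20000 quanta * 512 bit times is
+		 * about 10.24M bit times per pause frame, matching
+		 * the ~10M figure above.
+		 */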
+		cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface),
+			       20000);
+		cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL
+			       (port, interface), 19000);
+
+		if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
+				       16);
+			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
+				       16);
+		} else {
+			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
+				       24);
+			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
+				       24);
+		}
+	}
+
+	__cvmx_helper_setup_gmx(interface, num_ports);
+
+	/* enable the ports now */
+	for (port = 0; port < num_ports; port++) {
+		union cvmx_gmxx_prtx_cfg gmx_cfg;
+
+		gmx_cfg.u64 =
+		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
+		gmx_cfg.s.en = 1;
+		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface),
+			       gmx_cfg.u64);
+	}
+	__cvmx_interrupt_asxx_enable(interface);
+	__cvmx_interrupt_gmxx_enable(interface);
+
+	return 0;
+}
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+	union cvmx_asxx_prt_loop asxx_prt_loop;
+
+	asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
+	if (asxx_prt_loop.s.int_loop & (1 << index)) {
+		/* Force 1Gbps full duplex on internal loopback */
+		cvmx_helper_link_info_t result;
+		result.u64 = 0;
+		result.s.full_duplex = 1;
+		result.s.link_up = 1;
+		result.s.speed = 1000;
+		return result;
+	} else
+		return __cvmx_helper_board_link_get(ipd_port);
+}
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get().
+ *
+ * @ipd_port:  IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_rgmii_link_set(int ipd_port,
+				 cvmx_helper_link_info_t link_info)
+{
+	int result = 0;
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+	union cvmx_gmxx_prtx_cfg original_gmx_cfg;
+	union cvmx_gmxx_prtx_cfg new_gmx_cfg;
+	union cvmx_pko_mem_queue_qos pko_mem_queue_qos;
+	union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16];
+	union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp;
+	union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save;
+	int i;
+
+	/* Ignore speed sets in the simulator */
+	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+		return 0;
+
+	/* Read the current settings so we know the current enable state */
+	original_gmx_cfg.u64 =
+	    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+	new_gmx_cfg = original_gmx_cfg;
+
+	/* Disable the lowest level RX */
+	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
+		       cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) &
+		       ~(1 << index));
+
+	memset(pko_mem_queue_qos_save, 0, sizeof(pko_mem_queue_qos_save));
+	/* Disable all queues so that TX should become idle */
+	for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
+		int queue = cvmx_pko_get_base_queue(ipd_port) + i;
+		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
+		pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS);
+		pko_mem_queue_qos.s.pid = ipd_port;
+		pko_mem_queue_qos.s.qid = queue;
+		pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
+		pko_mem_queue_qos.s.qos_mask = 0;
+		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
+	}
+
+	/* Disable backpressure */
+	gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
+	gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
+	gmx_tx_ovr_bp.s.bp &= ~(1 << index);
+	gmx_tx_ovr_bp.s.en |= 1 << index;
+	cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
+	cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
+
+	/*
+	 * Poll the GMX state machine waiting for it to become
+	 * idle. Preferably we should only change speed when it is
+	 * idle. If it doesn't become idle, we will still do the speed
+	 * change, but there is a slight chance that GMX will
+	 * lock up.
+	 */
+	cvmx_write_csr(CVMX_NPI_DBG_SELECT,
+		       interface * 0x800 + index * 0x100 + 0x880);
+	CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 7,
+			==, 0, 10000);
+	CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 0xf,
+			==, 0, 10000);
+
+	/* Disable the port before we make any changes */
+	new_gmx_cfg.s.en = 0;
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+	cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+	/* Set full/half duplex */
+	if (cvmx_octeon_is_pass1())
+		/* Half duplex is broken for 38XX Pass 1 */
+		new_gmx_cfg.s.duplex = 1;
+	else if (!link_info.s.link_up)
+		/* Force full duplex on down links */
+		new_gmx_cfg.s.duplex = 1;
+	else
+		new_gmx_cfg.s.duplex = link_info.s.full_duplex;
+
+	/* Set the link speed. Anything unknown is set to 1Gbps */
+	if (link_info.s.speed == 10) {
+		new_gmx_cfg.s.slottime = 0;
+		new_gmx_cfg.s.speed = 0;
+	} else if (link_info.s.speed == 100) {
+		new_gmx_cfg.s.slottime = 0;
+		new_gmx_cfg.s.speed = 0;
+	} else {
+		new_gmx_cfg.s.slottime = 1;
+		new_gmx_cfg.s.speed = 1;
+	}
+
+	/* Adjust the clocks */
+	if (link_info.s.speed == 10) {
+		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50);
+		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
+		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+	} else if (link_info.s.speed == 100) {
+		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5);
+		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
+		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+	} else {
+		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+	}
+
+	if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+		if ((link_info.s.speed == 10) || (link_info.s.speed == 100)) {
+			union cvmx_gmxx_inf_mode mode;
+			mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+	/*
+	 * Port	 .en  .type  .p0mii  Configuration
+	 * ----	 ---  -----  ------  -----------------------------------------
+	 *  X	   0	 X	X    All links are disabled.
+	 *  0	   1	 X	0    Port 0 is RGMII
+	 *  0	   1	 X	1    Port 0 is MII
+	 *  1	   1	 0	X    Ports 1 and 2 are configured as RGMII ports.
+	 *  1	   1	 1	X    Port 1: GMII/MII; Port 2: disabled. GMII or
+	 *			     MII port is selected by GMX_PRT1_CFG[SPEED].
+	 */
+
+			/* In MII mode, CLK_CNT = 1. */
+			if (((index == 0) && (mode.s.p0mii == 1))
+			    || ((index != 0) && (mode.s.type == 1))) {
+				cvmx_write_csr(CVMX_GMXX_TXX_CLK
+					       (index, interface), 1);
+			}
+		}
+	}
+
+	/* Do a read to make sure all setup stuff is complete */
+	cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+	/* Save the new GMX setting without enabling the port */
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+
+	/* Enable the lowest level RX */
+	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
+		       cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) |
+		       (1 << index));
+
+	/* Re-enable the TX path */
+	for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
+		int queue = cvmx_pko_get_base_queue(ipd_port) + i;
+		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
+		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS,
+			       pko_mem_queue_qos_save[i].u64);
+	}
+
+	/* Restore backpressure */
+	cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);
+
+	/* Restore the GMX enable state. Port config is complete */
+	new_gmx_cfg.s.en = original_gmx_cfg.s.en;
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+
+	return result;
+}
+
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @ipd_port: IPD/PKO port to loopback.
+ * @enable_internal:
+ *		   Non zero if you want internal loopback
+ * @enable_external:
+ *		   Non zero if you want external loopback
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal,
+					   int enable_external)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+	int original_enable;
+	union cvmx_gmxx_prtx_cfg gmx_cfg;
+	union cvmx_asxx_prt_loop asxx_prt_loop;
+
+	/* Read the current enable state and save it */
+	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+	original_enable = gmx_cfg.s.en;
+	/* Force port to be disabled */
+	gmx_cfg.s.en = 0;
+	if (enable_internal) {
+		/* Force speed if we're doing internal loopback */
+		gmx_cfg.s.duplex = 1;
+		gmx_cfg.s.slottime = 1;
+		gmx_cfg.s.speed = 1;
+		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+	}
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+
+	/* Set the loopback bits */
+	asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
+	if (enable_internal)
+		asxx_prt_loop.s.int_loop |= 1 << index;
+	else
+		asxx_prt_loop.s.int_loop &= ~(1 << index);
+	if (enable_external)
+		asxx_prt_loop.s.ext_loop |= 1 << index;
+	else
+		asxx_prt_loop.s.ext_loop &= ~(1 << index);
+	cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);
+
+	/* Force enables in internal loopback */
+	if (enable_internal) {
+		uint64_t tmp;
+		tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
+		cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface),
+			       (1 << index) | tmp);
+		tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
+		cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
+			       (1 << index) | tmp);
+		original_enable = 1;
+	}
+
+	/* Restore the enable state */
+	gmx_cfg.s.en = original_enable;
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+	return 0;
+}
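+
+/*
+ * Example (hypothetical): loop IPD port 2 back internally for a
+ * self-test, with external loopback left off:
+ *
+ *	__cvmx_helper_rgmii_configure_loopback(2, 1, 0);
+ */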
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
new file mode 100644
index 0000000..5782833
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
@@ -0,0 +1,556 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for SGMII initialization, configuration,
+ * and monitoring.
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-board.h>
+
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-pcsx-defs.h>
+
+void __cvmx_interrupt_gmxx_enable(int interface);
+void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
+void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
+
+/**
+ * Perform initialization required only once for an SGMII port.
+ *
+ * @interface: Interface to init
+ * @index:     Index of the port on the interface
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_one_time(int interface, int index)
+{
+	const uint64_t clock_mhz = cvmx_sysinfo_get()->cpu_clock_hz / 1000000;
+	union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
+	union cvmx_pcsx_linkx_timer_count_reg pcsx_linkx_timer_count_reg;
+	union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+
+	/* Disable GMX */
+	gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+	gmxx_prtx_cfg.s.en = 0;
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+	/*
+	 * Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the
+	 * appropriate value. 1000BASE-X specifies a 10ms
+	 * interval. SGMII specifies a 1.6ms interval.
+	 */
+	pcs_misc_ctl_reg.u64 =
+	    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+	pcsx_linkx_timer_count_reg.u64 =
+	    cvmx_read_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface));
+	if (pcs_misc_ctl_reg.s.mode) {
+		/* 1000BASE-X */
+		pcsx_linkx_timer_count_reg.s.count =
+		    (10000ull * clock_mhz) >> 10;
+	} else {
+		/* SGMII */
+		pcsx_linkx_timer_count_reg.s.count =
+		    (1600ull * clock_mhz) >> 10;
+	}
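+	/*
+	 * Worked example (assuming a hypothetical 500 MHz core clock):
+	 * for SGMII, count = (1600 * 500) >> 10 = 781; with the timer
+	 * apparently ticking in 1024-cycle units, 781 * 1024 cycles at
+	 * 500 MHz is roughly the specified 1.6ms interval.
+	 */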
+	cvmx_write_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface),
+		       pcsx_linkx_timer_count_reg.u64);
+
+	/*
+	 * Write the advertisement register to be used as the
+	 * tx_Config_Reg<D15:D0> of the autonegotiation.  In
+	 * 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
+	 * In SGMII PHY mode, tx_Config_Reg<D15:D0> is
+	 * PCS*_SGM*_AN_ADV_REG.  In SGMII MAC mode,
+	 * tx_Config_Reg<D15:D0> is the fixed value 0x4001, so this
+	 * step can be skipped.
+	 */
+	if (pcs_misc_ctl_reg.s.mode) {
+		/* 1000BASE-X */
+		union cvmx_pcsx_anx_adv_reg pcsx_anx_adv_reg;
+		pcsx_anx_adv_reg.u64 =
+		    cvmx_read_csr(CVMX_PCSX_ANX_ADV_REG(index, interface));
+		pcsx_anx_adv_reg.s.rem_flt = 0;
+		pcsx_anx_adv_reg.s.pause = 3;
+		pcsx_anx_adv_reg.s.hfd = 1;
+		pcsx_anx_adv_reg.s.fd = 1;
+		cvmx_write_csr(CVMX_PCSX_ANX_ADV_REG(index, interface),
+			       pcsx_anx_adv_reg.u64);
+	} else {
+		union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+		pcsx_miscx_ctl_reg.u64 =
+		    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+		if (pcsx_miscx_ctl_reg.s.mac_phy) {
+			/* PHY Mode */
+			union cvmx_pcsx_sgmx_an_adv_reg pcsx_sgmx_an_adv_reg;
+			pcsx_sgmx_an_adv_reg.u64 =
+			    cvmx_read_csr(CVMX_PCSX_SGMX_AN_ADV_REG
+					  (index, interface));
+			pcsx_sgmx_an_adv_reg.s.link = 1;
+			pcsx_sgmx_an_adv_reg.s.dup = 1;
+			pcsx_sgmx_an_adv_reg.s.speed = 2;
+			cvmx_write_csr(CVMX_PCSX_SGMX_AN_ADV_REG
+				       (index, interface),
+				       pcsx_sgmx_an_adv_reg.u64);
+		} else {
+			/* MAC Mode - Nothing to do */
+		}
+	}
+	return 0;
+}
+
+/**
+ * Initialize the SERDES link for the first time or after a loss
+ * of link.
+ *
+ * @interface: Interface to init
+ * @index:     Index of the port on the interface
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_link(int interface, int index)
+{
+	union cvmx_pcsx_mrx_control_reg control_reg;
+
+	/*
+	 * Take PCS through a reset sequence.
+	 * PCS*_MR*_CONTROL_REG[PWR_DN] should be cleared to zero.
+	 * Write PCS*_MR*_CONTROL_REG[RESET]=1 (while not changing the
+	 * value of the other PCS*_MR*_CONTROL_REG bits).  Read
+	 * PCS*_MR*_CONTROL_REG[RESET] until it changes value to
+	 * zero.
+	 */
+	control_reg.u64 =
+	    cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
+		control_reg.s.reset = 1;
+		cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+			       control_reg.u64);
+		if (CVMX_WAIT_FOR_FIELD64
+		    (CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+		     union cvmx_pcsx_mrx_control_reg, reset, ==, 0, 10000)) {
+			cvmx_dprintf("SGMII%d: Timeout waiting for port %d "
+				     "to finish reset\n",
+			     interface, index);
+			return -1;
+		}
+	}
+
+	/*
+	 * Write PCS*_MR*_CONTROL_REG[RST_AN]=1 to ensure a fresh
+	 * sgmii negotiation starts.
+	 */
+	control_reg.s.rst_an = 1;
+	control_reg.s.an_en = 1;
+	control_reg.s.pwr_dn = 0;
+	cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+		       control_reg.u64);
+
+	/*
+	 * Wait for PCS*_MR*_STATUS_REG[AN_CPT] to be set, indicating
+	 * that sgmii autonegotiation is complete. In MAC mode this
+	 * isn't an ethernet link, but a link between Octeon and the
+	 * PHY.
+	 */
+	if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
+	    CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_STATUS_REG(index, interface),
+				  union cvmx_pcsx_mrx_status_reg, an_cpt, ==, 1,
+				  10000)) {
+		/* cvmx_dprintf("SGMII%d: Port %d link timeout\n", interface, index); */
+		return -1;
+	}
+	return 0;
+}
+
+/**
+ * Configure an SGMII link to the specified speed after the SERDES
+ * link is up.
+ *
+ * @interface: Interface to init
+ * @index:     Index of the port on the interface
+ * @link_info: Link state to configure
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_link_speed(int interface,
+							int index,
+							cvmx_helper_link_info_t
+							link_info)
+{
+	int is_enabled;
+	union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+	union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+
+	/* Disable GMX before we make any changes. Remember the enable state */
+	gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+	is_enabled = gmxx_prtx_cfg.s.en;
+	gmxx_prtx_cfg.s.en = 0;
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+	/* Wait for GMX to be idle */
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_GMXX_PRTX_CFG(index, interface), union cvmx_gmxx_prtx_cfg,
+	     rx_idle, ==, 1, 10000)
+	    || CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
+				     union cvmx_gmxx_prtx_cfg, tx_idle, ==, 1,
+				     10000)) {
+		cvmx_dprintf
+		    ("SGMII%d: Timeout waiting for port %d to be idle\n",
+		     interface, index);
+		return -1;
+	}
+
+	/* Read GMX CFG again to make sure the disable completed */
+	gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+	/*
+	 * Get the misc control for PCS. We will need to set the
+	 * duplication amount.
+	 */
+	pcsx_miscx_ctl_reg.u64 =
+	    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+
+	/*
+	 * Use GMXENO to force the link down if the status we get says
+	 * it should be down.
+	 */
+	pcsx_miscx_ctl_reg.s.gmxeno = !link_info.s.link_up;
+
+	/* Only change the duplex setting if the link is up */
+	if (link_info.s.link_up)
+		gmxx_prtx_cfg.s.duplex = link_info.s.full_duplex;
+
+	/* Do speed based setting for GMX */
+	switch (link_info.s.speed) {
+	case 10:
+		gmxx_prtx_cfg.s.speed = 0;
+		gmxx_prtx_cfg.s.speed_msb = 1;
+		gmxx_prtx_cfg.s.slottime = 0;
+		/* Setting from GMX-603 */
+		pcsx_miscx_ctl_reg.s.samp_pt = 25;
+		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
+		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+		break;
+	case 100:
+		gmxx_prtx_cfg.s.speed = 0;
+		gmxx_prtx_cfg.s.speed_msb = 0;
+		gmxx_prtx_cfg.s.slottime = 0;
+		pcsx_miscx_ctl_reg.s.samp_pt = 0x5;
+		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
+		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+		break;
+	case 1000:
+		gmxx_prtx_cfg.s.speed = 1;
+		gmxx_prtx_cfg.s.speed_msb = 0;
+		gmxx_prtx_cfg.s.slottime = 1;
+		pcsx_miscx_ctl_reg.s.samp_pt = 1;
+		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 512);
+		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 8192);
+		break;
+	default:
+		break;
+	}
+
+	/* Write the new misc control for PCS */
+	cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+		       pcsx_miscx_ctl_reg.u64);
+
+	/* Write the new GMX settings with the port still disabled */
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+	/* Read GMX CFG again to make sure the config completed */
+	gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+	/* Restore the enabled / disabled state */
+	gmxx_prtx_cfg.s.en = is_enabled;
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+	return 0;
+}
+
+/**
+ * Bring up the SGMII interface to be ready for packet I/O but
+ * leave I/O disabled using the GMX override. This function
+ * follows the bringup documented in 10.6.3 of the manual.
+ *
+ * @interface: Interface to bringup
+ * @num_ports: Number of ports on the interface
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init(int interface, int num_ports)
+{
+	int index;
+
+	__cvmx_helper_setup_gmx(interface, num_ports);
+
+	for (index = 0; index < num_ports; index++) {
+		int ipd_port = cvmx_helper_get_ipd_port(interface, index);
+		__cvmx_helper_sgmii_hardware_init_one_time(interface, index);
+		/* Linux kernel driver will call ....link_set with the
+		 * proper link state. In the simulator there is no
+		 * link state polling and hence it is set from
+		 * here.
+		 */
+		if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+			__cvmx_helper_sgmii_link_set(ipd_port,
+				       __cvmx_helper_sgmii_link_get(ipd_port));
+	}
+
+	return 0;
+}
+
+int __cvmx_helper_sgmii_enumerate(int interface)
+{
+	return 4;
+}
+
+/**
+ * Probe an SGMII interface and determine the number of ports
+ * connected to it. The SGMII interface should still be down after
+ * this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_sgmii_probe(int interface)
+{
+	union cvmx_gmxx_inf_mode mode;
+
+	/*
+	 * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
+	 * interface needs to be enabled before IPD, otherwise per-port
+	 * backpressure may not work properly.
+	 */
+	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+	mode.s.en = 1;
+	cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
+	return __cvmx_helper_sgmii_enumerate(interface);
+}
+
+/**
+ * Bring up and enable an SGMII interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_sgmii_enable(int interface)
+{
+	int num_ports = cvmx_helper_ports_on_interface(interface);
+	int index;
+
+	__cvmx_helper_sgmii_hardware_init(interface, num_ports);
+
+	for (index = 0; index < num_ports; index++) {
+		union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+		gmxx_prtx_cfg.u64 =
+		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+		gmxx_prtx_cfg.s.en = 1;
+		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+			       gmxx_prtx_cfg.u64);
+		__cvmx_interrupt_pcsx_intx_en_reg_enable(index, interface);
+	}
+	__cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
+	__cvmx_interrupt_gmxx_enable(interface);
+	return 0;
+}
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port)
+{
+	cvmx_helper_link_info_t result;
+	union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+	union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
+
+	result.u64 = 0;
+
+	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
+		/* The simulator gives you a simulated 1Gbps full duplex link */
+		result.s.link_up = 1;
+		result.s.full_duplex = 1;
+		result.s.speed = 1000;
+		return result;
+	}
+
+	pcsx_mrx_control_reg.u64 =
+	    cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+	if (pcsx_mrx_control_reg.s.loopbck1) {
+		/* Force 1Gbps full duplex link for internal loopback */
+		result.s.link_up = 1;
+		result.s.full_duplex = 1;
+		result.s.speed = 1000;
+		return result;
+	}
+
+	pcs_misc_ctl_reg.u64 =
+	    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+	if (pcs_misc_ctl_reg.s.mode) {
+		/* 1000BASE-X */
+		/* FIXME */
+	} else {
+		union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+		pcsx_miscx_ctl_reg.u64 =
+		    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+		if (pcsx_miscx_ctl_reg.s.mac_phy) {
+			/* PHY Mode */
+			union cvmx_pcsx_mrx_status_reg pcsx_mrx_status_reg;
+			union cvmx_pcsx_anx_results_reg pcsx_anx_results_reg;
+
+			/*
+			 * Don't bother continuing if the SERDES low
+			 * level link is down
+			 */
+			pcsx_mrx_status_reg.u64 =
+			    cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG
+					  (index, interface));
+			if (pcsx_mrx_status_reg.s.lnk_st == 0) {
+				if (__cvmx_helper_sgmii_hardware_init_link
+				    (interface, index) != 0)
+					return result;
+			}
+
+			/* Read the autoneg results */
+			pcsx_anx_results_reg.u64 =
+			    cvmx_read_csr(CVMX_PCSX_ANX_RESULTS_REG
+					  (index, interface));
+			if (pcsx_anx_results_reg.s.an_cpt) {
+				/*
+				 * Auto negotiation is complete. Set
+				 * status accordingly.
+				 */
+				result.s.full_duplex =
+				    pcsx_anx_results_reg.s.dup;
+				result.s.link_up =
+				    pcsx_anx_results_reg.s.link_ok;
+				switch (pcsx_anx_results_reg.s.spd) {
+				case 0:
+					result.s.speed = 10;
+					break;
+				case 1:
+					result.s.speed = 100;
+					break;
+				case 2:
+					result.s.speed = 1000;
+					break;
+				default:
+					result.s.speed = 0;
+					result.s.link_up = 0;
+					break;
+				}
+			} else {
+				/*
+				 * Auto negotiation isn't
+				 * complete. Return link down.
+				 */
+				result.s.speed = 0;
+				result.s.link_up = 0;
+			}
+		} else {	/* MAC Mode */
+
+			result = __cvmx_helper_board_link_get(ipd_port);
+		}
+	}
+	return result;
+}
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get().
+ *
+ * @ipd_port:  IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_sgmii_link_set(int ipd_port,
+				 cvmx_helper_link_info_t link_info)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+	__cvmx_helper_sgmii_hardware_init_link(interface, index);
+	return __cvmx_helper_sgmii_hardware_init_link_speed(interface, index,
+							    link_info);
+}
+
+/**
+ * Configure a port for internal and/or external loopback. Internal
+ * loopback causes packets sent by the port to be received by
+ * Octeon. External loopback causes packets received from the wire to
+ * be sent out again.
+ *
+ * @ipd_port: IPD/PKO port to loopback.
+ * @enable_internal:
+ *		   Non zero if you want internal loopback
+ * @enable_external:
+ *		   Non zero if you want external loopback
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal,
+					   int enable_external)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+	union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
+	union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+
+	pcsx_mrx_control_reg.u64 =
+	    cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+	pcsx_mrx_control_reg.s.loopbck1 = enable_internal;
+	cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+		       pcsx_mrx_control_reg.u64);
+
+	pcsx_miscx_ctl_reg.u64 =
+	    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+	pcsx_miscx_ctl_reg.s.loopbck2 = enable_external;
+	cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+		       pcsx_miscx_ctl_reg.u64);
+
+	__cvmx_helper_sgmii_hardware_init_link(interface, index);
+	return 0;
+}
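+
+/*
+ * Example (hypothetical): enable internal PCS loopback on IPD port 0
+ * for a self-test, with external loopback left off:
+ *
+ *	__cvmx_helper_sgmii_configure_loopback(0, 1, 0);
+ *	link = __cvmx_helper_sgmii_link_get(0);
+ *
+ * link_get() then reports the forced 1Gbps full duplex link.
+ */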
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
new file mode 100644
index 0000000..ef16aa0
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
@@ -0,0 +1,204 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+void __cvmx_interrupt_gmxx_enable(int interface);
+void __cvmx_interrupt_spxx_int_msk_enable(int index);
+void __cvmx_interrupt_stxx_int_msk_enable(int index);
+
+/*
+ * Functions for SPI initialization, configuration,
+ * and monitoring.
+ */
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-spi.h>
+#include <asm/octeon/cvmx-helper.h>
+
+#include <asm/octeon/cvmx-pip-defs.h>
+#include <asm/octeon/cvmx-pko-defs.h>
+
+/*
+ * CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI
+ * initialization routines wait for SPI training. You can override the
+ * value using executive-config.h if necessary.
+ */
+#ifndef CVMX_HELPER_SPI_TIMEOUT
+#define CVMX_HELPER_SPI_TIMEOUT 10
+#endif
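+
+/*
+ * For example, a board whose SPI link trains slowly could carry a
+ * hypothetical override in its executive-config.h:
+ *
+ *	#define CVMX_HELPER_SPI_TIMEOUT 20
+ *
+ * doubling the time the training loop below is allowed to wait.
+ */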
+
+int __cvmx_helper_spi_enumerate(int interface)
+{
+	if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
+	    cvmx_spi4000_is_present(interface)) {
+		return 10;
+	} else {
+		return 16;
+	}
+}
+
+/**
+ * Probe an SPI interface and determine the number of ports
+ * connected to it. The SPI interface should still be down after
+ * this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_spi_probe(int interface)
+{
+	int num_ports = 0;
+
+	if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
+	    cvmx_spi4000_is_present(interface)) {
+		num_ports = 10;
+	} else {
+		union cvmx_pko_reg_crc_enable enable;
+		num_ports = 16;
+		/*
+		 * Unlike the SPI4000, most SPI devices don't
+		 * automatically put on the L2 CRC. For everything
+		 * except for the SPI4000 have PKO append the L2 CRC
+		 * to the packet.
+		 */
+		enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
+		enable.s.enable |= 0xffff << (interface * 16);
+		cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
+	}
+	__cvmx_helper_setup_gmx(interface, num_ports);
+	return num_ports;
+}
+
+/**
+ * Bring up and enable an SPI interface. After this call packet I/O
+ * should be fully functional. This is called with IPD enabled but
+ * PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_spi_enable(int interface)
+{
+	/*
+	 * Normally the Ethernet L2 CRC is checked and stripped in the
+	 * GMX block.  When you are using SPI, this isn't the case and
+	 * IPD needs to check the L2 CRC.
+	 */
+	int num_ports = cvmx_helper_ports_on_interface(interface);
+	int ipd_port;
+	for (ipd_port = interface * 16; ipd_port < interface * 16 + num_ports;
+	     ipd_port++) {
+		union cvmx_pip_prt_cfgx port_config;
+		port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+		port_config.s.crc_en = 1;
+		cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
+	}
+
+	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
+		cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX,
+					 CVMX_HELPER_SPI_TIMEOUT, num_ports);
+		if (cvmx_spi4000_is_present(interface))
+			cvmx_spi4000_initialize(interface);
+	}
+	__cvmx_interrupt_spxx_int_msk_enable(interface);
+	__cvmx_interrupt_stxx_int_msk_enable(interface);
+	__cvmx_interrupt_gmxx_enable(interface);
+	return 0;
+}
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port)
+{
+	cvmx_helper_link_info_t result;
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+	result.u64 = 0;
+
+	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
+		/* The simulator gives you a simulated full duplex link */
+		result.s.link_up = 1;
+		result.s.full_duplex = 1;
+		result.s.speed = 10000;
+	} else if (cvmx_spi4000_is_present(interface)) {
+		union cvmx_gmxx_rxx_rx_inbnd inband =
+		    cvmx_spi4000_check_speed(interface, index);
+		result.s.link_up = inband.s.status;
+		result.s.full_duplex = inband.s.duplex;
+		switch (inband.s.speed) {
+		case 0: /* 10 Mbps */
+			result.s.speed = 10;
+			break;
+		case 1: /* 100 Mbps */
+			result.s.speed = 100;
+			break;
+		case 2: /* 1 Gbps */
+			result.s.speed = 1000;
+			break;
+		case 3: /* Illegal */
+			result.s.speed = 0;
+			result.s.link_up = 0;
+			break;
+		}
+	} else {
+		/* For generic SPI we can't determine the link, just return some
+		   sane results */
+		result.s.link_up = 1;
+		result.s.full_duplex = 1;
+		result.s.speed = 10000;
+	}
+	return result;
+}
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get().
+ *
+ * @ipd_port:  IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_spi_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+	/* Nothing to do. If we have a SPI4000, the setup was already
+	   performed by cvmx_spi4000_check_speed(). If not, there isn't
+	   any link info. */
+	return 0;
+}
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-util.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
new file mode 100644
index 0000000..b45b297
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
@@ -0,0 +1,449 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Small helper utilities.
+ */
+#include <linux/kernel.h>
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-fpa.h>
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-ipd.h>
+#include <asm/octeon/cvmx-spi.h>
+
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-util.h>
+
+#include <asm/octeon/cvmx-ipd-defs.h>
+
+/**
+ * Convert an interface mode into a human-readable string
+ *
+ * @mode:   Mode to convert
+ *
+ * Returns String
+ */
+const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t
+						 mode)
+{
+	switch (mode) {
+	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+		return "DISABLED";
+	case CVMX_HELPER_INTERFACE_MODE_RGMII:
+		return "RGMII";
+	case CVMX_HELPER_INTERFACE_MODE_GMII:
+		return "GMII";
+	case CVMX_HELPER_INTERFACE_MODE_SPI:
+		return "SPI";
+	case CVMX_HELPER_INTERFACE_MODE_PCIE:
+		return "PCIE";
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+		return "XAUI";
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+		return "SGMII";
+	case CVMX_HELPER_INTERFACE_MODE_PICMG:
+		return "PICMG";
+	case CVMX_HELPER_INTERFACE_MODE_NPI:
+		return "NPI";
+	case CVMX_HELPER_INTERFACE_MODE_LOOP:
+		return "LOOP";
+	}
+	return "UNKNOWN";
+}
+
+/**
+ * Debug routine to dump the packet structure to the console
+ *
+ * @work:   Work queue entry containing the packet to dump
+ * Returns Zero
+ */
+int cvmx_helper_dump_packet(cvmx_wqe_t *work)
+{
+	uint64_t count;
+	uint64_t remaining_bytes;
+	union cvmx_buf_ptr buffer_ptr;
+	uint64_t start_of_buffer;
+	uint8_t *data_address;
+	uint8_t *end_of_data;
+
+	cvmx_dprintf("Packet Length:   %u\n", work->word1.len);
+	cvmx_dprintf("	  Input Port:  %u\n", cvmx_wqe_get_port(work));
+	cvmx_dprintf("	  QoS:	       %u\n", cvmx_wqe_get_qos(work));
+	cvmx_dprintf("	  Buffers:     %u\n", work->word2.s.bufs);
+
+	if (work->word2.s.bufs == 0) {
+		union cvmx_ipd_wqe_fpa_queue wqe_pool;
+		wqe_pool.u64 = cvmx_read_csr(CVMX_IPD_WQE_FPA_QUEUE);
+		buffer_ptr.u64 = 0;
+		buffer_ptr.s.pool = wqe_pool.s.wqe_pool;
+		buffer_ptr.s.size = 128;
+		buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data);
+		if (likely(!work->word2.s.not_IP)) {
+			union cvmx_pip_ip_offset pip_ip_offset;
+			pip_ip_offset.u64 = cvmx_read_csr(CVMX_PIP_IP_OFFSET);
+			buffer_ptr.s.addr +=
+			    (pip_ip_offset.s.offset << 3) -
+			    work->word2.s.ip_offset;
+			buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2;
+		} else {
+			/*
+			 * WARNING: This code assumes that the packet
+			 * is not RAW. If it was, we would use
+			 * PIP_GBL_CFG[RAW_SHF] instead of
+			 * PIP_GBL_CFG[NIP_SHF].
+			 */
+			union cvmx_pip_gbl_cfg pip_gbl_cfg;
+			pip_gbl_cfg.u64 = cvmx_read_csr(CVMX_PIP_GBL_CFG);
+			buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf;
+		}
+	} else
+		buffer_ptr = work->packet_ptr;
+	remaining_bytes = work->word1.len;
+
+	while (remaining_bytes) {
+		start_of_buffer =
+		    ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
+		cvmx_dprintf("	  Buffer Start:%llx\n",
+			     (unsigned long long)start_of_buffer);
+		cvmx_dprintf("	  Buffer I   : %u\n", buffer_ptr.s.i);
+		cvmx_dprintf("	  Buffer Back: %u\n", buffer_ptr.s.back);
+		cvmx_dprintf("	  Buffer Pool: %u\n", buffer_ptr.s.pool);
+		cvmx_dprintf("	  Buffer Data: %llx\n",
+			     (unsigned long long)buffer_ptr.s.addr);
+		cvmx_dprintf("	  Buffer Size: %u\n", buffer_ptr.s.size);
+
+		cvmx_dprintf("\t\t");
+		data_address = (uint8_t *) cvmx_phys_to_ptr(buffer_ptr.s.addr);
+		end_of_data = data_address + buffer_ptr.s.size;
+		count = 0;
+		while (data_address < end_of_data) {
+			if (remaining_bytes == 0)
+				break;
+			else
+				remaining_bytes--;
+			cvmx_dprintf("%02x", (unsigned int)*data_address);
+			data_address++;
+			if (remaining_bytes && (count == 7)) {
+				cvmx_dprintf("\n\t\t");
+				count = 0;
+			} else
+				count++;
+		}
+		cvmx_dprintf("\n");
+
+		if (remaining_bytes)
+			buffer_ptr = *(union cvmx_buf_ptr *)
+				cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
+	}
+	return 0;
+}
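+
+/*
+ * Worked example (hypothetical values): the start-of-buffer computation
+ * above operates on 128-byte cache lines. With buffer_ptr.s.addr = 0x10080
+ * and buffer_ptr.s.back = 1:
+ *
+ *	((0x10080 >> 7) - 1) << 7  ==  (0x201 - 1) << 7  ==  0x10000
+ *
+ * i.e. the buffer is taken to start one cache line before the line that
+ * contains the data address.
+ */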
+
+/**
+ * Set up Random Early Drop on a specific input queue
+ *
+ * @queue:  Input queue to set up RED on (0-7)
+ * @pass_thresh:
+ *		 Packets will begin slowly dropping when there are fewer than
+ *		 this many packet buffers free in FPA 0.
+ * @drop_thresh:
+ *		 All incoming packets will be dropped when there are fewer
+ *		 than this many free packet buffers in FPA 0.
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
+{
+	union cvmx_ipd_qosx_red_marks red_marks;
+	union cvmx_ipd_red_quex_param red_param;
+
+	/* Set RED to begin dropping packets when there are pass_thresh buffers
+	   left. It will linearly drop more packets until reaching drop_thresh
+	   buffers */
+	red_marks.u64 = 0;
+	red_marks.s.drop = drop_thresh;
+	red_marks.s.pass = pass_thresh;
+	cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64);
+
+	/* Use the actual queue 0 counter, not the average */
+	red_param.u64 = 0;
+	red_param.s.prb_con =
+	    (255ul << 24) / (red_marks.s.pass - red_marks.s.drop);
+	red_param.s.avg_con = 1;
+	red_param.s.new_con = 255;
+	red_param.s.use_pcnt = 1;
+	cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64);
+	return 0;
+}
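+
+/*
+ * Worked example (hypothetical thresholds): with pass_thresh = 256 and
+ * drop_thresh = 128 the drop probability ramps over the 128-buffer span
+ * between the two marks:
+ *
+ *	prb_con = (255 << 24) / (256 - 128) = 33423360
+ *
+ * which spreads the full 0-255 probability range across the interval from
+ * pass_thresh down to drop_thresh.
+ */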
+
+/**
+ * Set up Random Early Drop to automatically begin dropping packets.
+ *
+ * @pass_thresh:
+ *		 Packets will begin slowly dropping when there are fewer than
+ *		 this many packet buffers free in FPA 0.
+ * @drop_thresh:
+ *		 All incoming packets will be dropped when there are fewer
+ *		 than this many free packet buffers in FPA 0.
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
+{
+	union cvmx_ipd_portx_bp_page_cnt page_cnt;
+	union cvmx_ipd_bp_prt_red_end ipd_bp_prt_red_end;
+	union cvmx_ipd_red_port_enable red_port_enable;
+	int queue;
+	int interface;
+	int port;
+
+	/* Disable backpressure based on queued buffers. It needs SW support */
+	page_cnt.u64 = 0;
+	page_cnt.s.bp_enb = 0;
+	page_cnt.s.page_cnt = 100;
+	for (interface = 0; interface < 2; interface++) {
+		for (port = cvmx_helper_get_first_ipd_port(interface);
+		     port < cvmx_helper_get_last_ipd_port(interface); port++)
+			cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port),
+				       page_cnt.u64);
+	}
+
+	for (queue = 0; queue < 8; queue++)
+		cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh);
+
+	/* Shut off dropping based on the per-port page count. SW isn't
+	   decrementing it right now */
+	ipd_bp_prt_red_end.u64 = 0;
+	ipd_bp_prt_red_end.s.prt_enb = 0;
+	cvmx_write_csr(CVMX_IPD_BP_PRT_RED_END, ipd_bp_prt_red_end.u64);
+
+	red_port_enable.u64 = 0;
+	red_port_enable.s.prt_enb = 0xfffffffffull;
+	red_port_enable.s.avg_dly = 10000;
+	red_port_enable.s.prb_dly = 10000;
+	cvmx_write_csr(CVMX_IPD_RED_PORT_ENABLE, red_port_enable.u64);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_setup_red);
+
+/**
+ * Set up the common GMX settings that determine the number of
+ * ports. These settings apply to almost all configurations of all
+ * chips.
+ *
+ * @interface: Interface to configure
+ * @num_ports: Number of ports on the interface
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_setup_gmx(int interface, int num_ports)
+{
+	union cvmx_gmxx_tx_prts gmx_tx_prts;
+	union cvmx_gmxx_rx_prts gmx_rx_prts;
+	union cvmx_pko_reg_gmx_port_mode pko_mode;
+	union cvmx_gmxx_txx_thresh gmx_tx_thresh;
+	int index;
+
+	/* Tell GMX the number of TX ports on this interface */
+	gmx_tx_prts.u64 = cvmx_read_csr(CVMX_GMXX_TX_PRTS(interface));
+	gmx_tx_prts.s.prts = num_ports;
+	cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), gmx_tx_prts.u64);
+
+	/* Tell GMX the number of RX ports on this interface.  This only
+	   applies to *GMII and XAUI ports */
+	if (cvmx_helper_interface_get_mode(interface) ==
+	    CVMX_HELPER_INTERFACE_MODE_RGMII
+	    || cvmx_helper_interface_get_mode(interface) ==
+	    CVMX_HELPER_INTERFACE_MODE_SGMII
+	    || cvmx_helper_interface_get_mode(interface) ==
+	    CVMX_HELPER_INTERFACE_MODE_GMII
+	    || cvmx_helper_interface_get_mode(interface) ==
+	    CVMX_HELPER_INTERFACE_MODE_XAUI) {
+		if (num_ports > 4) {
+			cvmx_dprintf("__cvmx_helper_setup_gmx: Illegal "
+				     "num_ports\n");
+			return -1;
+		}
+
+		gmx_rx_prts.u64 = cvmx_read_csr(CVMX_GMXX_RX_PRTS(interface));
+		gmx_rx_prts.s.prts = num_ports;
+		cvmx_write_csr(CVMX_GMXX_RX_PRTS(interface), gmx_rx_prts.u64);
+	}
+
+	/* Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, and 50XX */
+	if (!OCTEON_IS_MODEL(OCTEON_CN30XX) && !OCTEON_IS_MODEL(OCTEON_CN31XX)
+	    && !OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+		/* Tell PKO the number of ports on this interface */
+		pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
+		if (interface == 0) {
+			if (num_ports == 1)
+				pko_mode.s.mode0 = 4;
+			else if (num_ports == 2)
+				pko_mode.s.mode0 = 3;
+			else if (num_ports <= 4)
+				pko_mode.s.mode0 = 2;
+			else if (num_ports <= 8)
+				pko_mode.s.mode0 = 1;
+			else
+				pko_mode.s.mode0 = 0;
+		} else {
+			if (num_ports == 1)
+				pko_mode.s.mode1 = 4;
+			else if (num_ports == 2)
+				pko_mode.s.mode1 = 3;
+			else if (num_ports <= 4)
+				pko_mode.s.mode1 = 2;
+			else if (num_ports <= 8)
+				pko_mode.s.mode1 = 1;
+			else
+				pko_mode.s.mode1 = 0;
+		}
+		cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);
+	}
+
+	/*
+	 * Set GMX to buffer as much data as possible before starting
+	 * transmit.  This reduces the chance of a TX underrun due to
+	 * memory contention. Any packet that fits entirely in the GMX
+	 * FIFO can never underrun regardless of memory load.
+	 */
+	gmx_tx_thresh.u64 = cvmx_read_csr(CVMX_GMXX_TXX_THRESH(0, interface));
+	if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX)
+	    || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+		/* These chips have a fixed max threshold of 0x40 */
+		gmx_tx_thresh.s.cnt = 0x40;
+	} else {
+		/* Choose the max value for the number of ports */
+		if (num_ports <= 1)
+			gmx_tx_thresh.s.cnt = 0x100 / 1;
+		else if (num_ports == 2)
+			gmx_tx_thresh.s.cnt = 0x100 / 2;
+		else
+			gmx_tx_thresh.s.cnt = 0x100 / 4;
+	}
+	/*
+	 * SPI and XAUI can have lots of ports but the GMX hardware
+	 * only ever has a max of 4.
+	 */
+	if (num_ports > 4)
+		num_ports = 4;
+	for (index = 0; index < num_ports; index++)
+		cvmx_write_csr(CVMX_GMXX_TXX_THRESH(index, interface),
+			       gmx_tx_thresh.u64);
+
+	return 0;
+}
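+
+/*
+ * Example of the TX threshold split above (hypothetical port counts): the
+ * GMX TX FIFO is shared by up to four ports, so a single-port interface
+ * keeps the whole 0x100, a two-port interface gets 0x100 / 2 = 0x80 per
+ * port, and three or more ports get 0x100 / 4 = 0x40 each.
+ */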
+
+/**
+ * Returns the IPD/PKO port number for a port on the given
+ * interface.
+ *
+ * @interface: Interface to use
+ * @port:      Port on the interface
+ *
+ * Returns IPD/PKO port number
+ */
+int cvmx_helper_get_ipd_port(int interface, int port)
+{
+	switch (interface) {
+	case 0:
+		return port;
+	case 1:
+		return port + 16;
+	case 2:
+		return port + 32;
+	case 3:
+		return port + 36;
+	case 4:
+		return port + 40;
+	case 5:
+		return port + 44;
+	}
+	return -1;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_get_ipd_port);
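+
+/*
+ * Example of the mapping above (hypothetical values): interface 1, port 2
+ * maps to IPD port 2 + 16 = 18. The inverse helpers below recover both
+ * halves: cvmx_helper_get_interface_num(18) == 1 and
+ * cvmx_helper_get_interface_index_num(18) == (18 & 15) == 2.
+ */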
+
+/**
+ * Returns the interface number for an IPD/PKO port number.
+ *
+ * @ipd_port: IPD/PKO port number
+ *
+ * Returns Interface number
+ */
+int cvmx_helper_get_interface_num(int ipd_port)
+{
+	if (ipd_port < 16)
+		return 0;
+	else if (ipd_port < 32)
+		return 1;
+	else if (ipd_port < 36)
+		return 2;
+	else if (ipd_port < 40)
+		return 3;
+	else if (ipd_port < 44)
+		return 4;
+	else if (ipd_port < 48)
+		return 5;
+	else
+		cvmx_dprintf("cvmx_helper_get_interface_num: Illegal IPD "
+			     "port number\n");
+
+	return -1;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_get_interface_num);
+
+/**
+ * Returns the interface index number for an IPD/PKO port
+ * number.
+ *
+ * @ipd_port: IPD/PKO port number
+ *
+ * Returns Interface index number
+ */
+int cvmx_helper_get_interface_index_num(int ipd_port)
+{
+	if (ipd_port < 32)
+		return ipd_port & 15;
+	else if (ipd_port < 36)
+		return ipd_port & 3;
+	else if (ipd_port < 40)
+		return ipd_port & 3;
+	else if (ipd_port < 44)
+		return ipd_port & 3;
+	else if (ipd_port < 48)
+		return ipd_port & 3;
+	else
+		cvmx_dprintf("cvmx_helper_get_interface_index_num: "
+			     "Illegal IPD port number\n");
+
+	return -1;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_get_interface_index_num);
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
new file mode 100644
index 0000000..19d54e0
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
@@ -0,0 +1,363 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for XAUI initialization, configuration,
+ * and monitoring.
+ *
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-helper.h>
+
+#include <asm/octeon/cvmx-pko-defs.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-pcsxx-defs.h>
+
+void __cvmx_interrupt_gmxx_enable(int interface);
+void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
+void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
+
+int __cvmx_helper_xaui_enumerate(int interface)
+{
+	union cvmx_gmxx_hg2_control gmx_hg2_control;
+
+	/* If HiGig2 is enabled return 16 ports, otherwise return 1 port */
+	gmx_hg2_control.u64 = cvmx_read_csr(CVMX_GMXX_HG2_CONTROL(interface));
+	if (gmx_hg2_control.s.hg2tx_en)
+		return 16;
+	else
+		return 1;
+}
+
+/**
+ * Probe a XAUI interface and determine the number of ports
+ * connected to it. The XAUI interface should still be down
+ * after this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_xaui_probe(int interface)
+{
+	int i;
+	union cvmx_gmxx_inf_mode mode;
+
+	/*
+	 * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
+	 * interface needs to be enabled before IPD otherwise per port
+	 * backpressure may not work properly.
+	 */
+	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+	mode.s.en = 1;
+	cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
+
+	__cvmx_helper_setup_gmx(interface, 1);
+
+	/*
+	 * Setup PKO to support 16 ports for HiGig2 virtual
+	 * ports. We're pointing all of the PKO packet ports for this
+	 * interface to the XAUI. This allows us to use HiGig2
+	 * backpressure per port.
+	 */
+	for (i = 0; i < 16; i++) {
+		union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs;
+		pko_mem_port_ptrs.u64 = 0;
+		/*
+		 * We set each PKO port to have equal priority in a
+		 * round robin fashion.
+		 */
+		pko_mem_port_ptrs.s.static_p = 0;
+		pko_mem_port_ptrs.s.qos_mask = 0xff;
+		/* All PKO ports map to the same XAUI hardware port */
+		pko_mem_port_ptrs.s.eid = interface * 4;
+		pko_mem_port_ptrs.s.pid = interface * 16 + i;
+		cvmx_write_csr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
+	}
+	return __cvmx_helper_xaui_enumerate(interface);
+}
+
+/**
+ * Bringup and enable a XAUI interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_xaui_enable(int interface)
+{
+	union cvmx_gmxx_prtx_cfg gmx_cfg;
+	union cvmx_pcsxx_control1_reg xauiCtl;
+	union cvmx_pcsxx_misc_ctl_reg xauiMiscCtl;
+	union cvmx_gmxx_tx_xaui_ctl gmxXauiTxCtl;
+	union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
+	union cvmx_gmxx_tx_int_en gmx_tx_int_en;
+	union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
+
+	/* Setup PKND */
+	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+		gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+		gmx_cfg.s.pknd = cvmx_helper_get_ipd_port(interface, 0);
+		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+	}
+
+	/* (1) Interface has already been enabled. */
+
+	/* (2) Disable GMX. */
+	xauiMiscCtl.u64 = cvmx_read_csr(CVMX_PCSXX_MISC_CTL_REG(interface));
+	xauiMiscCtl.s.gmxeno = 1;
+	cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
+
+	/* (3) Disable GMX and PCSX interrupts. */
+	gmx_rx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(0, interface));
+	cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
+	gmx_tx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_TX_INT_EN(interface));
+	cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
+	pcsx_int_en_reg.u64 = cvmx_read_csr(CVMX_PCSXX_INT_EN_REG(interface));
+	cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
+
+	/* (4) Bring up the PCSX and GMX reconciliation layer. */
+	/* (4)a Set polarity and lane swapping. */
+	/* (4)b */
+	gmxXauiTxCtl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+	/* Enable better IFG packing to improve performance */
+	gmxXauiTxCtl.s.dic_en = 1;
+	gmxXauiTxCtl.s.uni_en = 0;
+	cvmx_write_csr(CVMX_GMXX_TX_XAUI_CTL(interface), gmxXauiTxCtl.u64);
+
+	/* (4)c Apply the reset sequence */
+	xauiCtl.u64 = cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
+	xauiCtl.s.lo_pwr = 0;
+
+	/* Issuing a reset here seems to hang some CN68XX chips. */
+	if (!OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X) &&
+	    !OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X))
+		xauiCtl.s.reset = 1;
+
+	cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface), xauiCtl.u64);
+
+	/* Wait for PCS to come out of reset */
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_PCSXX_CONTROL1_REG(interface), union cvmx_pcsxx_control1_reg,
+	     reset, ==, 0, 10000))
+		return -1;
+	/* Wait for PCS to be aligned */
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_PCSXX_10GBX_STATUS_REG(interface),
+	     union cvmx_pcsxx_10gbx_status_reg, alignd, ==, 1, 10000))
+		return -1;
+	/* Wait for RX to be ready */
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_GMXX_RX_XAUI_CTL(interface), union cvmx_gmxx_rx_xaui_ctl,
+		    status, ==, 0, 10000))
+		return -1;
+
+	/* (6) Configure GMX */
+	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+	gmx_cfg.s.en = 0;
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+	/* Wait for GMX RX to be idle */
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
+		    rx_idle, ==, 1, 10000))
+		return -1;
+	/* Wait for GMX TX to be idle */
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
+		    tx_idle, ==, 1, 10000))
+		return -1;
+
+	/* GMX configure */
+	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+	gmx_cfg.s.speed = 1;
+	gmx_cfg.s.speed_msb = 0;
+	gmx_cfg.s.slottime = 1;
+	cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), 1);
+	cvmx_write_csr(CVMX_GMXX_TXX_SLOT(0, interface), 512);
+	cvmx_write_csr(CVMX_GMXX_TXX_BURST(0, interface), 8192);
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+	/* (7) Clear out any error state */
+	cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(0, interface),
+		       cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(0, interface)));
+	cvmx_write_csr(CVMX_GMXX_TX_INT_REG(interface),
+		       cvmx_read_csr(CVMX_GMXX_TX_INT_REG(interface)));
+	cvmx_write_csr(CVMX_PCSXX_INT_REG(interface),
+		       cvmx_read_csr(CVMX_PCSXX_INT_REG(interface)));
+
+	/* Wait for receive link */
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_PCSXX_STATUS1_REG(interface), union cvmx_pcsxx_status1_reg,
+	     rcv_lnk, ==, 1, 10000))
+		return -1;
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
+	     xmtflt, ==, 0, 10000))
+		return -1;
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
+	     rcvflt, ==, 0, 10000))
+		return -1;
+
+	cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), gmx_rx_int_en.u64);
+	cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
+	cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), pcsx_int_en_reg.u64);
+
+	/* (8) Enable packet reception */
+	xauiMiscCtl.s.gmxeno = 0;
+	cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
+
+	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+	gmx_cfg.s.en = 1;
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+	__cvmx_interrupt_pcsx_intx_en_reg_enable(0, interface);
+	__cvmx_interrupt_pcsx_intx_en_reg_enable(1, interface);
+	__cvmx_interrupt_pcsx_intx_en_reg_enable(2, interface);
+	__cvmx_interrupt_pcsx_intx_en_reg_enable(3, interface);
+	__cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
+	__cvmx_interrupt_gmxx_enable(interface);
+
+	return 0;
+}
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
+	union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
+	union cvmx_pcsxx_status1_reg pcsxx_status1_reg;
+	cvmx_helper_link_info_t result;
+
+	gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+	gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
+	pcsxx_status1_reg.u64 =
+	    cvmx_read_csr(CVMX_PCSXX_STATUS1_REG(interface));
+	result.u64 = 0;
+
+	/* Only return a link if both RX and TX are happy */
+	if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0) &&
+	    (pcsxx_status1_reg.s.rcv_lnk == 1)) {
+		result.s.link_up = 1;
+		result.s.full_duplex = 1;
+		result.s.speed = 10000;
+	} else {
+		/* Disable GMX and PCSX interrupts. */
+		cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
+		cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
+		cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
+	}
+	return result;
+}
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get().
+ *
+ * @ipd_port:  IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
+	union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
+
+	gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+	gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
+
+	/* If the link shouldn't be up, then just return */
+	if (!link_info.s.link_up)
+		return 0;
+
+	/* Do nothing if both RX and TX are happy */
+	if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0))
+		return 0;
+
+	/* Bring the link up */
+	return __cvmx_helper_xaui_enable(interface);
+}
+
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @ipd_port: IPD/PKO port to loopback.
+ * @enable_internal:
+ *		   Non-zero if you want internal loopback
+ * @enable_external:
+ *		   Non-zero if you want external loopback
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int __cvmx_helper_xaui_configure_loopback(int ipd_port,
+					  int enable_internal,
+					  int enable_external)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	union cvmx_pcsxx_control1_reg pcsxx_control1_reg;
+	union cvmx_gmxx_xaui_ext_loopback gmxx_xaui_ext_loopback;
+
+	/* Set the internal loop */
+	pcsxx_control1_reg.u64 =
+	    cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
+	pcsxx_control1_reg.s.loopbck1 = enable_internal;
+	cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface),
+		       pcsxx_control1_reg.u64);
+
+	/* Set the external loop */
+	gmxx_xaui_ext_loopback.u64 =
+	    cvmx_read_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface));
+	gmxx_xaui_ext_loopback.s.en = enable_external;
+	cvmx_write_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface),
+		       gmxx_xaui_ext_loopback.u64);
+
+	/* Take the link through a reset */
+	return __cvmx_helper_xaui_enable(interface);
+}
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper.c
new file mode 100644
index 0000000..c376f17
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -0,0 +1,1296 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Helper functions for common, but complicated tasks.
+ *
+ */
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-fpa.h>
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-ipd.h>
+#include <asm/octeon/cvmx-spi.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-board.h>
+
+#include <asm/octeon/cvmx-pip-defs.h>
+#include <asm/octeon/cvmx-smix-defs.h>
+#include <asm/octeon/cvmx-asxx-defs.h>
+
+/**
+ * cvmx_override_pko_queue_priority(int ipd_port, uint64_t
+ * priorities[16]) is a function pointer. It is meant to allow
+ * customization of the PKO queue priorities based on the port
+ * number. Users should set this pointer to a function before
+ * calling any cvmx-helper operations.
+ */
+void (*cvmx_override_pko_queue_priority) (int pko_port,
+					  uint64_t priorities[16]);
+
+/**
+ * cvmx_override_ipd_port_setup(int ipd_port) is a function
+ * pointer. It is meant to allow customization of the IPD port
+ * setup before packet input/output comes online. It is called
+ * after cvmx-helper does the default IPD configuration, but
+ * before IPD is enabled. Users should set this pointer to a
+ * function before calling any cvmx-helper operations.
+ */
+void (*cvmx_override_ipd_port_setup) (int ipd_port);
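+
+/*
+ * Usage sketch (hypothetical override, not part of the SDK): an
+ * application that wants all queues serviced with equal priority could
+ * install a hook before calling any cvmx-helper operation:
+ *
+ *	static void my_flat_priorities(int pko_port, uint64_t priorities[16])
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < 16; i++)
+ *			priorities[i] = 8;
+ *	}
+ *
+ *	cvmx_override_pko_queue_priority = my_flat_priorities;
+ */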
+
+/* Port count per interface */
+static int interface_port_count[9];
+
+/**
+ * Return the number of interfaces the chip has. Each interface
+ * may have multiple ports. Most chips support two interfaces,
+ * but the CNX0XX and CNX1XX are exceptions. These only support
+ * one interface.
+ *
+ * Returns Number of interfaces on chip
+ */
+int cvmx_helper_get_number_of_interfaces(void)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return 9;
+	if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
+		return 4;
+	if (OCTEON_IS_MODEL(OCTEON_CN7XXX))
+		return 5;
+	else
+		return 3;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_get_number_of_interfaces);
+
+/**
+ * Return the number of ports on an interface. Depending on the
+ * chip and configuration, this can be 1-16. A value of 0
+ * specifies that the interface doesn't exist or isn't usable.
+ *
+ * @interface: Interface to get the port count for
+ *
+ * Returns Number of ports on interface. Can be Zero.
+ */
+int cvmx_helper_ports_on_interface(int interface)
+{
+	return interface_port_count[interface];
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_ports_on_interface);
+
+/**
+ * @INTERNAL
+ * Return interface mode for CN68xx.
+ */
+static cvmx_helper_interface_mode_t __cvmx_get_mode_cn68xx(int interface)
+{
+	union cvmx_mio_qlmx_cfg qlm_cfg;
+	switch (interface) {
+	case 0:
+		qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
+		/* QLM is disabled when QLM SPD is 15. */
+		if (qlm_cfg.s.qlm_spd == 15)
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+		if (qlm_cfg.s.qlm_cfg == 2)
+			return CVMX_HELPER_INTERFACE_MODE_SGMII;
+		else if (qlm_cfg.s.qlm_cfg == 3)
+			return CVMX_HELPER_INTERFACE_MODE_XAUI;
+		else
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	case 2:
+	case 3:
+	case 4:
+		qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(interface));
+		/* QLM is disabled when QLM SPD is 15. */
+		if (qlm_cfg.s.qlm_spd == 15)
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+		if (qlm_cfg.s.qlm_cfg == 2)
+			return CVMX_HELPER_INTERFACE_MODE_SGMII;
+		else if (qlm_cfg.s.qlm_cfg == 3)
+			return CVMX_HELPER_INTERFACE_MODE_XAUI;
+		else
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	case 7:
+		qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(3));
+		/* QLM is disabled when QLM SPD is 15. */
+		if (qlm_cfg.s.qlm_spd == 15) {
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+		} else if (qlm_cfg.s.qlm_cfg != 0) {
+			qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
+			if (qlm_cfg.s.qlm_cfg != 0)
+				return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+		}
+		return CVMX_HELPER_INTERFACE_MODE_NPI;
+	case 8:
+		return CVMX_HELPER_INTERFACE_MODE_LOOP;
+	default:
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	}
+}
+
+/**
+ * @INTERNAL
+ * Return interface mode for an Octeon II
+ */
+static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface)
+{
+	union cvmx_gmxx_inf_mode mode;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return __cvmx_get_mode_cn68xx(interface);
+
+	if (interface == 2)
+		return CVMX_HELPER_INTERFACE_MODE_NPI;
+
+	if (interface == 3)
+		return CVMX_HELPER_INTERFACE_MODE_LOOP;
+
+	/* Interfaces 4-5 on CN63XX and 4-7 on CN66XX are reported as
+	   disabled */
+	if ((OCTEON_IS_MODEL(OCTEON_CN63XX) &&
+	     (interface == 4 || interface == 5)) ||
+	    (OCTEON_IS_MODEL(OCTEON_CN66XX) &&
+	     interface >= 4 && interface <= 7)) {
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	}
+
+	if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
+		union cvmx_mio_qlmx_cfg mio_qlm_cfg;
+
+		/* QLM2 is SGMII0 and QLM1 is SGMII1 */
+		if (interface == 0)
+			mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
+		else if (interface == 1)
+			mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
+		else
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+		if (mio_qlm_cfg.s.qlm_spd == 15)
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+		if (mio_qlm_cfg.s.qlm_cfg == 9)
+			return CVMX_HELPER_INTERFACE_MODE_SGMII;
+		else if (mio_qlm_cfg.s.qlm_cfg == 11)
+			return CVMX_HELPER_INTERFACE_MODE_XAUI;
+		else
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	} else if (OCTEON_IS_MODEL(OCTEON_CN61XX)) {
+		union cvmx_mio_qlmx_cfg qlm_cfg;
+
+		if (interface == 0) {
+			qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
+			if (qlm_cfg.s.qlm_cfg == 2)
+				return CVMX_HELPER_INTERFACE_MODE_SGMII;
+			else if (qlm_cfg.s.qlm_cfg == 3)
+				return CVMX_HELPER_INTERFACE_MODE_XAUI;
+			else
+				return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+		} else if (interface == 1) {
+			qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
+			if (qlm_cfg.s.qlm_cfg == 2)
+				return CVMX_HELPER_INTERFACE_MODE_SGMII;
+			else if (qlm_cfg.s.qlm_cfg == 3)
+				return CVMX_HELPER_INTERFACE_MODE_XAUI;
+			else
+				return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+		}
+	} else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
+		if (interface == 0) {
+			union cvmx_mio_qlmx_cfg qlm_cfg;
+			qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
+			if (qlm_cfg.s.qlm_cfg == 2)
+				return CVMX_HELPER_INTERFACE_MODE_SGMII;
+		}
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	}
+
+	if (interface == 1 && OCTEON_IS_MODEL(OCTEON_CN63XX))
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+		switch (mode.cn63xx.mode) {
+		case 0:
+			return CVMX_HELPER_INTERFACE_MODE_SGMII;
+		case 1:
+			return CVMX_HELPER_INTERFACE_MODE_XAUI;
+		default:
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+		}
+	} else {
+		if (!mode.s.en)
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+		if (mode.s.type)
+			return CVMX_HELPER_INTERFACE_MODE_GMII;
+		else
+			return CVMX_HELPER_INTERFACE_MODE_RGMII;
+	}
+}
+
+/**
+ * @INTERNAL
+ * Return interface mode for CN7XXX.
+ */
+static cvmx_helper_interface_mode_t __cvmx_get_mode_cn7xxx(int interface)
+{
+	union cvmx_gmxx_inf_mode mode;
+
+	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+	switch (interface) {
+	case 0:
+	case 1:
+		switch (mode.cn68xx.mode) {
+		case 0:
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+		case 1:
+		case 2:
+			return CVMX_HELPER_INTERFACE_MODE_SGMII;
+		case 3:
+			return CVMX_HELPER_INTERFACE_MODE_XAUI;
+		default:
+			return CVMX_HELPER_INTERFACE_MODE_SGMII;
+		}
+	case 2:
+		return CVMX_HELPER_INTERFACE_MODE_NPI;
+	case 3:
+		return CVMX_HELPER_INTERFACE_MODE_LOOP;
+	case 4:
+		/* TODO: Implement support for AGL (RGMII). */
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	default:
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	}
+}
+
+/**
+ * Get the operating mode of an interface. Depending on the Octeon
+ * chip and configuration, this function returns an enumeration
+ * of the type of packet I/O supported by an interface.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Mode of the interface. Unknown or unsupported interfaces return
+ *	   DISABLED.
+ */
+cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
+{
+	union cvmx_gmxx_inf_mode mode;
+
+	if (interface < 0 ||
+	    interface >= cvmx_helper_get_number_of_interfaces())
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+	/*
+	 * OCTEON III models
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN7XXX))
+		return __cvmx_get_mode_cn7xxx(interface);
+
+	/*
+	 * Octeon II models
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+		return __cvmx_get_mode_octeon2(interface);
+
+	/*
+	 * Octeon and Octeon Plus models
+	 */
+	if (interface == 2)
+		return CVMX_HELPER_INTERFACE_MODE_NPI;
+
+	if (interface == 3) {
+		if (OCTEON_IS_MODEL(OCTEON_CN56XX)
+		    || OCTEON_IS_MODEL(OCTEON_CN52XX))
+			return CVMX_HELPER_INTERFACE_MODE_LOOP;
+		else
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	}
+
+	if (interface == 0
+	    && cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5
+	    && cvmx_sysinfo_get()->board_rev_major == 1) {
+		/*
+		 * Lie about interface type of CN3005 board.  This
+		 * board has a switch on port 1 like the other
+		 * evaluation boards, but it is connected over RGMII
+		 * instead of GMII.  Report GMII mode so that the
+		 * speed is forced to 1 Gbit full duplex.  Other than
+		 * some initial configuration (which does not use the
+		 * output of this function) there is no difference in
+		 * setup between GMII and RGMII modes.
+		 */
+		return CVMX_HELPER_INTERFACE_MODE_GMII;
+	}
+
+	/* Interface 1 is always disabled on CN31XX and CN30XX */
+	if ((interface == 1)
+	    && (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX)
+		|| OCTEON_IS_MODEL(OCTEON_CN50XX)
+		|| OCTEON_IS_MODEL(OCTEON_CN52XX)))
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+	if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+		switch (mode.cn56xx.mode) {
+		case 0:
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+		case 1:
+			return CVMX_HELPER_INTERFACE_MODE_XAUI;
+		case 2:
+			return CVMX_HELPER_INTERFACE_MODE_SGMII;
+		case 3:
+			return CVMX_HELPER_INTERFACE_MODE_PICMG;
+		default:
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+		}
+	} else {
+		if (!mode.s.en)
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+		if (mode.s.type) {
+			if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+			    || OCTEON_IS_MODEL(OCTEON_CN58XX))
+				return CVMX_HELPER_INTERFACE_MODE_SPI;
+			else
+				return CVMX_HELPER_INTERFACE_MODE_GMII;
+		} else
+			return CVMX_HELPER_INTERFACE_MODE_RGMII;
+	}
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_interface_get_mode);
+
+/**
+ * Configure the IPD/PIP tagging and QoS options for a specific
+ * port. This function determines the POW work queue entry
+ * contents for a port. The setup performed here is controlled by
+ * the defines in executive-config.h.
+ *
+ * @ipd_port: Port to configure. This follows the IPD numbering, not the
+ *		   per-interface numbering
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_port_setup_ipd(int ipd_port)
+{
+	union cvmx_pip_prt_cfgx port_config;
+	union cvmx_pip_prt_tagx tag_config;
+
+	port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+	tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(ipd_port));
+
+	/* Have each port go to a different POW queue */
+	port_config.s.qos = ipd_port & 0x7;
+
+	/* Process the headers and place the IP header in the work queue */
+	port_config.s.mode = CVMX_HELPER_INPUT_PORT_SKIP_MODE;
+
+	tag_config.s.ip6_src_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP;
+	tag_config.s.ip6_dst_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_IP;
+	tag_config.s.ip6_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT;
+	tag_config.s.ip6_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT;
+	tag_config.s.ip6_nxth_flag = CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER;
+	tag_config.s.ip4_src_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP;
+	tag_config.s.ip4_dst_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_IP;
+	tag_config.s.ip4_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT;
+	tag_config.s.ip4_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT;
+	tag_config.s.ip4_pctl_flag = CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL;
+	tag_config.s.inc_prt_flag = CVMX_HELPER_INPUT_TAG_INPUT_PORT;
+	tag_config.s.tcp6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+	tag_config.s.tcp4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+	tag_config.s.ip6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+	tag_config.s.ip4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+	tag_config.s.non_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+	/* Put all packets in group 0. Other groups can be used by the app */
+	tag_config.s.grp = 0;
+
+	cvmx_pip_config_port(ipd_port, port_config, tag_config);
+
+	/* Give the user a chance to override our setting for each port */
+	if (cvmx_override_ipd_port_setup)
+		cvmx_override_ipd_port_setup(ipd_port);
+
+	return 0;
+}
+
+/**
+ * This function sets the interface_port_count[interface] correctly,
+ * without modifying any hardware configuration.  Hardware setup of
+ * the ports will be performed later.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_interface_enumerate(int interface)
+{
+	switch (cvmx_helper_interface_get_mode(interface)) {
+		/* These types don't support ports to IPD/PKO */
+	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+	case CVMX_HELPER_INTERFACE_MODE_PCIE:
+		interface_port_count[interface] = 0;
+		break;
+		/* XAUI is a single high speed port */
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+		interface_port_count[interface] =
+		    __cvmx_helper_xaui_enumerate(interface);
+		break;
+		/*
+		 * RGMII/GMII/MII are all treated about the same. Most
+		 * functions refer to these ports as RGMII.
+		 */
+	case CVMX_HELPER_INTERFACE_MODE_RGMII:
+	case CVMX_HELPER_INTERFACE_MODE_GMII:
+		interface_port_count[interface] =
+		    __cvmx_helper_rgmii_enumerate(interface);
+		break;
+		/*
+		 * SPI4 can have 1-16 ports depending on the device at
+		 * the other end.
+		 */
+	case CVMX_HELPER_INTERFACE_MODE_SPI:
+		interface_port_count[interface] =
+		    __cvmx_helper_spi_enumerate(interface);
+		break;
+		/*
+		 * SGMII can have 1-4 ports depending on how many are
+		 * hooked up.
+		 */
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+	case CVMX_HELPER_INTERFACE_MODE_PICMG:
+		interface_port_count[interface] =
+		    __cvmx_helper_sgmii_enumerate(interface);
+		break;
+		/* PCI target Network Packet Interface */
+	case CVMX_HELPER_INTERFACE_MODE_NPI:
+		interface_port_count[interface] =
+		    __cvmx_helper_npi_enumerate(interface);
+		break;
+		/*
+		 * Special loopback only ports. These are not the same
+		 * as other ports in loopback mode.
+		 */
+	case CVMX_HELPER_INTERFACE_MODE_LOOP:
+		interface_port_count[interface] =
+		    __cvmx_helper_loop_enumerate(interface);
+		break;
+	}
+
+	interface_port_count[interface] =
+	    __cvmx_helper_board_interface_probe(interface,
+						interface_port_count
+						[interface]);
+
+	/* Make sure all global variables propagate to other cores */
+	CVMX_SYNCWS;
+
+	return 0;
+}
+
+/**
+ * This function probes an interface to determine the actual
+ * number of hardware ports connected to it. It doesn't set up the
+ * ports or enable them. The main goal here is to set the global
+ * interface_port_count[interface] correctly. Hardware setup of the
+ * ports will be performed later.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_interface_probe(int interface)
+{
+	cvmx_helper_interface_enumerate(interface);
+	/* At this stage we don't want packets to be moving yet. The
+	   following probe calls perform only the hardware setup needed to
+	   determine port counts; receive must remain disabled */
+	switch (cvmx_helper_interface_get_mode(interface)) {
+		/* These types don't support ports to IPD/PKO */
+	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+	case CVMX_HELPER_INTERFACE_MODE_PCIE:
+		break;
+		/* XAUI is a single high speed port */
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+		__cvmx_helper_xaui_probe(interface);
+		break;
+		/*
+		 * RGMII/GMII/MII are all treated about the same. Most
+		 * functions refer to these ports as RGMII.
+		 */
+	case CVMX_HELPER_INTERFACE_MODE_RGMII:
+	case CVMX_HELPER_INTERFACE_MODE_GMII:
+		__cvmx_helper_rgmii_probe(interface);
+		break;
+		/*
+		 * SPI4 can have 1-16 ports depending on the device at
+		 * the other end.
+		 */
+	case CVMX_HELPER_INTERFACE_MODE_SPI:
+		__cvmx_helper_spi_probe(interface);
+		break;
+		/*
+		 * SGMII can have 1-4 ports depending on how many are
+		 * hooked up.
+		 */
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+	case CVMX_HELPER_INTERFACE_MODE_PICMG:
+		__cvmx_helper_sgmii_probe(interface);
+		break;
+		/* PCI target Network Packet Interface */
+	case CVMX_HELPER_INTERFACE_MODE_NPI:
+		__cvmx_helper_npi_probe(interface);
+		break;
+		/*
+		 * Special loopback only ports. These are not the same
+		 * as other ports in loopback mode.
+		 */
+	case CVMX_HELPER_INTERFACE_MODE_LOOP:
+		__cvmx_helper_loop_probe(interface);
+		break;
+	}
+
+	/* Make sure all global variables propagate to other cores */
+	CVMX_SYNCWS;
+
+	return 0;
+}
+
+/**
+ * Set up the IPD/PIP for the ports on an interface. Packet
+ * classification and tagging are set for every port on the
+ * interface. The number of ports on the interface must already
+ * have been probed.
+ *
+ * @interface: Interface to setup IPD/PIP for
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_interface_setup_ipd(int interface)
+{
+	int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
+	int num_ports = interface_port_count[interface];
+
+	while (num_ports--) {
+		__cvmx_helper_port_setup_ipd(ipd_port);
+		ipd_port++;
+	}
+	return 0;
+}
+
+/**
+ * Set up global settings for IPD/PIP not related to a specific
+ * interface or port. This must be called before IPD is enabled.
+ *
+ * Returns Zero on success, negative on failure.
+ */
+static int __cvmx_helper_global_setup_ipd(void)
+{
+	/* Setup the global packet input options */
+	cvmx_ipd_config(CVMX_FPA_PACKET_POOL_SIZE / 8,
+			CVMX_HELPER_FIRST_MBUFF_SKIP / 8,
+			CVMX_HELPER_NOT_FIRST_MBUFF_SKIP / 8,
+			/* The +8 is to account for the next ptr */
+			(CVMX_HELPER_FIRST_MBUFF_SKIP + 8) / 128,
+			/* The +8 is to account for the next ptr */
+			(CVMX_HELPER_NOT_FIRST_MBUFF_SKIP + 8) / 128,
+			CVMX_FPA_WQE_POOL,
+			CVMX_IPD_OPC_MODE_STT,
+			CVMX_HELPER_ENABLE_BACK_PRESSURE);
+	return 0;
+}
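+
+/*
+ * Worked example of the unit conversions above (assuming a first-buffer
+ * skip of 184 bytes): cvmx_ipd_config() takes skip sizes in 8-byte words
+ * and back values in 128-byte cache lines, so the skip argument becomes
+ * 184 / 8 = 23 words and the first back value becomes
+ * (184 + 8) / 128 = 1 cache line, the +8 covering the next pointer.
+ */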
+
+/**
+ * Set up the PKO for the ports on an interface. The number of
+ * queues per port and the priority of each PKO output queue
+ * are set here. PKO must be disabled when this function is called.
+ *
+ * @interface: Interface to setup PKO for
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_interface_setup_pko(int interface)
+{
+	/*
+	 * Each packet output queue has an associated priority. The
+	 * higher the priority, the more often it can send a packet. A
+	 * priority of 8 means it can send in all 8 rounds of
+	 * contention. We're going to make each queue one less than
+	 * the last.  The vector of priorities has been extended to
+	 * support CN5xxx CPUs, where up to 16 queues can be
+	 * associated to a port.  To keep backward compatibility we
+	 * don't change the initial 8 priorities and replicate them in
+	 * the second half.  With per-core PKO queues (PKO lockless
+	 * operation) all queues have the same priority.
+	 */
+	uint64_t priorities[16] =
+	    { 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1 };
+
+	/*
+	 * Set up the IPD/PIP and PKO for the ports discovered
+	 * above. Here packet classification, tagging and output
+	 * priorities are set.
+	 */
+	int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
+	int num_ports = interface_port_count[interface];
+	while (num_ports--) {
+		/*
+		 * Give the user a chance to override the per queue
+		 * priorities.
+		 */
+		if (cvmx_override_pko_queue_priority)
+			cvmx_override_pko_queue_priority(ipd_port, priorities);
+
+		cvmx_pko_config_port(ipd_port,
+				     cvmx_pko_get_base_queue_per_core(ipd_port,
+								      0),
+				     cvmx_pko_get_num_queues(ipd_port),
+				     priorities);
+		ipd_port++;
+	}
+	return 0;
+}
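+
+/*
+ * Example of the default priority vector above: queue 0 (priority 8) may
+ * send in all 8 rounds of contention while queue 7 (priority 1) may send
+ * in only one, so under full load queue 0 gets roughly eight times the
+ * transmit opportunities of queue 7.
+ */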
+
+/**
+ * Set up global settings for PKO not related to a specific
+ * interface or port. This must be called before PKO is enabled.
+ *
+ * Returns Zero on success, negative on failure.
+ */
+static int __cvmx_helper_global_setup_pko(void)
+{
+	/*
+	 * Disable tagwait FAU timeout. This needs to be done before
+	 * anyone might start packet output using tags.
+	 */
+	union cvmx_iob_fau_timeout fau_to;
+	fau_to.u64 = 0;
+	fau_to.s.tout_val = 0xfff;
+	fau_to.s.tout_enb = 0;
+	cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		union cvmx_pko_reg_min_pkt min_pkt;
+
+		min_pkt.u64 = 0;
+		min_pkt.s.size1 = 59;
+		min_pkt.s.size2 = 59;
+		min_pkt.s.size3 = 59;
+		min_pkt.s.size4 = 59;
+		min_pkt.s.size5 = 59;
+		min_pkt.s.size6 = 59;
+		min_pkt.s.size7 = 59;
+		cvmx_write_csr(CVMX_PKO_REG_MIN_PKT, min_pkt.u64);
+	}
+
+	return 0;
+}
+
+/**
+ * Set up the global backpressure setting.
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_global_setup_backpressure(void)
+{
+#if CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE
+	/* Disable backpressure if configured to do so */
+	/* Disable backpressure (pause frame) generation */
+	int num_interfaces = cvmx_helper_get_number_of_interfaces();
+	int interface;
+	for (interface = 0; interface < num_interfaces; interface++) {
+		switch (cvmx_helper_interface_get_mode(interface)) {
+		case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+		case CVMX_HELPER_INTERFACE_MODE_PCIE:
+		case CVMX_HELPER_INTERFACE_MODE_NPI:
+		case CVMX_HELPER_INTERFACE_MODE_LOOP:
+		case CVMX_HELPER_INTERFACE_MODE_XAUI:
+			break;
+		case CVMX_HELPER_INTERFACE_MODE_RGMII:
+		case CVMX_HELPER_INTERFACE_MODE_GMII:
+		case CVMX_HELPER_INTERFACE_MODE_SPI:
+		case CVMX_HELPER_INTERFACE_MODE_SGMII:
+		case CVMX_HELPER_INTERFACE_MODE_PICMG:
+			cvmx_gmx_set_backpressure_override(interface, 0xf);
+			break;
+		}
+	}
+#endif
+
+	return 0;
+}
+
+/**
+ * Enable packet input/output from the hardware. This function is
+ * called after all internal setup is complete and IPD is enabled.
+ * After this function completes, packets will be accepted from the
+ * hardware ports. PKO should still be disabled to make sure packets
+ * aren't sent out of partially set up hardware.
+ *
+ * @interface: Interface to enable
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_packet_hardware_enable(int interface)
+{
+	int result = 0;
+	switch (cvmx_helper_interface_get_mode(interface)) {
+		/* These types don't support ports to IPD/PKO */
+	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+	case CVMX_HELPER_INTERFACE_MODE_PCIE:
+		/* Nothing to do */
+		break;
+		/* XAUI is a single high speed port */
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+		result = __cvmx_helper_xaui_enable(interface);
+		break;
+		/*
+		 * RGMII/GMII/MII are all treated about the same. Most
+		 * functions refer to these ports as RGMII
+		 */
+	case CVMX_HELPER_INTERFACE_MODE_RGMII:
+	case CVMX_HELPER_INTERFACE_MODE_GMII:
+		result = __cvmx_helper_rgmii_enable(interface);
+		break;
+		/*
+		 * SPI4 can have 1-16 ports depending on the device at
+		 * the other end
+		 */
+	case CVMX_HELPER_INTERFACE_MODE_SPI:
+		result = __cvmx_helper_spi_enable(interface);
+		break;
+		/*
+		 * SGMII can have 1-4 ports depending on how many are
+		 * hooked up
+		 */
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+	case CVMX_HELPER_INTERFACE_MODE_PICMG:
+		result = __cvmx_helper_sgmii_enable(interface);
+		break;
+		/* PCI target Network Packet Interface */
+	case CVMX_HELPER_INTERFACE_MODE_NPI:
+		result = __cvmx_helper_npi_enable(interface);
+		break;
+		/*
+		 * Special loopback only ports. These are not the same
+		 * as other ports in loopback mode
+		 */
+	case CVMX_HELPER_INTERFACE_MODE_LOOP:
+		result = __cvmx_helper_loop_enable(interface);
+		break;
+	}
+	result |= __cvmx_helper_board_hardware_enable(interface);
+	return result;
+}
+
+/**
+ * Function to adjust internal IPD pointer alignments
+ *
+ * Returns 0 on success
+ *	   !0 on failure
+ */
+int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
+{
+#define FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES \
+     (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_FIRST_MBUFF_SKIP)
+#define FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES \
+	(CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_NOT_FIRST_MBUFF_SKIP)
+#define FIX_IPD_OUTPORT 0
+	/* Ports 0-15 are interface 0, 16-31 are interface 1 */
+#define INTERFACE(port) ((port) >> 4)
+#define INDEX(port) ((port) & 0xf)
+	uint64_t *p64;
+	cvmx_pko_command_word0_t pko_command;
+	union cvmx_buf_ptr g_buffer, pkt_buffer;
+	cvmx_wqe_t *work;
+	int size, num_segs = 0, wqe_pcnt, pkt_pcnt;
+	union cvmx_gmxx_prtx_cfg gmx_cfg;
+	int retry_cnt;
+	int retry_loop_cnt;
+	int i;
+
+	/* Save values for restore at end */
+	uint64_t prtx_cfg =
+	    cvmx_read_csr(CVMX_GMXX_PRTX_CFG
+			  (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
+	uint64_t tx_ptr_en =
+	    cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
+	uint64_t rx_ptr_en =
+	    cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
+	uint64_t rxx_jabber =
+	    cvmx_read_csr(CVMX_GMXX_RXX_JABBER
+			  (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
+	uint64_t frame_max =
+	    cvmx_read_csr(CVMX_GMXX_RXX_FRM_MAX
+			  (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
+
+	/* Configure port to gig FDX as required for loopback mode */
+	cvmx_helper_rgmii_internal_loopback(FIX_IPD_OUTPORT);
+
+	/*
+	 * Disable reception on all ports so if traffic is present it
+	 * will not interfere.
+	 */
+	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 0);
+
+	cvmx_wait(100000000ull);
+
+	for (retry_loop_cnt = 0; retry_loop_cnt < 10; retry_loop_cnt++) {
+		retry_cnt = 100000;
+		wqe_pcnt = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
+		pkt_pcnt = (wqe_pcnt >> 7) & 0x7f;
+		wqe_pcnt &= 0x7f;
+
+		num_segs = (2 + pkt_pcnt - wqe_pcnt) & 3;
+
+		if (num_segs == 0)
+			goto fix_ipd_exit;
+
+		num_segs += 1;
+
+		size =
+		    FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES +
+		    ((num_segs - 1) * FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES) -
+		    (FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES / 2);
+
+		cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)),
+			       1 << INDEX(FIX_IPD_OUTPORT));
+		CVMX_SYNC;
+
+		g_buffer.u64 = 0;
+		g_buffer.s.addr =
+		    cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_WQE_POOL));
+		if (g_buffer.s.addr == 0) {
+			cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
+				     "buffer allocation failure.\n");
+			goto fix_ipd_exit;
+		}
+
+		g_buffer.s.pool = CVMX_FPA_WQE_POOL;
+		g_buffer.s.size = num_segs;
+
+		pkt_buffer.u64 = 0;
+		pkt_buffer.s.addr =
+		    cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL));
+		if (pkt_buffer.s.addr == 0) {
+			cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
+				     "buffer allocation failure.\n");
+			goto fix_ipd_exit;
+		}
+		pkt_buffer.s.i = 1;
+		pkt_buffer.s.pool = CVMX_FPA_PACKET_POOL;
+		pkt_buffer.s.size = FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES;
+
+		p64 = (uint64_t *) cvmx_phys_to_ptr(pkt_buffer.s.addr);
+		p64[0] = 0xffffffffffff0000ull;
+		p64[1] = 0x08004510ull;
+		p64[2] = ((uint64_t) (size - 14) << 48) | 0x5ae740004000ull;
+		p64[3] = 0x3a5fc0a81073c0a8ull;
+
+		for (i = 0; i < num_segs; i++) {
+			if (i > 0)
+				pkt_buffer.s.size =
+				    FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES;
+
+			if (i == (num_segs - 1))
+				pkt_buffer.s.i = 0;
+
+			*(uint64_t *) cvmx_phys_to_ptr(g_buffer.s.addr +
+						       8 * i) = pkt_buffer.u64;
+		}
+
+		/* Build the PKO command */
+		pko_command.u64 = 0;
+		pko_command.s.segs = num_segs;
+		pko_command.s.total_bytes = size;
+		pko_command.s.dontfree = 0;
+		pko_command.s.gather = 1;
+
+		gmx_cfg.u64 =
+		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG
+				  (INDEX(FIX_IPD_OUTPORT),
+				   INTERFACE(FIX_IPD_OUTPORT)));
+		gmx_cfg.s.en = 1;
+		cvmx_write_csr(CVMX_GMXX_PRTX_CFG
+			       (INDEX(FIX_IPD_OUTPORT),
+				INTERFACE(FIX_IPD_OUTPORT)), gmx_cfg.u64);
+		cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
+			       1 << INDEX(FIX_IPD_OUTPORT));
+		cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
+			       1 << INDEX(FIX_IPD_OUTPORT));
+
+		cvmx_write_csr(CVMX_GMXX_RXX_JABBER
+			       (INDEX(FIX_IPD_OUTPORT),
+				INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
+		cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
+			       (INDEX(FIX_IPD_OUTPORT),
+				INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
+
+		cvmx_pko_send_packet_prepare(FIX_IPD_OUTPORT,
+					     cvmx_pko_get_base_queue
+					     (FIX_IPD_OUTPORT),
+					     CVMX_PKO_LOCK_CMD_QUEUE);
+		cvmx_pko_send_packet_finish(FIX_IPD_OUTPORT,
+					    cvmx_pko_get_base_queue
+					    (FIX_IPD_OUTPORT), pko_command,
+					    g_buffer, CVMX_PKO_LOCK_CMD_QUEUE);
+
+		CVMX_SYNC;
+
+		do {
+			work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
+			retry_cnt--;
+		} while ((work == NULL) && (retry_cnt > 0));
+
+		if (!retry_cnt)
+			cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
+				     "get_work() timeout occurred.\n");
+
+		/* Free packet */
+		if (work)
+			cvmx_helper_free_packet_data(work);
+	}
+
+fix_ipd_exit:
+
+	/* Return CSR configs to saved values */
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG
+		       (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
+		       prtx_cfg);
+	cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
+		       tx_ptr_en);
+	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
+		       rx_ptr_en);
+	cvmx_write_csr(CVMX_GMXX_RXX_JABBER
+		       (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
+		       rxx_jabber);
+	cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
+		       (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
+		       frame_max);
+	cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)), 0);
+
+	CVMX_SYNC;
+	if (num_segs)
+		cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT failed.\n");
+
+	return !!num_segs;
+
+}
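+
+/*
+ * Worked example of the alignment math above (hypothetical counter
+ * values): with wqe_pcnt = 3 and pkt_pcnt = 5 read from
+ * CVMX_IPD_PTR_COUNT, num_segs = (2 + 5 - 3) & 3 = 0 and the pointers
+ * are already aligned. With wqe_pcnt = 3 and pkt_pcnt = 6,
+ * num_segs = (2 + 6 - 3) & 3 = 1, which the loop bumps to 2 segments
+ * and pushes through the internal loopback to rotate the pointers back
+ * into alignment.
+ */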
+
+/**
+ * Called after all internal packet IO paths are set up. This
+ * function enables IPD/PIP and begins packet input and output.
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_ipd_and_packet_input_enable(void)
+{
+	int num_interfaces;
+	int interface;
+
+	/* Enable IPD */
+	cvmx_ipd_enable();
+
+	/*
+	 * Time to enable packet input and output on the hardware
+	 * ports. Note that at this point IPD/PIP must be fully
+	 * functional and PKO must be disabled.
+	 */
+	num_interfaces = cvmx_helper_get_number_of_interfaces();
+	for (interface = 0; interface < num_interfaces; interface++) {
+		if (cvmx_helper_ports_on_interface(interface) > 0)
+			__cvmx_helper_packet_hardware_enable(interface);
+	}
+
+	/* Finally enable PKO now that the entire path is up and running */
+	cvmx_pko_enable();
+
+	if ((OCTEON_IS_MODEL(OCTEON_CN31XX_PASS1)
+	     || OCTEON_IS_MODEL(OCTEON_CN30XX_PASS1))
+	    && (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM))
+		__cvmx_helper_errata_fix_ipd_ptr_alignment();
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_ipd_and_packet_input_enable);
+
+/**
+ * Initialize the PIP, IPD, and PKO hardware to support
+ * simple priority based queues for the ethernet ports. Each
+ * port is configured with a number of priority queues based
+ * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
+ * priority than the previous.
+ *
+ * Returns Zero on success, non-zero on failure
+ */
+int cvmx_helper_initialize_packet_io_global(void)
+{
+	int result = 0;
+	int interface;
+	union cvmx_l2c_cfg l2c_cfg;
+	union cvmx_smix_en smix_en;
+	const int num_interfaces = cvmx_helper_get_number_of_interfaces();
+
+	/*
+	 * CN52XX pass 1: Due to a bug in 2nd order CDR, it needs to
+	 * be disabled.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
+		__cvmx_helper_errata_qlm_disable_2nd_order_cdr(1);
+
+	/*
+	 * Tell L2 to give the IOB statically higher priority compared
+	 * to the cores. This avoids conditions where IO blocks might
+	 * be starved under very high L2 loads.
+	 */
+	l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+	l2c_cfg.s.lrf_arb_mode = 0;
+	l2c_cfg.s.rfb_arb_mode = 0;
+	cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64);
+
+	/* Make sure SMI/MDIO is enabled so we can query PHYs */
+	smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(0));
+	if (!smix_en.s.en) {
+		smix_en.s.en = 1;
+		cvmx_write_csr(CVMX_SMIX_EN(0), smix_en.u64);
+	}
+
+	/* Newer chips actually have two SMI/MDIO interfaces */
+	if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
+	    !OCTEON_IS_MODEL(OCTEON_CN58XX) &&
+	    !OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+		smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(1));
+		if (!smix_en.s.en) {
+			smix_en.s.en = 1;
+			cvmx_write_csr(CVMX_SMIX_EN(1), smix_en.u64);
+		}
+	}
+
+	cvmx_pko_initialize_global();
+	for (interface = 0; interface < num_interfaces; interface++) {
+		result |= cvmx_helper_interface_probe(interface);
+		if (cvmx_helper_ports_on_interface(interface) > 0)
+			cvmx_dprintf("Interface %d has %d ports (%s)\n",
+				     interface,
+				     cvmx_helper_ports_on_interface(interface),
+				     cvmx_helper_interface_mode_to_string
+				     (cvmx_helper_interface_get_mode
+				      (interface)));
+		result |= __cvmx_helper_interface_setup_ipd(interface);
+		result |= __cvmx_helper_interface_setup_pko(interface);
+	}
+
+	result |= __cvmx_helper_global_setup_ipd();
+	result |= __cvmx_helper_global_setup_pko();
+
+	/* Enable any flow control and backpressure */
+	result |= __cvmx_helper_global_setup_backpressure();
+
+#if CVMX_HELPER_ENABLE_IPD
+	result |= cvmx_helper_ipd_and_packet_input_enable();
+#endif
+	return result;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_initialize_packet_io_global);
+
+/**
+ * Does core local initialization for packet io
+ *
+ * Returns Zero on success, non-zero on failure
+ */
+int cvmx_helper_initialize_packet_io_local(void)
+{
+	return cvmx_pko_initialize_local();
+}
+
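+/*
+ * A minimal bring-up sketch (illustrative only, not part of the SDK
+ * sources): the global setup must run once on a single core before
+ * each core performs its own local initialization.
+ *
+ *	if (cvmx_helper_initialize_packet_io_global() != 0)
+ *		return -1;
+ *	cvmx_helper_initialize_packet_io_local();
+ */
+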
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
+{
+	cvmx_helper_link_info_t result;
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+	/* The default result will be a down link unless the code below
+	   changes it */
+	result.u64 = 0;
+
+	if (index >= cvmx_helper_ports_on_interface(interface))
+		return result;
+
+	switch (cvmx_helper_interface_get_mode(interface)) {
+	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+	case CVMX_HELPER_INTERFACE_MODE_PCIE:
+		/* Network links are not supported */
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+		result = __cvmx_helper_xaui_link_get(ipd_port);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_GMII:
+		if (index == 0)
+			result = __cvmx_helper_rgmii_link_get(ipd_port);
+		else {
+			result.s.full_duplex = 1;
+			result.s.link_up = 1;
+			result.s.speed = 1000;
+		}
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_RGMII:
+		result = __cvmx_helper_rgmii_link_get(ipd_port);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_SPI:
+		result = __cvmx_helper_spi_link_get(ipd_port);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+	case CVMX_HELPER_INTERFACE_MODE_PICMG:
+		result = __cvmx_helper_sgmii_link_get(ipd_port);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_NPI:
+	case CVMX_HELPER_INTERFACE_MODE_LOOP:
+		/* Network links are not supported */
+		break;
+	}
+	return result;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_link_get);
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get().
+ *
+ * @ipd_port:  IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+	int result = -1;
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+	if (index >= cvmx_helper_ports_on_interface(interface))
+		return -1;
+
+	switch (cvmx_helper_interface_get_mode(interface)) {
+	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+	case CVMX_HELPER_INTERFACE_MODE_PCIE:
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+		result = __cvmx_helper_xaui_link_set(ipd_port, link_info);
+		break;
+		/*
+		 * RGMII/GMII/MII are all treated about the same. Most
+		 * functions refer to these ports as RGMII.
+		 */
+	case CVMX_HELPER_INTERFACE_MODE_RGMII:
+	case CVMX_HELPER_INTERFACE_MODE_GMII:
+		result = __cvmx_helper_rgmii_link_set(ipd_port, link_info);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_SPI:
+		result = __cvmx_helper_spi_link_set(ipd_port, link_info);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+	case CVMX_HELPER_INTERFACE_MODE_PICMG:
+		result = __cvmx_helper_sgmii_link_set(ipd_port, link_info);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_NPI:
+	case CVMX_HELPER_INTERFACE_MODE_LOOP:
+		break;
+	}
+	return result;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_link_set);
+
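+/*
+ * Illustrative poll loop (a sketch; 'last_link' is caller-maintained
+ * state, not an SDK symbol): cvmx_helper_link_get() reflects auto
+ * negotiation while the hardware keeps whatever was last programmed,
+ * so callers typically re-apply the state whenever it changes.
+ *
+ *	cvmx_helper_link_info_t link = cvmx_helper_link_get(ipd_port);
+ *	if (link.u64 != last_link.u64) {
+ *		cvmx_helper_link_set(ipd_port, link);
+ *		last_link = link;
+ *	}
+ */
+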
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @ipd_port: IPD/PKO port to loopback.
+ * @enable_internal:
+ *		   Non-zero if you want internal loopback
+ * @enable_external:
+ *		   Non-zero if you want external loopback
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int cvmx_helper_configure_loopback(int ipd_port, int enable_internal,
+				   int enable_external)
+{
+	int result = -1;
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+	if (index >= cvmx_helper_ports_on_interface(interface))
+		return -1;
+
+	switch (cvmx_helper_interface_get_mode(interface)) {
+	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+	case CVMX_HELPER_INTERFACE_MODE_PCIE:
+	case CVMX_HELPER_INTERFACE_MODE_SPI:
+	case CVMX_HELPER_INTERFACE_MODE_NPI:
+	case CVMX_HELPER_INTERFACE_MODE_LOOP:
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+		result =
+		    __cvmx_helper_xaui_configure_loopback(ipd_port,
+							  enable_internal,
+							  enable_external);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_RGMII:
+	case CVMX_HELPER_INTERFACE_MODE_GMII:
+		result =
+		    __cvmx_helper_rgmii_configure_loopback(ipd_port,
+							   enable_internal,
+							   enable_external);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+	case CVMX_HELPER_INTERFACE_MODE_PICMG:
+		result =
+		    __cvmx_helper_sgmii_configure_loopback(ipd_port,
+							   enable_internal,
+							   enable_external);
+		break;
+	}
+	return result;
+}
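+
+/*
+ * Usage sketch (the port number is hypothetical): put IPD port 16
+ * into internal loopback so packets it transmits are received back
+ * by Octeon.
+ *
+ *	if (cvmx_helper_configure_loopback(16, 1, 0) < 0)
+ *		cvmx_dprintf("loopback not supported on this port\n");
+ */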
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
new file mode 100644
index 0000000..2f415d9
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
@@ -0,0 +1,371 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2009 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Automatically generated functions useful for enabling
+ * and decoding RSL_INT_BLOCKS interrupts.
+ *
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-pcsx-defs.h>
+#include <asm/octeon/cvmx-pcsxx-defs.h>
+#include <asm/octeon/cvmx-spxx-defs.h>
+#include <asm/octeon/cvmx-stxx-defs.h>
+
+#ifndef PRINT_ERROR
+#define PRINT_ERROR(format, ...)
+#endif
+
+
+/**
+ * __cvmx_interrupt_gmxx_rxx_int_en_enable enables all interrupt bits in cvmx_gmxx_rxx_int_en_t
+ */
+void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
+{
+	union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
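+	/*
+	 * Writing back the value just read acknowledges any already
+	 * pending bits (the INT_REG bits are write-1-to-clear) before
+	 * the enables below are programmed.
+	 */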
+	cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, block),
+		       cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, block)));
+	gmx_rx_int_en.u64 = 0;
+	if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+		/* Skipping gmx_rx_int_en.s.reserved_29_63 */
+		gmx_rx_int_en.s.hg2cc = 1;
+		gmx_rx_int_en.s.hg2fld = 1;
+		gmx_rx_int_en.s.undat = 1;
+		gmx_rx_int_en.s.uneop = 1;
+		gmx_rx_int_en.s.unsop = 1;
+		gmx_rx_int_en.s.bad_term = 1;
+		gmx_rx_int_en.s.bad_seq = 1;
+		gmx_rx_int_en.s.rem_fault = 1;
+		gmx_rx_int_en.s.loc_fault = 1;
+		gmx_rx_int_en.s.pause_drp = 1;
+		/* Skipping gmx_rx_int_en.s.reserved_16_18 */
+		/*gmx_rx_int_en.s.ifgerr = 1; */
+		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+		gmx_rx_int_en.s.ovrerr = 1;
+		/* Skipping gmx_rx_int_en.s.reserved_9_9 */
+		gmx_rx_int_en.s.skperr = 1;
+		gmx_rx_int_en.s.rcverr = 1;
+		/* Skipping gmx_rx_int_en.s.reserved_5_6 */
+		/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+		gmx_rx_int_en.s.jabber = 1;
+		/* Skipping gmx_rx_int_en.s.reserved_2_2 */
+		gmx_rx_int_en.s.carext = 1;
+		/* Skipping gmx_rx_int_en.s.reserved_0_0 */
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
+		/* Skipping gmx_rx_int_en.s.reserved_19_63 */
+		/*gmx_rx_int_en.s.phy_dupx = 1; */
+		/*gmx_rx_int_en.s.phy_spd = 1; */
+		/*gmx_rx_int_en.s.phy_link = 1; */
+		/*gmx_rx_int_en.s.ifgerr = 1; */
+		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+		gmx_rx_int_en.s.ovrerr = 1;
+		gmx_rx_int_en.s.niberr = 1;
+		gmx_rx_int_en.s.skperr = 1;
+		gmx_rx_int_en.s.rcverr = 1;
+		/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+		gmx_rx_int_en.s.alnerr = 1;
+		/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+		gmx_rx_int_en.s.jabber = 1;
+		gmx_rx_int_en.s.maxerr = 1;
+		gmx_rx_int_en.s.carext = 1;
+		gmx_rx_int_en.s.minerr = 1;
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+		/* Skipping gmx_rx_int_en.s.reserved_20_63 */
+		gmx_rx_int_en.s.pause_drp = 1;
+		/*gmx_rx_int_en.s.phy_dupx = 1; */
+		/*gmx_rx_int_en.s.phy_spd = 1; */
+		/*gmx_rx_int_en.s.phy_link = 1; */
+		/*gmx_rx_int_en.s.ifgerr = 1; */
+		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+		gmx_rx_int_en.s.ovrerr = 1;
+		gmx_rx_int_en.s.niberr = 1;
+		gmx_rx_int_en.s.skperr = 1;
+		gmx_rx_int_en.s.rcverr = 1;
+		/* Skipping gmx_rx_int_en.s.reserved_6_6 */
+		gmx_rx_int_en.s.alnerr = 1;
+		/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+		gmx_rx_int_en.s.jabber = 1;
+		/* Skipping gmx_rx_int_en.s.reserved_2_2 */
+		gmx_rx_int_en.s.carext = 1;
+		/* Skipping gmx_rx_int_en.s.reserved_0_0 */
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+		/* Skipping gmx_rx_int_en.s.reserved_19_63 */
+		/*gmx_rx_int_en.s.phy_dupx = 1; */
+		/*gmx_rx_int_en.s.phy_spd = 1; */
+		/*gmx_rx_int_en.s.phy_link = 1; */
+		/*gmx_rx_int_en.s.ifgerr = 1; */
+		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+		gmx_rx_int_en.s.ovrerr = 1;
+		gmx_rx_int_en.s.niberr = 1;
+		gmx_rx_int_en.s.skperr = 1;
+		gmx_rx_int_en.s.rcverr = 1;
+		/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+		gmx_rx_int_en.s.alnerr = 1;
+		/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+		gmx_rx_int_en.s.jabber = 1;
+		gmx_rx_int_en.s.maxerr = 1;
+		gmx_rx_int_en.s.carext = 1;
+		gmx_rx_int_en.s.minerr = 1;
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
+		/* Skipping gmx_rx_int_en.s.reserved_19_63 */
+		/*gmx_rx_int_en.s.phy_dupx = 1; */
+		/*gmx_rx_int_en.s.phy_spd = 1; */
+		/*gmx_rx_int_en.s.phy_link = 1; */
+		/*gmx_rx_int_en.s.ifgerr = 1; */
+		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+		gmx_rx_int_en.s.ovrerr = 1;
+		gmx_rx_int_en.s.niberr = 1;
+		gmx_rx_int_en.s.skperr = 1;
+		gmx_rx_int_en.s.rcverr = 1;
+		/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+		gmx_rx_int_en.s.alnerr = 1;
+		/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+		gmx_rx_int_en.s.jabber = 1;
+		gmx_rx_int_en.s.maxerr = 1;
+		gmx_rx_int_en.s.carext = 1;
+		gmx_rx_int_en.s.minerr = 1;
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+		/* Skipping gmx_rx_int_en.s.reserved_20_63 */
+		gmx_rx_int_en.s.pause_drp = 1;
+		/*gmx_rx_int_en.s.phy_dupx = 1; */
+		/*gmx_rx_int_en.s.phy_spd = 1; */
+		/*gmx_rx_int_en.s.phy_link = 1; */
+		/*gmx_rx_int_en.s.ifgerr = 1; */
+		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+		gmx_rx_int_en.s.ovrerr = 1;
+		gmx_rx_int_en.s.niberr = 1;
+		gmx_rx_int_en.s.skperr = 1;
+		gmx_rx_int_en.s.rcverr = 1;
+		/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+		gmx_rx_int_en.s.alnerr = 1;
+		/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+		gmx_rx_int_en.s.jabber = 1;
+		gmx_rx_int_en.s.maxerr = 1;
+		gmx_rx_int_en.s.carext = 1;
+		gmx_rx_int_en.s.minerr = 1;
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+		/* Skipping gmx_rx_int_en.s.reserved_29_63 */
+		gmx_rx_int_en.s.hg2cc = 1;
+		gmx_rx_int_en.s.hg2fld = 1;
+		gmx_rx_int_en.s.undat = 1;
+		gmx_rx_int_en.s.uneop = 1;
+		gmx_rx_int_en.s.unsop = 1;
+		gmx_rx_int_en.s.bad_term = 1;
+		gmx_rx_int_en.s.bad_seq = 0;
+		gmx_rx_int_en.s.rem_fault = 1;
+		gmx_rx_int_en.s.loc_fault = 0;
+		gmx_rx_int_en.s.pause_drp = 1;
+		/* Skipping gmx_rx_int_en.s.reserved_16_18 */
+		/*gmx_rx_int_en.s.ifgerr = 1; */
+		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+		gmx_rx_int_en.s.ovrerr = 1;
+		/* Skipping gmx_rx_int_en.s.reserved_9_9 */
+		gmx_rx_int_en.s.skperr = 1;
+		gmx_rx_int_en.s.rcverr = 1;
+		/* Skipping gmx_rx_int_en.s.reserved_5_6 */
+		/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+		gmx_rx_int_en.s.jabber = 1;
+		/* Skipping gmx_rx_int_en.s.reserved_2_2 */
+		gmx_rx_int_en.s.carext = 1;
+		/* Skipping gmx_rx_int_en.s.reserved_0_0 */
+	}
+	cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, block), gmx_rx_int_en.u64);
+}
+/**
+ * __cvmx_interrupt_pcsx_intx_en_reg_enable enables all interrupt bits in cvmx_pcsx_intx_en_reg_t
+ */
+void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block)
+{
+	union cvmx_pcsx_intx_en_reg pcs_int_en_reg;
+	cvmx_write_csr(CVMX_PCSX_INTX_REG(index, block),
+		       cvmx_read_csr(CVMX_PCSX_INTX_REG(index, block)));
+	pcs_int_en_reg.u64 = 0;
+	if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+		/* Skipping pcs_int_en_reg.s.reserved_12_63 */
+		/*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
+		pcs_int_en_reg.s.sync_bad_en = 1;
+		pcs_int_en_reg.s.an_bad_en = 1;
+		pcs_int_en_reg.s.rxlock_en = 1;
+		pcs_int_en_reg.s.rxbad_en = 1;
+		/*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
+		pcs_int_en_reg.s.txbad_en = 1;
+		pcs_int_en_reg.s.txfifo_en = 1;
+		pcs_int_en_reg.s.txfifu_en = 1;
+		pcs_int_en_reg.s.an_err_en = 1;
+		/*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
+		/*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+		/* Skipping pcs_int_en_reg.s.reserved_12_63 */
+		/*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
+		pcs_int_en_reg.s.sync_bad_en = 1;
+		pcs_int_en_reg.s.an_bad_en = 1;
+		pcs_int_en_reg.s.rxlock_en = 1;
+		pcs_int_en_reg.s.rxbad_en = 1;
+		/*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
+		pcs_int_en_reg.s.txbad_en = 1;
+		pcs_int_en_reg.s.txfifo_en = 1;
+		pcs_int_en_reg.s.txfifu_en = 1;
+		pcs_int_en_reg.s.an_err_en = 1;
+		/*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
+		/*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
+	}
+	cvmx_write_csr(CVMX_PCSX_INTX_EN_REG(index, block), pcs_int_en_reg.u64);
+}
+/**
+ * __cvmx_interrupt_pcsxx_int_en_reg_enable enables all interrupt bits in cvmx_pcsxx_int_en_reg_t
+ */
+void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index)
+{
+	union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
+	cvmx_write_csr(CVMX_PCSXX_INT_REG(index),
+		       cvmx_read_csr(CVMX_PCSXX_INT_REG(index)));
+	pcsx_int_en_reg.u64 = 0;
+	if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+		/* Skipping pcsx_int_en_reg.s.reserved_6_63 */
+		pcsx_int_en_reg.s.algnlos_en = 1;
+		pcsx_int_en_reg.s.synlos_en = 1;
+		pcsx_int_en_reg.s.bitlckls_en = 1;
+		pcsx_int_en_reg.s.rxsynbad_en = 1;
+		pcsx_int_en_reg.s.rxbad_en = 1;
+		pcsx_int_en_reg.s.txflt_en = 1;
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+		/* Skipping pcsx_int_en_reg.s.reserved_6_63 */
+		pcsx_int_en_reg.s.algnlos_en = 1;
+		pcsx_int_en_reg.s.synlos_en = 1;
+		pcsx_int_en_reg.s.bitlckls_en = 0;	/* Happens if XAUI module is not installed */
+		pcsx_int_en_reg.s.rxsynbad_en = 1;
+		pcsx_int_en_reg.s.rxbad_en = 1;
+		pcsx_int_en_reg.s.txflt_en = 1;
+	}
+	cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(index), pcsx_int_en_reg.u64);
+}
+
+/**
+ * __cvmx_interrupt_spxx_int_msk_enable enables all interrupt bits in cvmx_spxx_int_msk_t
+ */
+void __cvmx_interrupt_spxx_int_msk_enable(int index)
+{
+	union cvmx_spxx_int_msk spx_int_msk;
+	cvmx_write_csr(CVMX_SPXX_INT_REG(index),
+		       cvmx_read_csr(CVMX_SPXX_INT_REG(index)));
+	spx_int_msk.u64 = 0;
+	if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+		/* Skipping spx_int_msk.s.reserved_12_63 */
+		spx_int_msk.s.calerr = 1;
+		spx_int_msk.s.syncerr = 1;
+		spx_int_msk.s.diperr = 1;
+		spx_int_msk.s.tpaovr = 1;
+		spx_int_msk.s.rsverr = 1;
+		spx_int_msk.s.drwnng = 1;
+		spx_int_msk.s.clserr = 1;
+		spx_int_msk.s.spiovr = 1;
+		/* Skipping spx_int_msk.s.reserved_2_3 */
+		spx_int_msk.s.abnorm = 1;
+		spx_int_msk.s.prtnxa = 1;
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+		/* Skipping spx_int_msk.s.reserved_12_63 */
+		spx_int_msk.s.calerr = 1;
+		spx_int_msk.s.syncerr = 1;
+		spx_int_msk.s.diperr = 1;
+		spx_int_msk.s.tpaovr = 1;
+		spx_int_msk.s.rsverr = 1;
+		spx_int_msk.s.drwnng = 1;
+		spx_int_msk.s.clserr = 1;
+		spx_int_msk.s.spiovr = 1;
+		/* Skipping spx_int_msk.s.reserved_2_3 */
+		spx_int_msk.s.abnorm = 1;
+		spx_int_msk.s.prtnxa = 1;
+	}
+	cvmx_write_csr(CVMX_SPXX_INT_MSK(index), spx_int_msk.u64);
+}
+/**
+ * __cvmx_interrupt_stxx_int_msk_enable enables all interrupt bits in cvmx_stxx_int_msk_t
+ */
+void __cvmx_interrupt_stxx_int_msk_enable(int index)
+{
+	union cvmx_stxx_int_msk stx_int_msk;
+	cvmx_write_csr(CVMX_STXX_INT_REG(index),
+		       cvmx_read_csr(CVMX_STXX_INT_REG(index)));
+	stx_int_msk.u64 = 0;
+	if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+		/* Skipping stx_int_msk.s.reserved_8_63 */
+		stx_int_msk.s.frmerr = 1;
+		stx_int_msk.s.unxfrm = 1;
+		stx_int_msk.s.nosync = 1;
+		stx_int_msk.s.diperr = 1;
+		stx_int_msk.s.datovr = 1;
+		stx_int_msk.s.ovrbst = 1;
+		stx_int_msk.s.calpar1 = 1;
+		stx_int_msk.s.calpar0 = 1;
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+		/* Skipping stx_int_msk.s.reserved_8_63 */
+		stx_int_msk.s.frmerr = 1;
+		stx_int_msk.s.unxfrm = 1;
+		stx_int_msk.s.nosync = 1;
+		stx_int_msk.s.diperr = 1;
+		stx_int_msk.s.datovr = 1;
+		stx_int_msk.s.ovrbst = 1;
+		stx_int_msk.s.calpar1 = 1;
+		stx_int_msk.s.calpar0 = 1;
+	}
+	cvmx_write_csr(CVMX_STXX_INT_MSK(index), stx_int_msk.u64);
+}
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c
new file mode 100644
index 0000000..fa327ec
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c
@@ -0,0 +1,140 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Utility functions to decode Octeon's RSL_INT_BLOCKS
+ * interrupts into error messages.
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-asxx-defs.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+
+#ifndef PRINT_ERROR
+#define PRINT_ERROR(format, ...)
+#endif
+
+void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block);
+
+/**
+ * Enable ASX error interrupts that exist on CN3XXX, CN50XX, and
+ * CN58XX.
+ *
+ * @block:  Interface to enable 0-1
+ */
+void __cvmx_interrupt_asxx_enable(int block)
+{
+	int mask;
+	union cvmx_asxx_int_en csr;
+	/*
+	 * CN38XX and CN58XX have two interfaces with 4 ports per
+	 * interface. All other chips have a max of 3 ports on
+	 * interface 0
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
+		mask = 0xf;	/* Set enables for 4 ports */
+	else
+		mask = 0x7;	/* Set enables for 3 ports */
+
+	/* Enable interface interrupts */
+	csr.u64 = cvmx_read_csr(CVMX_ASXX_INT_EN(block));
+	csr.s.txpsh = mask;
+	csr.s.txpop = mask;
+	csr.s.ovrflw = mask;
+	cvmx_write_csr(CVMX_ASXX_INT_EN(block), csr.u64);
+}
+/**
+ * Enable GMX error reporting for the supplied interface
+ *
+ * @interface: Interface to enable
+ */
+void __cvmx_interrupt_gmxx_enable(int interface)
+{
+	union cvmx_gmxx_inf_mode mode;
+	union cvmx_gmxx_tx_int_en gmx_tx_int_en;
+	int num_ports;
+	int index;
+
+	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+	if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+		if (mode.s.en) {
+			switch (mode.cn56xx.mode) {
+			case 1: /* XAUI */
+				num_ports = 1;
+				break;
+			case 2: /* SGMII */
+			case 3: /* PICMG */
+				num_ports = 4;
+				break;
+			default:	/* Disabled */
+				num_ports = 0;
+				break;
+			}
+		} else
+			num_ports = 0;
+	} else {
+		if (mode.s.en) {
+			if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+			    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+				/*
+				 * SPI on CN38XX and CN58XX reports all
+				 * errors through port 0.  RGMII needs
+				 * to check all 4 ports.
+				 */
+				if (mode.s.type)
+					num_ports = 1;
+				else
+					num_ports = 4;
+			} else {
+				/*
+				 * CN30XX, CN31XX, and CN50XX have two
+				 * or three ports. GMII and MII have
+				 * two, RGMII has three.
+				 */
+				if (mode.s.type)
+					num_ports = 2;
+				else
+					num_ports = 3;
+			}
+		} else
+			num_ports = 0;
+	}
+
+	gmx_tx_int_en.u64 = 0;
+	if (num_ports) {
+		if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+		    || OCTEON_IS_MODEL(OCTEON_CN58XX))
+			gmx_tx_int_en.cn38xx.ncb_nxa = 1;
+		gmx_tx_int_en.s.pko_nxa = 1;
+	}
+	gmx_tx_int_en.s.undflw = (1 << num_ports) - 1;
+	cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
+	for (index = 0; index < num_ports; index++)
+		__cvmx_interrupt_gmxx_rxx_int_en_enable(index, interface);
+}
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-l2c.c
new file mode 100644
index 0000000..f091c9b
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -0,0 +1,920 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Implementation of the Level 2 Cache (L2C) control,
+ * measurement, and debugging facilities.
+ */
+
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-l2c.h>
+#include <asm/octeon/cvmx-spinlock.h>
+
+/*
+ * This spinlock is used internally to ensure that only one core is
+ * performing certain L2 operations at a time.
+ *
+ * NOTE: This only protects calls from within a single application -
+ * if multiple applications or operating systems are running, then it
+ * is up to the user program to coordinate between them.
+ */
+cvmx_spinlock_t cvmx_l2c_spinlock;
+
+int cvmx_l2c_get_core_way_partition(uint32_t core)
+{
+	uint32_t field;
+
+	/* Validate the core number */
+	if (core >= cvmx_octeon_num_cores())
+		return -1;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+		return cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff;
+
+	/*
+	 * Use the lower two bits of the coreNumber to determine the
+	 * bit offset of the UMSK[] field in the L2C_SPAR register.
+	 */
+	field = (core & 0x3) * 8;
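+	/*
+	 * Worked example (arithmetic only): core 6 gives field =
+	 * (6 & 3) * 8 = 16, and (6 & 0xC) == 0x4 selects L2C_SPAR1
+	 * in the switch below.
+	 */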
+
+	/*
+	 * Return the UMSK[] field from the appropriate L2C_SPAR
+	 * register based on the coreNumber.
+	 */
+
+	switch (core & 0xC) {
+	case 0x0:
+		return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field;
+	case 0x4:
+		return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field;
+	case 0x8:
+		return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field;
+	case 0xC:
+		return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field;
+	}
+	return 0;
+}
+
+int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
+{
+	uint32_t field;
+	uint32_t valid_mask;
+
+	valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
+
+	mask &= valid_mask;
+
+	/* A UMSK setting which blocks all L2C Ways is an error on some chips */
+	if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
+		return -1;
+
+	/* Validate the core number */
+	if (core >= cvmx_octeon_num_cores())
+		return -1;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+		cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask);
+		return 0;
+	}
+
+	/*
+	 * Use the lower two bits of core to determine the bit offset of the
+	 * UMSK[] field in the L2C_SPAR register.
+	 */
+	field = (core & 0x3) * 8;
+
+	/*
+	 * Assign the new mask setting to the UMSK[] field in the appropriate
+	 * L2C_SPAR register based on the core_num.
+	 *
+	 */
+	switch (core & 0xC) {
+	case 0x0:
+		cvmx_write_csr(CVMX_L2C_SPAR0,
+			       (cvmx_read_csr(CVMX_L2C_SPAR0) & ~(0xFF << field)) |
+			       mask << field);
+		break;
+	case 0x4:
+		cvmx_write_csr(CVMX_L2C_SPAR1,
+			       (cvmx_read_csr(CVMX_L2C_SPAR1) & ~(0xFF << field)) |
+			       mask << field);
+		break;
+	case 0x8:
+		cvmx_write_csr(CVMX_L2C_SPAR2,
+			       (cvmx_read_csr(CVMX_L2C_SPAR2) & ~(0xFF << field)) |
+			       mask << field);
+		break;
+	case 0xC:
+		cvmx_write_csr(CVMX_L2C_SPAR3,
+			       (cvmx_read_csr(CVMX_L2C_SPAR3) & ~(0xFF << field)) |
+			       mask << field);
+		break;
+	}
+	return 0;
+}
+
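+/*
+ * Usage sketch: a set bit in the mask blocks the corresponding way
+ * for this core (hence the all-ways-blocked check above), so e.g.
+ * keeping this core out of ways 0 and 1:
+ *
+ *	cvmx_l2c_set_core_way_partition(cvmx_get_core_num(), 0x3);
+ */
+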
+int cvmx_l2c_set_hw_way_partition(uint32_t mask)
+{
+	uint32_t valid_mask;
+
+	valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
+	mask &= valid_mask;
+
+	/* A UMSK setting which blocks all L2C Ways is an error on some chips */
+	if (mask == valid_mask	&& !OCTEON_IS_MODEL(OCTEON_CN63XX))
+		return -1;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+		cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask);
+	else
+		cvmx_write_csr(CVMX_L2C_SPAR4,
+			       (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
+	return 0;
+}
+
+int cvmx_l2c_get_hw_way_partition(void)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+		return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff;
+	else
+		return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
+}
+
+void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
+			  uint32_t clear_on_read)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+		union cvmx_l2c_pfctl pfctl;
+
+		pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
+
+		switch (counter) {
+		case 0:
+			pfctl.s.cnt0sel = event;
+			pfctl.s.cnt0ena = 1;
+			pfctl.s.cnt0rdclr = clear_on_read;
+			break;
+		case 1:
+			pfctl.s.cnt1sel = event;
+			pfctl.s.cnt1ena = 1;
+			pfctl.s.cnt1rdclr = clear_on_read;
+			break;
+		case 2:
+			pfctl.s.cnt2sel = event;
+			pfctl.s.cnt2ena = 1;
+			pfctl.s.cnt2rdclr = clear_on_read;
+			break;
+		case 3:
+		default:
+			pfctl.s.cnt3sel = event;
+			pfctl.s.cnt3ena = 1;
+			pfctl.s.cnt3rdclr = clear_on_read;
+			break;
+		}
+
+		cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
+	} else {
+		union cvmx_l2c_tadx_prf l2c_tadx_prf;
+		int tad;
+
+		cvmx_dprintf("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n");
+		if (clear_on_read)
+			cvmx_dprintf("L2C counters don't support clear on read for this chip\n");
+
+		l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0));
+
+		switch (counter) {
+		case 0:
+			l2c_tadx_prf.s.cnt0sel = event;
+			break;
+		case 1:
+			l2c_tadx_prf.s.cnt1sel = event;
+			break;
+		case 2:
+			l2c_tadx_prf.s.cnt2sel = event;
+			break;
+		default:
+		case 3:
+			l2c_tadx_prf.s.cnt3sel = event;
+			break;
+		}
+		for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+			cvmx_write_csr(CVMX_L2C_TADX_PRF(tad),
+				       l2c_tadx_prf.u64);
+	}
+}
+
+uint64_t cvmx_l2c_read_perf(uint32_t counter)
+{
+	switch (counter) {
+	case 0:
+		if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+			return cvmx_read_csr(CVMX_L2C_PFC0);
+		else {
+			uint64_t counter = 0;
+			int tad;
+
+			for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+				counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
+			return counter;
+		}
+	case 1:
+		if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+			return cvmx_read_csr(CVMX_L2C_PFC1);
+		else {
+			uint64_t counter = 0;
+			int tad;
+
+			for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+				counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
+			return counter;
+		}
+	case 2:
+		if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+			return cvmx_read_csr(CVMX_L2C_PFC2);
+		else {
+			uint64_t counter = 0;
+			int tad;
+
+			for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+				counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
+			return counter;
+		}
+	case 3:
+	default:
+		if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+			return cvmx_read_csr(CVMX_L2C_PFC3);
+		else {
+			uint64_t counter = 0;
+			int tad;
+
+			for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+				counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
+			return counter;
+		}
+	}
+}
+
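+/*
+ * Usage sketch (the event value is illustrative): program counter 0
+ * with some cvmx_l2c_event without clear-on-read, then read back the
+ * accumulated count later:
+ *
+ *	cvmx_l2c_config_perf(0, event, 0);
+ *	...
+ *	uint64_t count = cvmx_l2c_read_perf(0);
+ */
+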
+/**
+ * @INTERNAL
+ * Helper function used to fault in cache lines for L2 cache locking
+ *
+ * @addr:   Address of base of memory region to read into L2 cache
+ * @len:    Length (in bytes) of region to fault in
+ */
+static void fault_in(uint64_t addr, int len)
+{
+	char *ptr;
+
+	/*
+	 * Adjust addr and length so we get all cache lines even for
+	 * small ranges spanning two cache lines.
+	 */
+	len += addr & CVMX_CACHE_LINE_MASK;
+	addr &= ~CVMX_CACHE_LINE_MASK;
+	ptr = cvmx_phys_to_ptr(addr);
+	/*
+	 * Invalidate L1 cache to make sure all loads result in data
+	 * being in L2.
+	 */
+	CVMX_DCACHE_INVALIDATE;
+	while (len > 0) {
+		READ_ONCE(*ptr);
+		len -= CVMX_CACHE_LINE_SIZE;
+		ptr += CVMX_CACHE_LINE_SIZE;
+	}
+}
+
+int cvmx_l2c_lock_line(uint64_t addr)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+		int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
+		uint64_t assoc = cvmx_l2c_get_num_assoc();
+		uint64_t tag = addr >> shift;
+		uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT);
+		uint64_t way;
+		union cvmx_l2c_tadx_tag l2c_tadx_tag;
+
+		CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0);
+
+		/* Make sure we were able to lock the line */
+		for (way = 0; way < assoc; way++) {
+			CVMX_CACHE_LTGL2I(index | (way << shift), 0);
+			/* make sure CVMX_L2C_TADX_TAG is updated */
+			CVMX_SYNC;
+			l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
+			if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag)
+				break;
+		}
+
+		/* Check if a valid line is found */
+		if (way >= assoc) {
+			/* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: line not found for locking at 0x%llx address\n", (unsigned long long)addr); */
+			return -1;
+		}
+
+		/* Check if lock bit is not set */
+		if (!l2c_tadx_tag.s.lock) {
+			/* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: Not able to lock at 0x%llx address\n", (unsigned long long)addr); */
+			return -1;
+		}
+		return way;
+	} else {
+		int retval = 0;
+		union cvmx_l2c_dbg l2cdbg;
+		union cvmx_l2c_lckbase lckbase;
+		union cvmx_l2c_lckoff lckoff;
+		union cvmx_l2t_err l2t_err;
+
+		cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+
+		l2cdbg.u64 = 0;
+		lckbase.u64 = 0;
+		lckoff.u64 = 0;
+
+		/* Clear l2t error bits if set */
+		l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+		l2t_err.s.lckerr = 1;
+		l2t_err.s.lckerr2 = 1;
+		cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
+
+		addr &= ~CVMX_CACHE_LINE_MASK;
+
+		/* Set this core as debug core */
+		l2cdbg.s.ppnum = cvmx_get_core_num();
+		CVMX_SYNC;
+		cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+		cvmx_read_csr(CVMX_L2C_DBG);
+
+		lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
+		cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
+		cvmx_read_csr(CVMX_L2C_LCKOFF);
+
+		if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
+			int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
+			uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS;
+
+			lckbase.s.lck_base = addr_tmp >> 7;
+
+		} else {
+			lckbase.s.lck_base = addr >> 7;
+		}
+
+		lckbase.s.lck_ena = 1;
+		cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+		/* Make sure it gets there */
+		cvmx_read_csr(CVMX_L2C_LCKBASE);
+
+		fault_in(addr, CVMX_CACHE_LINE_SIZE);
+
+		lckbase.s.lck_ena = 0;
+		cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+		/* Make sure it gets there */
+		cvmx_read_csr(CVMX_L2C_LCKBASE);
+
+		/* Stop being debug core */
+		cvmx_write_csr(CVMX_L2C_DBG, 0);
+		cvmx_read_csr(CVMX_L2C_DBG);
+
+		l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+		if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
+			retval = 1;  /* We were unable to lock the line */
+
+		cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+		return retval;
+	}
+}
+
+int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
+{
+	int retval = 0;
+
+	/* Round start/end to cache line boundaries */
+	len += start & CVMX_CACHE_LINE_MASK;
+	start &= ~CVMX_CACHE_LINE_MASK;
+	len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
+
+	while (len) {
+		retval += cvmx_l2c_lock_line(start);
+		start += CVMX_CACHE_LINE_SIZE;
+		len -= CVMX_CACHE_LINE_SIZE;
+	}
+	return retval;
+}
+
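+/*
+ * Note for callers (a reading of the code above, not SDK
+ * documentation): the accumulated return value mixes the per-line
+ * conventions of cvmx_l2c_lock_line() -- way number on CN63XX, 0/1
+ * elsewhere -- so only on pre-CN63XX parts does a non-zero result
+ * directly count lines that failed to lock.
+ */
+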
+void cvmx_l2c_flush(void)
+{
+	uint64_t assoc, set;
+	uint64_t n_assoc, n_set;
+
+	n_set = cvmx_l2c_get_num_sets();
+	n_assoc = cvmx_l2c_get_num_assoc();
+
+	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+		uint64_t address;
+		/* These may look like constants, but they aren't... */
+		int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
+		int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
+
+		for (set = 0; set < n_set; set++) {
+			for (assoc = 0; assoc < n_assoc; assoc++) {
+				address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+						       (assoc << assoc_shift) | (set << set_shift));
+				CVMX_CACHE_WBIL2I(address, 0);
+			}
+		}
+	} else {
+		for (set = 0; set < n_set; set++)
+			for (assoc = 0; assoc < n_assoc; assoc++)
+				cvmx_l2c_flush_line(assoc, set);
+	}
+}
+
+
+int cvmx_l2c_unlock_line(uint64_t address)
+{
+
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+		int assoc;
+		union cvmx_l2c_tag tag;
+		uint32_t tag_addr;
+		uint32_t index = cvmx_l2c_address_to_index(address);
+
+		tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
+
+		/*
+		 * For 63XX, we can flush a line by using the physical
+		 * address directly, so finding the cache line used by
+		 * the address is only required to provide the proper
+		 * return value for the function.
+		 */
+		for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
+			tag = cvmx_l2c_get_tag(assoc, index);
+
+			if (tag.s.V && (tag.s.addr == tag_addr)) {
+				CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0);
+				return tag.s.L;
+			}
+		}
+	} else {
+		int assoc;
+		union cvmx_l2c_tag tag;
+		uint32_t tag_addr;
+
+		uint32_t index = cvmx_l2c_address_to_index(address);
+
+		/* Compute portion of address that is stored in tag */
+		tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
+		for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
+			tag = cvmx_l2c_get_tag(assoc, index);
+
+			if (tag.s.V && (tag.s.addr == tag_addr)) {
+				cvmx_l2c_flush_line(assoc, index);
+				return tag.s.L;
+			}
+		}
+	}
+	return 0;
+}
+
+int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
+{
+	int num_unlocked = 0;
+	/* Round start/end to cache line boundaries */
+	len += start & CVMX_CACHE_LINE_MASK;
+	start &= ~CVMX_CACHE_LINE_MASK;
+	len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
+	while (len > 0) {
+		num_unlocked += cvmx_l2c_unlock_line(start);
+		start += CVMX_CACHE_LINE_SIZE;
+		len -= CVMX_CACHE_LINE_SIZE;
+	}
+
+	return num_unlocked;
+}
+
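+/*
+ * Usage sketch (the buffer is hypothetical): the return value counts
+ * previously locked lines that were released, so it can be compared
+ * with the number of lines locked earlier.
+ *
+ *	int released = cvmx_l2c_unlock_mem_region(cvmx_ptr_to_phys(buf),
+ *						  4096);
+ */
+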
+/*
+ * Internal l2c tag types.  These are converted to a generic structure
+ * that can be used on all chips.
+ */
+union __cvmx_l2c_tag {
+	uint64_t u64;
+	struct cvmx_l2c_tag_cn50xx {
+		__BITFIELD_FIELD(uint64_t reserved:40,
+		__BITFIELD_FIELD(uint64_t V:1,		/* Line valid */
+		__BITFIELD_FIELD(uint64_t D:1,		/* Line dirty */
+		__BITFIELD_FIELD(uint64_t L:1,		/* Line locked */
+		__BITFIELD_FIELD(uint64_t U:1,		/* Use, LRU eviction */
+		__BITFIELD_FIELD(uint64_t addr:20,	/* Phys addr (33..14) */
+		;))))))
+	} cn50xx;
+	struct cvmx_l2c_tag_cn30xx {
+		__BITFIELD_FIELD(uint64_t reserved:41,
+		__BITFIELD_FIELD(uint64_t V:1,		/* Line valid */
+		__BITFIELD_FIELD(uint64_t D:1,		/* Line dirty */
+		__BITFIELD_FIELD(uint64_t L:1,		/* Line locked */
+		__BITFIELD_FIELD(uint64_t U:1,		/* Use, LRU eviction */
+		__BITFIELD_FIELD(uint64_t addr:19,	/* Phys addr (33..15) */
+		;))))))
+	} cn30xx;
+	struct cvmx_l2c_tag_cn31xx {
+		__BITFIELD_FIELD(uint64_t reserved:42,
+		__BITFIELD_FIELD(uint64_t V:1,		/* Line valid */
+		__BITFIELD_FIELD(uint64_t D:1,		/* Line dirty */
+		__BITFIELD_FIELD(uint64_t L:1,		/* Line locked */
+		__BITFIELD_FIELD(uint64_t U:1,		/* Use, LRU eviction */
+		__BITFIELD_FIELD(uint64_t addr:18,	/* Phys addr (33..16) */
+		;))))))
+	} cn31xx;
+	struct cvmx_l2c_tag_cn38xx {
+		__BITFIELD_FIELD(uint64_t reserved:43,
+		__BITFIELD_FIELD(uint64_t V:1,		/* Line valid */
+		__BITFIELD_FIELD(uint64_t D:1,		/* Line dirty */
+		__BITFIELD_FIELD(uint64_t L:1,		/* Line locked */
+		__BITFIELD_FIELD(uint64_t U:1,		/* Use, LRU eviction */
+		__BITFIELD_FIELD(uint64_t addr:17,	/* Phys addr (33..17) */
+		;))))))
+	} cn38xx;
+	struct cvmx_l2c_tag_cn58xx {
+		__BITFIELD_FIELD(uint64_t reserved:44,
+		__BITFIELD_FIELD(uint64_t V:1,		/* Line valid */
+		__BITFIELD_FIELD(uint64_t D:1,		/* Line dirty */
+		__BITFIELD_FIELD(uint64_t L:1,		/* Line locked */
+		__BITFIELD_FIELD(uint64_t U:1,		/* Use, LRU eviction */
+		__BITFIELD_FIELD(uint64_t addr:16,	/* Phys addr (33..18) */
+		;))))))
+	} cn58xx;
+	struct cvmx_l2c_tag_cn58xx cn56xx;	/* 2048 sets */
+	struct cvmx_l2c_tag_cn31xx cn52xx;	/* 512 sets */
+};
+
+
+/**
+ * @INTERNAL
+ * Function to read an L2C tag.  This code makes the current core
+ * the 'debug core' for the L2.  This code must only be executed by
+ * one core at a time.
+ *
+ * @assoc:  Association (way) of the tag to dump
+ * @index:  Index of the cacheline
+ *
+ * Returns The Octeon model specific tag structure.  This is
+ *	   translated by a wrapper function to a generic form that is
+ *	   easier for applications to use.
+ */
+static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
+{
+
+	uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96);
+	uint64_t core = cvmx_get_core_num();
+	union __cvmx_l2c_tag tag_val;
+	uint64_t dbg_addr = CVMX_L2C_DBG;
+	unsigned long flags;
+	union cvmx_l2c_dbg debug_val;
+
+	debug_val.u64 = 0;
+	/*
+	 * For low core count parts, the core number is always small
+	 * enough to stay in the correct field and not set any
+	 * reserved bits.
+	 */
+	debug_val.s.ppnum = core;
+	debug_val.s.l2t = 1;
+	debug_val.s.set = assoc;
+
+	local_irq_save(flags);
+	/*
+	 * Make sure core is quiet (no prefetches, etc.) before
+	 * entering debug mode.
+	 */
+	CVMX_SYNC;
+	/* Flush L1 to make sure debug load misses L1 */
+	CVMX_DCACHE_INVALIDATE;
+
+	/*
+	 * The following must be done in assembly as when in debug
+	 * mode all data loads from L2 return special debug data, not
+	 * normal memory contents.  Also, interrupts must be disabled,
+	 * since if an interrupt occurs while in debug mode the ISR
+	 * will get debug data from all its memory reads instead of
+	 * the contents of memory.
+	 */
+
+	asm volatile (
+		".set push\n\t"
+		".set mips64\n\t"
+		".set noreorder\n\t"
+		"sd    %[dbg_val], 0(%[dbg_addr])\n\t"	 /* Enter debug mode, wait for store */
+		"ld    $0, 0(%[dbg_addr])\n\t"
+		"ld    %[tag_val], 0(%[tag_addr])\n\t"	 /* Read L2C tag data */
+		"sd    $0, 0(%[dbg_addr])\n\t"		/* Exit debug mode, wait for store */
+		"ld    $0, 0(%[dbg_addr])\n\t"
+		"cache 9, 0($0)\n\t"		 /* Invalidate dcache to discard debug data */
+		".set pop"
+		: [tag_val] "=r" (tag_val)
+		: [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr)
+		: "memory");
+
+	local_irq_restore(flags);
+
+	return tag_val;
+}
+
+
+union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
+{
+	union cvmx_l2c_tag tag;
+
+	tag.u64 = 0;
+	if ((int)association >= cvmx_l2c_get_num_assoc()) {
+		cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
+		return tag;
+	}
+	if ((int)index >= cvmx_l2c_get_num_sets()) {
+		cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n",
+			     (int)index, cvmx_l2c_get_num_sets());
+		return tag;
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+		union cvmx_l2c_tadx_tag l2c_tadx_tag;
+		uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+						(association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
+						(index << CVMX_L2C_IDX_ADDR_SHIFT));
+		/*
+		 * Use L2 cache Index load tag cache instruction, as
+		 * hardware loads the virtual tag for the L2 cache
+		 * block with the contents of L2C_TAD0_TAG
+		 * register.
+		 */
+		CVMX_CACHE_LTGL2I(address, 0);
+		CVMX_SYNC;   /* make sure CVMX_L2C_TADX_TAG is updated */
+		l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
+
+		tag.s.V	    = l2c_tadx_tag.s.valid;
+		tag.s.D	    = l2c_tadx_tag.s.dirty;
+		tag.s.L	    = l2c_tadx_tag.s.lock;
+		tag.s.U	    = l2c_tadx_tag.s.use;
+		tag.s.addr  = l2c_tadx_tag.s.tag;
+	} else {
+		union __cvmx_l2c_tag tmp_tag;
+		/* __read_l2_tag is intended for internal use only */
+		tmp_tag = __read_l2_tag(association, index);
+
+		/*
+		 * Convert all tag structure types to generic version,
+		 * as it can represent all models.
+		 */
+		if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+			tag.s.V	   = tmp_tag.cn58xx.V;
+			tag.s.D	   = tmp_tag.cn58xx.D;
+			tag.s.L	   = tmp_tag.cn58xx.L;
+			tag.s.U	   = tmp_tag.cn58xx.U;
+			tag.s.addr = tmp_tag.cn58xx.addr;
+		} else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+			tag.s.V	   = tmp_tag.cn38xx.V;
+			tag.s.D	   = tmp_tag.cn38xx.D;
+			tag.s.L	   = tmp_tag.cn38xx.L;
+			tag.s.U	   = tmp_tag.cn38xx.U;
+			tag.s.addr = tmp_tag.cn38xx.addr;
+		} else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+			tag.s.V	   = tmp_tag.cn31xx.V;
+			tag.s.D	   = tmp_tag.cn31xx.D;
+			tag.s.L	   = tmp_tag.cn31xx.L;
+			tag.s.U	   = tmp_tag.cn31xx.U;
+			tag.s.addr = tmp_tag.cn31xx.addr;
+		} else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
+			tag.s.V	   = tmp_tag.cn30xx.V;
+			tag.s.D	   = tmp_tag.cn30xx.D;
+			tag.s.L	   = tmp_tag.cn30xx.L;
+			tag.s.U	   = tmp_tag.cn30xx.U;
+			tag.s.addr = tmp_tag.cn30xx.addr;
+		} else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+			tag.s.V	   = tmp_tag.cn50xx.V;
+			tag.s.D	   = tmp_tag.cn50xx.D;
+			tag.s.L	   = tmp_tag.cn50xx.L;
+			tag.s.U	   = tmp_tag.cn50xx.U;
+			tag.s.addr = tmp_tag.cn50xx.addr;
+		} else {
+			cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+		}
+	}
+	return tag;
+}
+
+uint32_t cvmx_l2c_address_to_index(uint64_t addr)
+{
+	uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
+	int indxalias = 0;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+		union cvmx_l2c_ctl l2c_ctl;
+
+		l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
+		indxalias = !l2c_ctl.s.disidxalias;
+	} else {
+		union cvmx_l2c_cfg l2c_cfg;
+
+		l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+		indxalias = l2c_cfg.s.idxalias;
+	}
+
+	if (indxalias) {
+		if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+			uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
+
+			idx ^= idx / cvmx_l2c_get_num_sets();
+			idx ^= a_14_12;
+		} else {
+			idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
+		}
+	}
+	idx &= CVMX_L2C_IDX_MASK;
+	return idx;
+}
+
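+/*
+ * Illustrative pairing (phys_addr is a caller-supplied physical
+ * address; this mirrors what cvmx_l2c_unlock_line() does
+ * internally): map an address to its set, then walk every way of
+ * that set.
+ *
+ *	uint32_t set = cvmx_l2c_address_to_index(phys_addr);
+ *	for (way = 0; way < cvmx_l2c_get_num_assoc(); way++)
+ *		cvmx_l2c_flush_line(way, set);
+ */
+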
+int cvmx_l2c_get_cache_size_bytes(void)
+{
+	return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
+		CVMX_CACHE_LINE_SIZE;
+}
+
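+/*
+ * Worked example (assuming the usual 128-byte Octeon cache line): a
+ * CN63XX with no ways fused off has 1024 sets * 16 ways * 128 bytes
+ * = 2 MB, matching the l2c_crip table in cvmx_l2c_get_num_assoc()
+ * below.
+ */
+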
+/**
+ * Return log base 2 of the number of sets in the L2 cache.
+ *
+ * Returns the number of set bits.
+ */
+int cvmx_l2c_get_set_bits(void)
+{
+	int l2_set_bits;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
+		l2_set_bits = 11;	/* 2048 sets */
+	else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
+		l2_set_bits = 10;	/* 1024 sets */
+	else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
+		l2_set_bits = 9;	/* 512 sets */
+	else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
+		l2_set_bits = 8;	/* 256 sets */
+	else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
+		l2_set_bits = 7;	/* 128 sets */
+	else {
+		cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+		l2_set_bits = 11;	/* 2048 sets */
+	}
+	return l2_set_bits;
+}
+
+/* Return the number of sets in the L2 Cache */
+int cvmx_l2c_get_num_sets(void)
+{
+	return 1 << cvmx_l2c_get_set_bits();
+}
+
+/* Return the number of associations in the L2 Cache */
+int cvmx_l2c_get_num_assoc(void)
+{
+	int l2_assoc;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
+	    OCTEON_IS_MODEL(OCTEON_CN52XX) ||
+	    OCTEON_IS_MODEL(OCTEON_CN58XX) ||
+	    OCTEON_IS_MODEL(OCTEON_CN50XX) ||
+	    OCTEON_IS_MODEL(OCTEON_CN38XX))
+		l2_assoc = 8;
+	else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+		l2_assoc = 16;
+	else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
+		 OCTEON_IS_MODEL(OCTEON_CN30XX))
+		l2_assoc = 4;
+	else {
+		cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+		l2_assoc = 8;
+	}
+
+	/* Check to see if part of the cache is disabled */
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+		union cvmx_mio_fus_dat3 mio_fus_dat3;
+
+		mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
+		/*
+		 * cvmx_mio_fus_dat3.s.l2c_crip fuses map as follows:
+		 * <2> is not used for 63xx
+		 * <1> disables 1/2 ways
+		 * <0> disables 1/4 ways
+		 * They are cumulative, so for 63xx:
+		 * <1> <0>
+		 *  0   0	16-way 2MB cache
+		 *  0   1	12-way 1.5MB cache
+		 *  1   0	8-way 1MB cache
+		 *  1   1	4-way 512KB cache
+		 */
+
+		if (mio_fus_dat3.s.l2c_crip == 3)
+			l2_assoc = 4;
+		else if (mio_fus_dat3.s.l2c_crip == 2)
+			l2_assoc = 8;
+		else if (mio_fus_dat3.s.l2c_crip == 1)
+			l2_assoc = 12;
+	} else {
+		uint64_t l2d_fus3;
+
+		l2d_fus3 = cvmx_read_csr(CVMX_L2D_FUS3);
+		/*
+		 * Using shifts here, as bit position names are
+		 * different for each model but they all mean the
+		 * same.
+		 */
+		if ((l2d_fus3 >> 35) & 0x1)
+			l2_assoc = l2_assoc >> 2;
+		else if ((l2d_fus3 >> 34) & 0x1)
+			l2_assoc = l2_assoc >> 1;
+	}
+	return l2_assoc;
+}
+
+/**
+ * Flush a line from the L2 cache
+ * This should only be called from one core at a time, as this routine
+ * sets the core to the 'debug' core in order to flush the line.
+ *
+ * @assoc:  Association (or way) to flush
+ * @index:  Index to flush
+ */
+void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
+{
+	/* Check the range of the index. */
+	if (index > (uint32_t)cvmx_l2c_get_num_sets()) {
+		cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n");
+		return;
+	}
+
+	/* Check the range of association. */
+	if (assoc > (uint32_t)cvmx_l2c_get_num_assoc()) {
+		cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n");
+		return;
+	}
+
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+		uint64_t address;
+		/* Create the address based on index and association.
+		 * Bits<20:17> select the way of the cache block involved in
+		 *	       the operation
+		 * Bits<16:7> of the effective address select the index
+		 */
+		address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+				(assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
+				(index << CVMX_L2C_IDX_ADDR_SHIFT));
+		CVMX_CACHE_WBIL2I(address, 0);
+	} else {
+		union cvmx_l2c_dbg l2cdbg;
+
+		l2cdbg.u64 = 0;
+		if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
+			l2cdbg.s.ppnum = cvmx_get_core_num();
+		l2cdbg.s.finv = 1;
+
+		l2cdbg.s.set = assoc;
+		cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+		/*
+		 * Enter debug mode, and make sure all other writes
+		 * complete before we enter debug mode
+		 */
+		CVMX_SYNC;
+		cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+		cvmx_read_csr(CVMX_L2C_DBG);
+
+		CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+						    index * CVMX_CACHE_LINE_SIZE),
+				       0);
+		/* Exit debug mode */
+		CVMX_SYNC;
+		cvmx_write_csr(CVMX_L2C_DBG, 0);
+		cvmx_read_csr(CVMX_L2C_DBG);
+		cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+	}
+}
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-pko.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-pko.c
new file mode 100644
index 0000000..676fab5
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-pko.c
@@ -0,0 +1,646 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Support library for the hardware Packet Output unit.
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-helper.h>
+
+/**
+ * Map an (interface, index) pair to the PKO interrupt/engine number
+ * that __cvmx_pko_port_map_o68() programs into the 'intr' and 'eid'
+ * fields of PKO_MEM_IPORT_PTRS.
+ */
+
+static int __cvmx_pko_int(int interface, int index)
+{
+	switch (interface) {
+	case 0:
+		return index;
+	case 1:
+		return 4;
+	case 2:
+		return index + 0x08;
+	case 3:
+		return index + 0x0c;
+	case 4:
+		return index + 0x10;
+	case 5:
+		return 0x1c;
+	case 6:
+		return 0x1d;
+	case 7:
+		return 0x1e;
+	case 8:
+		return 0x1f;
+	default:
+		return -1;
+	}
+}
+
+static void __cvmx_pko_iport_config(int pko_port)
+{
+	int queue;
+	const int num_queues = 1;
+	const int base_queue = pko_port;
+	const int static_priority_end = 1;
+	const int static_priority_base = 1;
+
+	for (queue = 0; queue < num_queues; queue++) {
+		union cvmx_pko_mem_iqueue_ptrs config;
+		cvmx_cmd_queue_result_t cmd_res;
+		uint64_t *buf_ptr;
+
+		config.u64		= 0;
+		config.s.index		= queue;
+		config.s.qid		= base_queue + queue;
+		config.s.ipid		= pko_port;
+		config.s.tail		= (queue == (num_queues - 1));
+		config.s.s_tail		= (queue == static_priority_end);
+		config.s.static_p	= (static_priority_base >= 0);
+		config.s.static_q	= (queue <= static_priority_end);
+		config.s.qos_mask	= 0xff;
+
+		cmd_res = cvmx_cmd_queue_initialize(
+				CVMX_CMD_QUEUE_PKO(base_queue + queue),
+				CVMX_PKO_MAX_QUEUE_DEPTH,
+				CVMX_FPA_OUTPUT_BUFFER_POOL,
+				(CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE -
+				 CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST * 8));
+
+		WARN(cmd_res,
+		     "%s: cmd_res=%d pko_port=%d base_queue=%d num_queues=%d queue=%d\n",
+			__func__, (int)cmd_res, pko_port, base_queue,
+			num_queues, queue);
+
+		buf_ptr = (uint64_t *)cvmx_cmd_queue_buffer(
+				CVMX_CMD_QUEUE_PKO(base_queue + queue));
+		config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr) >> 7;
+		CVMX_SYNCWS;
+		cvmx_write_csr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
+	}
+}
+
+static void __cvmx_pko_queue_alloc_o68(void)
+{
+	int port;
+
+	for (port = 0; port < 48; port++)
+		__cvmx_pko_iport_config(port);
+}
+
+static void __cvmx_pko_port_map_o68(void)
+{
+	int port;
+	int interface, index;
+	cvmx_helper_interface_mode_t mode;
+	union cvmx_pko_mem_iport_ptrs config;
+
+	/*
+	 * Initialize every iport with the invalid eid.
+	 */
+	config.u64 = 0;
+	config.s.eid = 31; /* Invalid */
+	for (port = 0; port < 128; port++) {
+		config.s.ipid = port;
+		cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+	}
+
+	/*
+	 * Set up PKO_MEM_IPORT_PTRS
+	 */
+	for (port = 0; port < 48; port++) {
+		interface = cvmx_helper_get_interface_num(port);
+		index = cvmx_helper_get_interface_index_num(port);
+		mode = cvmx_helper_interface_get_mode(interface);
+		if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
+			continue;
+
+		config.s.ipid = port;
+		config.s.qos_mask = 0xff;
+		config.s.crc = 1;
+		config.s.min_pkt = 1;
+		config.s.intr = __cvmx_pko_int(interface, index);
+		config.s.eid = config.s.intr;
+		config.s.pipe = (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) ?
+			index : port;
+		cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+	}
+}
+
+static void __cvmx_pko_chip_init(void)
+{
+	int i;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		__cvmx_pko_port_map_o68();
+		__cvmx_pko_queue_alloc_o68();
+		return;
+	}
+
+	/*
+	 * Initialize queues
+	 */
+	for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++) {
+		const uint64_t priority = 8;
+
+		cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
+				     &priority);
+	}
+}
+
+/**
+ * Call this function before any other packet output calls to
+ * initialize the packet output system. It performs chip-global
+ * configuration and should be done by only one core.
+ */
+
+void cvmx_pko_initialize_global(void)
+{
+	union cvmx_pko_reg_cmd_buf config;
+
+	/*
+	 * Set the size of the PKO command buffers to an odd number of
+	 * 64bit words. This allows the normal two word send to stay
+	 * aligned and never span a command word buffer.
+	 */
+	config.u64 = 0;
+	config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
+	config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1;
+
+	cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);
+
+	/*
+	 * Chip-specific setup.
+	 */
+	__cvmx_pko_chip_init();
+
+	/*
+	 * If we aren't using all of the queues optimize PKO's
+	 * internal memory.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)
+	    || OCTEON_IS_MODEL(OCTEON_CN56XX)
+	    || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+		int num_interfaces = cvmx_helper_get_number_of_interfaces();
+		int last_port =
+		    cvmx_helper_get_last_ipd_port(num_interfaces - 1);
+		int max_queues =
+		    cvmx_pko_get_base_queue(last_port) +
+		    cvmx_pko_get_num_queues(last_port);
+		if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+			if (max_queues <= 32)
+				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
+			else if (max_queues <= 64)
+				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
+		} else {
+			if (max_queues <= 64)
+				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
+			else if (max_queues <= 128)
+				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
+		}
+	}
+}
+
+/**
+ * This function does per-core initialization required by the PKO routines.
+ * This must be called on all cores that will do packet output, and must
+ * be called after the FPA has been initialized and filled with pages.
+ *
+ * Returns 0 on success
+ *	   !0 on failure
+ */
+int cvmx_pko_initialize_local(void)
+{
+	/* Nothing to do */
+	return 0;
+}
+
+/**
+ * Enables the packet output hardware. It must already be
+ * configured.
+ */
+void cvmx_pko_enable(void)
+{
+	union cvmx_pko_reg_flags flags;
+
+	flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+	if (flags.s.ena_pko)
+		cvmx_dprintf
+		    ("Warning: Enabling PKO when PKO already enabled.\n");
+
+	flags.s.ena_dwb = 1;
+	flags.s.ena_pko = 1;
+	/*
+	 * Always enable big endian for 3-word command. Does nothing
+	 * for 2-word.
+	 */
+	flags.s.store_be = 1;
+	cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64);
+}
+
+/**
+ * Disables the packet output. Does not affect any configuration.
+ */
+void cvmx_pko_disable(void)
+{
+	union cvmx_pko_reg_flags pko_reg_flags;
+	pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+	pko_reg_flags.s.ena_pko = 0;
+	cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
+}
+EXPORT_SYMBOL_GPL(cvmx_pko_disable);
+
+/**
+ * Reset the packet output.
+ */
+static void __cvmx_pko_reset(void)
+{
+	union cvmx_pko_reg_flags pko_reg_flags;
+	pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+	pko_reg_flags.s.reset = 1;
+	cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
+}
+
+/**
+ * Shutdown and free resources required by packet output.
+ */
+void cvmx_pko_shutdown(void)
+{
+	union cvmx_pko_mem_queue_ptrs config;
+	int queue;
+
+	cvmx_pko_disable();
+
+	for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
+		config.u64 = 0;
+		config.s.tail = 1;
+		config.s.index = 0;
+		config.s.port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
+		config.s.queue = queue & 0x7f;
+		config.s.qos_mask = 0;
+		config.s.buf_ptr = 0;
+		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+			union cvmx_pko_reg_queue_ptrs1 config1;
+			config1.u64 = 0;
+			config1.s.qid7 = queue >> 7;
+			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
+		}
+		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
+		cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
+	}
+	__cvmx_pko_reset();
+}
+EXPORT_SYMBOL_GPL(cvmx_pko_shutdown);
+
+/**
+ * Configure an output port and the associated queues for use.
+ *
+ * @port:	Port to configure.
+ * @base_queue: First queue number to associate with this port.
+ * @num_queues: Number of queues to associate with this port.
+ * @priority:	Array of priority levels for each queue. Values are
+ *		     allowed to be 0-8. A value of 8 gets 8 times the traffic
+ *		     of a value of 1. A value of 0 indicates that the queue
+ *		     does not participate in any rounds. These priorities can
+ *		     be changed on the fly while the PKO is enabled. A
+ *		     priority of 9 indicates that static priority should be
+ *		     used. If static priority is used, all queues with static
+ *		     priority must be contiguous starting at the base_queue,
+ *		     and lower numbered queues have higher priority than
+ *		     higher numbered queues. There must be num_queues
+ *		     elements in the array.
+ */
+cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
+				       uint64_t num_queues,
+				       const uint64_t priority[])
+{
+	cvmx_pko_status_t result_code;
+	uint64_t queue;
+	union cvmx_pko_mem_queue_ptrs config;
+	union cvmx_pko_reg_queue_ptrs1 config1;
+	int static_priority_base = -1;
+	int static_priority_end = -1;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return CVMX_PKO_SUCCESS;
+
+	if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
+	    && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
+		cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
+			     (unsigned long long)port);
+		return CVMX_PKO_INVALID_PORT;
+	}
+
+	if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
+		cvmx_dprintf
+		    ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
+		     (unsigned long long)(base_queue + num_queues));
+		return CVMX_PKO_INVALID_QUEUE;
+	}
+
+	if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
+		/*
+		 * Validate the static queue priority setup and set
+		 * static_priority_base and static_priority_end
+		 * accordingly.
+		 */
+		for (queue = 0; queue < num_queues; queue++) {
+			/* Find first queue of static priority */
+			if (static_priority_base == -1
+			    && priority[queue] ==
+			    CVMX_PKO_QUEUE_STATIC_PRIORITY)
+				static_priority_base = queue;
+			/* Find last queue of static priority */
+			if (static_priority_base != -1
+			    && static_priority_end == -1
+			    && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY
+			    && queue)
+				static_priority_end = queue - 1;
+			else if (static_priority_base != -1
+				 && static_priority_end == -1
+				 && queue == num_queues - 1)
+				/* all queues are static priority */
+				static_priority_end = queue;
+			/*
+			 * Check to make sure all static priority
+			 * queues are contiguous.  Also catches some
+			 * cases of static priorities not starting at
+			 * queue 0.
+			 */
+			if (static_priority_end != -1
+			    && (int)queue > static_priority_end
+			    && priority[queue] ==
+			    CVMX_PKO_QUEUE_STATIC_PRIORITY) {
+				cvmx_dprintf("ERROR: cvmx_pko_config_port: "
+					     "Static priority queues aren't "
+					     "contiguous or don't start at "
+					     "base queue. q: %d, eq: %d\n",
+					(int)queue, static_priority_end);
+				return CVMX_PKO_INVALID_PRIORITY;
+			}
+		}
+		if (static_priority_base > 0) {
+			cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
+				     "priority queues don't start at base "
+				     "queue. sq: %d\n",
+				static_priority_base);
+			return CVMX_PKO_INVALID_PRIORITY;
+		}
+#if 0
+		cvmx_dprintf("Port %d: Static priority queue base: %d, "
+			     "end: %d\n", port,
+			static_priority_base, static_priority_end);
+#endif
+	}
+	/*
+	 * At this point, static_priority_base and static_priority_end
+	 * are either both -1, or are valid start/end queue
+	 * numbers.
+	 */
+
+	result_code = CVMX_PKO_SUCCESS;
+
+#ifdef PKO_DEBUG
+	cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
+		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
+		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
+#endif
+
+	for (queue = 0; queue < num_queues; queue++) {
+		uint64_t *buf_ptr = NULL;
+
+		config1.u64 = 0;
+		config1.s.idx3 = queue >> 3;
+		config1.s.qid7 = (base_queue + queue) >> 7;
+
+		config.u64 = 0;
+		config.s.tail = queue == (num_queues - 1);
+		config.s.index = queue;
+		config.s.port = port;
+		config.s.queue = base_queue + queue;
+
+		if (!cvmx_octeon_is_pass1()) {
+			config.s.static_p = static_priority_base >= 0;
+			config.s.static_q = (int)queue <= static_priority_end;
+			config.s.s_tail = (int)queue == static_priority_end;
+		}
+		/*
+		 * Convert the priority into an enable bit field. Try
+		 * to space the bits out evenly so the packets don't
+		 * get grouped up.
+		 */
+		switch ((int)priority[queue]) {
+		case 0:
+			config.s.qos_mask = 0x00;
+			break;
+		case 1:
+			config.s.qos_mask = 0x01;
+			break;
+		case 2:
+			config.s.qos_mask = 0x11;
+			break;
+		case 3:
+			config.s.qos_mask = 0x49;
+			break;
+		case 4:
+			config.s.qos_mask = 0x55;
+			break;
+		case 5:
+			config.s.qos_mask = 0x57;
+			break;
+		case 6:
+			config.s.qos_mask = 0x77;
+			break;
+		case 7:
+			config.s.qos_mask = 0x7f;
+			break;
+		case 8:
+			config.s.qos_mask = 0xff;
+			break;
+		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
+			/* Pass 1 will fall through to the error case */
+			if (!cvmx_octeon_is_pass1()) {
+				config.s.qos_mask = 0xff;
+				break;
+			}
+		default:
+			cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
+				     "priority %llu\n",
+				(unsigned long long)priority[queue]);
+			config.s.qos_mask = 0xff;
+			result_code = CVMX_PKO_INVALID_PRIORITY;
+			break;
+		}
+
+		if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
+			cvmx_cmd_queue_result_t cmd_res =
+			    cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
+						      (base_queue + queue),
+						      CVMX_PKO_MAX_QUEUE_DEPTH,
+						      CVMX_FPA_OUTPUT_BUFFER_POOL,
+						      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
+						      -
+						      CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
+						      * 8);
+			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
+				switch (cmd_res) {
+				case CVMX_CMD_QUEUE_NO_MEMORY:
+					cvmx_dprintf("ERROR: "
+						     "cvmx_pko_config_port: "
+						     "Unable to allocate "
+						     "output buffer.\n");
+					return CVMX_PKO_NO_MEMORY;
+				case CVMX_CMD_QUEUE_ALREADY_SETUP:
+					cvmx_dprintf
+					    ("ERROR: cvmx_pko_config_port: Port already setup.\n");
+					return CVMX_PKO_PORT_ALREADY_SETUP;
+				case CVMX_CMD_QUEUE_INVALID_PARAM:
+				default:
+					cvmx_dprintf
+					    ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
+					return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
+				}
+			}
+
+			buf_ptr =
+			    (uint64_t *)
+			    cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
+						  (base_queue + queue));
+			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
+		} else
+			config.s.buf_ptr = 0;
+
+		CVMX_SYNCWS;
+
+		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
+			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
+		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
+	}
+
+	return result_code;
+}
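+
+/*
+ * Minimal usage sketch (the port and queue numbers are arbitrary
+ * examples): give a port one static-priority queue followed by one
+ * weighted queue. The static queue sits at the base queue, which
+ * satisfies the contiguity checks above.
+ *
+ *	static const uint64_t prio[2] = {
+ *		CVMX_PKO_QUEUE_STATIC_PRIORITY, 8
+ *	};
+ *
+ *	cvmx_pko_initialize_global();
+ *	if (cvmx_pko_config_port(16, 0, 2, prio) == CVMX_PKO_SUCCESS)
+ *		cvmx_pko_enable();
+ */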
+
+#ifdef PKO_DEBUG
+/**
+ * Show map of ports -> queues for different cores.
+ */
+void cvmx_pko_show_queue_map(void)
+{
+	int core, port;
+	int pko_output_ports = 36;
+
+	cvmx_dprintf("port");
+	for (port = 0; port < pko_output_ports; port++)
+		cvmx_dprintf("%3d ", port);
+	cvmx_dprintf("\n");
+
+	for (core = 0; core < CVMX_MAX_CORES; core++) {
+		cvmx_dprintf("\n%2d: ", core);
+		for (port = 0; port < pko_output_ports; port++) {
+			cvmx_dprintf("%3d ",
+				     cvmx_pko_get_base_queue_per_core(port,
+								      core));
+		}
+	}
+	cvmx_dprintf("\n");
+}
+#endif
+
+/**
+ * Rate limit a PKO port to a max packets/sec. This function is only
+ * supported on CN51XX and higher, excluding CN58XX.
+ *
+ * @port:      Port to rate limit
+ * @packets_s: Maximum packets/sec
+ * @burst:     Maximum number of packets to burst in a row before rate
+ *		    limiting cuts in.
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
+{
+	union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
+	union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
+
+	pko_mem_port_rate0.u64 = 0;
+	pko_mem_port_rate0.s.pid = port;
+	pko_mem_port_rate0.s.rate_pkt =
+	    cvmx_sysinfo_get()->cpu_clock_hz / packets_s / 16;
+	/* No cost per word since we are limited by packets/sec, not bits/sec */
+	pko_mem_port_rate0.s.rate_word = 0;
+
+	pko_mem_port_rate1.u64 = 0;
+	pko_mem_port_rate1.s.pid = port;
+	pko_mem_port_rate1.s.rate_lim =
+	    ((uint64_t) pko_mem_port_rate0.s.rate_pkt * burst) >> 8;
+
+	cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
+	cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
+	return 0;
+}
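+
+/*
+ * Worked example (assuming an 800 MHz core clock): limiting a port to
+ * 10000 packets/sec gives rate_pkt = 800000000 / 10000 / 16 = 5000,
+ * and a burst of 32 packets gives rate_lim = (5000 * 32) >> 8 = 625.
+ */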
+
+/**
+ * Rate limit a PKO port to a max bits/sec. This function is only
+ * supported on CN51XX and higher, excluding CN58XX.
+ *
+ * @port:   Port to rate limit
+ * @bits_s: PKO rate limit in bits/sec
+ * @burst:  Maximum number of bits to burst before rate
+ *		 limiting cuts in.
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst)
+{
+	union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
+	union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
+	uint64_t clock_rate = cvmx_sysinfo_get()->cpu_clock_hz;
+	uint64_t tokens_per_bit = clock_rate * 16 / bits_s;
+
+	pko_mem_port_rate0.u64 = 0;
+	pko_mem_port_rate0.s.pid = port;
+	/*
+	 * Each packet has 12 bytes of interframe gap, an 8 byte
+	 * preamble, and a 4 byte CRC. These are not included in the
+	 * per word count. Multiply by 8 to convert to bits and divide
+	 * by 256 for limit granularity.
+	 */
+	pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256;
+	/* Each 8 byte word has 64bits */
+	pko_mem_port_rate0.s.rate_word = 64 * tokens_per_bit;
+
+	pko_mem_port_rate1.u64 = 0;
+	pko_mem_port_rate1.s.pid = port;
+	pko_mem_port_rate1.s.rate_lim = tokens_per_bit * burst / 256;
+
+	cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
+	cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
+	return 0;
+}
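+
+/*
+ * Worked example (assuming an 800 MHz core clock): limiting a port to
+ * 1 Gbit/s gives tokens_per_bit = 800000000 * 16 / 1000000000 = 12
+ * (integer division), so rate_word = 64 * 12 = 768 and the fixed
+ * per-packet overhead costs rate_pkt = (12 + 8 + 4) * 8 * 12 / 256 = 9.
+ */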
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-spi.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-spi.c
new file mode 100644
index 0000000..459e3b1
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-spi.c
@@ -0,0 +1,668 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Support library for the SPI
+ */
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-spi.h>
+
+#include <asm/octeon/cvmx-spxx-defs.h>
+#include <asm/octeon/cvmx-stxx-defs.h>
+#include <asm/octeon/cvmx-srxx-defs.h>
+
+#define INVOKE_CB(function_p, args...)		\
+	do {					\
+		if (function_p) {		\
+			res = function_p(args); \
+			if (res)		\
+				return res;	\
+		}				\
+	} while (0)
+
+#if CVMX_ENABLE_DEBUG_PRINTS
+static const char *modes[] =
+    { "UNKNOWN", "TX Halfplex", "Rx Halfplex", "Duplex" };
+#endif
+
+/* Default callbacks, can be overridden
+ *  using cvmx_spi_get_callbacks/cvmx_spi_set_callbacks
+ */
+static cvmx_spi_callbacks_t cvmx_spi_callbacks = {
+	.reset_cb = cvmx_spi_reset_cb,
+	.calendar_setup_cb = cvmx_spi_calendar_setup_cb,
+	.clock_detect_cb = cvmx_spi_clock_detect_cb,
+	.training_cb = cvmx_spi_training_cb,
+	.calendar_sync_cb = cvmx_spi_calendar_sync_cb,
+	.interface_up_cb = cvmx_spi_interface_up_cb
+};
+
+/**
+ * Get current SPI4 initialization callbacks
+ *
+ * @callbacks:	Pointer to the callbacks structure to fill; on return
+ *		it holds a copy of the active callbacks.
+ */
+void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks)
+{
+	memcpy(callbacks, &cvmx_spi_callbacks, sizeof(cvmx_spi_callbacks));
+}
+
+/**
+ * Set new SPI4 initialization callbacks
+ *
+ * @new_callbacks:  Pointer to an updated callbacks structure.
+ */
+void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks)
+{
+	memcpy(&cvmx_spi_callbacks, new_callbacks, sizeof(cvmx_spi_callbacks));
+}
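+
+/*
+ * Override sketch (my_reset_cb is a hypothetical board hook): a single
+ * stage can be replaced while the rest of the default sequence is
+ * kept, typically by doing board-specific work and then delegating to
+ * the default callback:
+ *
+ *	static int my_reset_cb(int interface, cvmx_spi_mode_t mode)
+ *	{
+ *		return cvmx_spi_reset_cb(interface, mode);
+ *	}
+ *
+ *	cvmx_spi_callbacks_t cb;
+ *
+ *	cvmx_spi_get_callbacks(&cb);
+ *	cb.reset_cb = my_reset_cb;
+ *	cvmx_spi_set_callbacks(&cb);
+ */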
+
+/**
+ * Initialize and start the SPI interface.
+ *
+ * @interface: The identifier of the packet interface to configure and
+ *		    use as a SPI interface.
+ * @mode:      The operating mode for the SPI interface. The interface
+ *		    can operate as a full duplex (both Tx and Rx data paths
+ *		    active) or as a halfplex (either the Tx data path is
+ *		    active or the Rx data path is active, but not both).
+ * @timeout:   Timeout to wait for clock synchronization in seconds
+ * @num_ports: Number of SPI ports to configure
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout,
+			     int num_ports)
+{
+	int res = -1;
+
+	if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+		return res;
+
+	/* Callback to perform SPI4 reset */
+	INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);
+
+	/* Callback to perform calendar setup */
+	INVOKE_CB(cvmx_spi_callbacks.calendar_setup_cb, interface, mode,
+		  num_ports);
+
+	/* Callback to perform clock detection */
+	INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
+
+	/* Callback to perform SPI4 link training */
+	INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);
+
+	/* Callback to perform calendar sync */
+	INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode,
+		  timeout);
+
+	/* Callback to handle interface coming up */
+	INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);
+
+	return res;
+}
+
+/**
+ * This routine restarts the SPI interface after it has lost synchronization
+ * with its correspondent system.
+ *
+ * @interface: The identifier of the packet interface to configure and
+ *		    use as a SPI interface.
+ * @mode:      The operating mode for the SPI interface. The interface
+ *		    can operate as a full duplex (both Tx and Rx data paths
+ *		    active) or as a halfplex (either the Tx data path is
+ *		    active or the Rx data path is active, but not both).
+ * @timeout:   Timeout to wait for clock synchronization in seconds
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+	int res = -1;
+
+	if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+		return res;
+
+	cvmx_dprintf("SPI%d: Restart %s\n", interface, modes[mode]);
+
+	/* Callback to perform SPI4 reset */
+	INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);
+
+	/* NOTE: Calendar setup is not performed during restart */
+	/*	 Refer to cvmx_spi_start_interface() for the full sequence */
+
+	/* Callback to perform clock detection */
+	INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
+
+	/* Callback to perform SPI4 link training */
+	INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);
+
+	/* Callback to perform calendar sync */
+	INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode,
+		  timeout);
+
+	/* Callback to handle interface coming up */
+	INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(cvmx_spi_restart_interface);
+
+/**
+ * Callback to perform SPI4 reset
+ *
+ * @interface: The identifier of the packet interface to configure and
+ *		    use as a SPI interface.
+ * @mode:      The operating mode for the SPI interface. The interface
+ *		    can operate as a full duplex (both Tx and Rx data paths
+ *		    active) or as a halfplex (either the Tx data path is
+ *		    active or the Rx data path is active, but not both).
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
+{
+	union cvmx_spxx_dbg_deskew_ctl spxx_dbg_deskew_ctl;
+	union cvmx_spxx_clk_ctl spxx_clk_ctl;
+	union cvmx_spxx_bist_stat spxx_bist_stat;
+	union cvmx_spxx_int_msk spxx_int_msk;
+	union cvmx_stxx_int_msk stxx_int_msk;
+	union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
+	int index;
+	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
+
+	/* Disable SPI error events while we run BIST */
+	spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
+	cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
+	stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
+	cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
+
+	/* Run BIST in the SPI interface */
+	cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0);
+	cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0);
+	spxx_clk_ctl.u64 = 0;
+	spxx_clk_ctl.s.runbist = 1;
+	cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+	cvmx_wait(10 * MS);
+	spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
+	if (spxx_bist_stat.s.stat0)
+		cvmx_dprintf
+		    ("ERROR SPI%d: BIST failed on receive datapath FIFO\n",
+		     interface);
+	if (spxx_bist_stat.s.stat1)
+		cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n",
+			     interface);
+	if (spxx_bist_stat.s.stat2)
+		cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n",
+			     interface);
+
+	/* Clear the calendar table after BIST to fix parity errors */
+	for (index = 0; index < 32; index++) {
+		union cvmx_srxx_spi4_calx srxx_spi4_calx;
+		union cvmx_stxx_spi4_calx stxx_spi4_calx;
+
+		srxx_spi4_calx.u64 = 0;
+		srxx_spi4_calx.s.oddpar = 1;
+		cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
+			       srxx_spi4_calx.u64);
+
+		stxx_spi4_calx.u64 = 0;
+		stxx_spi4_calx.s.oddpar = 1;
+		cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
+			       stxx_spi4_calx.u64);
+	}
+
+	/* Re-enable reporting of error interrupts */
+	cvmx_write_csr(CVMX_SPXX_INT_REG(interface),
+		       cvmx_read_csr(CVMX_SPXX_INT_REG(interface)));
+	cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
+	cvmx_write_csr(CVMX_STXX_INT_REG(interface),
+		       cvmx_read_csr(CVMX_STXX_INT_REG(interface)));
+	cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
+
+	/* Setup the CLKDLY right in the middle */
+	spxx_clk_ctl.u64 = 0;
+	spxx_clk_ctl.s.seetrn = 0;
+	spxx_clk_ctl.s.clkdly = 0x10;
+	spxx_clk_ctl.s.runbist = 0;
+	spxx_clk_ctl.s.statdrv = 0;
+	/* This should always be on the opposite edge as statdrv */
+	spxx_clk_ctl.s.statrcv = 1;
+	spxx_clk_ctl.s.sndtrn = 0;
+	spxx_clk_ctl.s.drptrn = 0;
+	spxx_clk_ctl.s.rcvtrn = 0;
+	spxx_clk_ctl.s.srxdlck = 0;
+	cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+	cvmx_wait(100 * MS);
+
+	/* Reset SRX0 DLL */
+	spxx_clk_ctl.s.srxdlck = 1;
+	cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+
+	/* Wait for the Inf0 SPI4 RX DLL to lock */
+	cvmx_wait(100 * MS);
+
+	/* Enable dynamic alignment */
+	spxx_trn4_ctl.s.trntest = 0;
+	spxx_trn4_ctl.s.jitter = 1;
+	spxx_trn4_ctl.s.clr_boot = 1;
+	spxx_trn4_ctl.s.set_boot = 0;
+	if (OCTEON_IS_MODEL(OCTEON_CN58XX))
+		spxx_trn4_ctl.s.maxdist = 3;
+	else
+		spxx_trn4_ctl.s.maxdist = 8;
+	spxx_trn4_ctl.s.macro_en = 1;
+	spxx_trn4_ctl.s.mux_en = 1;
+	cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
+
+	spxx_dbg_deskew_ctl.u64 = 0;
+	cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface),
+		       spxx_dbg_deskew_ctl.u64);
+
+	return 0;
+}
+
+/**
+ * Callback to setup calendar and miscellaneous settings before clock detection
+ *
+ * @interface: The identifier of the packet interface to configure and
+ *		    use as a SPI interface.
+ * @mode:      The operating mode for the SPI interface. The interface
+ *		    can operate as a full duplex (both Tx and Rx data paths
+ *		    active) or as a halfplex (either the Tx data path is
+ *		    active or the Rx data path is active, but not both).
+ * @num_ports: Number of ports to configure on SPI
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
+			       int num_ports)
+{
+	int port;
+	int index;
+	if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
+		union cvmx_srxx_com_ctl srxx_com_ctl;
+		union cvmx_srxx_spi4_stat srxx_spi4_stat;
+
+		/* SRX0 number of Ports */
+		srxx_com_ctl.u64 = 0;
+		srxx_com_ctl.s.prts = num_ports - 1;
+		srxx_com_ctl.s.st_en = 0;
+		srxx_com_ctl.s.inf_en = 0;
+		cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
+
+		/* SRX0 Calendar Table. This round-robins through all ports */
+		port = 0;
+		index = 0;
+		while (port < num_ports) {
+			union cvmx_srxx_spi4_calx srxx_spi4_calx;
+			srxx_spi4_calx.u64 = 0;
+			srxx_spi4_calx.s.prt0 = port++;
+			srxx_spi4_calx.s.prt1 = port++;
+			srxx_spi4_calx.s.prt2 = port++;
+			srxx_spi4_calx.s.prt3 = port++;
+			srxx_spi4_calx.s.oddpar =
+			    ~(cvmx_dpop(srxx_spi4_calx.u64) & 1);
+			cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
+				       srxx_spi4_calx.u64);
+			index++;
+		}
+		srxx_spi4_stat.u64 = 0;
+		srxx_spi4_stat.s.len = num_ports;
+		srxx_spi4_stat.s.m = 1;
+		cvmx_write_csr(CVMX_SRXX_SPI4_STAT(interface),
+			       srxx_spi4_stat.u64);
+	}
+
+	if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
+		union cvmx_stxx_arb_ctl stxx_arb_ctl;
+		union cvmx_gmxx_tx_spi_max gmxx_tx_spi_max;
+		union cvmx_gmxx_tx_spi_thresh gmxx_tx_spi_thresh;
+		union cvmx_gmxx_tx_spi_ctl gmxx_tx_spi_ctl;
+		union cvmx_stxx_spi4_stat stxx_spi4_stat;
+		union cvmx_stxx_spi4_dat stxx_spi4_dat;
+
+		/* STX0 Config */
+		stxx_arb_ctl.u64 = 0;
+		stxx_arb_ctl.s.igntpa = 0;
+		stxx_arb_ctl.s.mintrn = 0;
+		cvmx_write_csr(CVMX_STXX_ARB_CTL(interface), stxx_arb_ctl.u64);
+
+		gmxx_tx_spi_max.u64 = 0;
+		gmxx_tx_spi_max.s.max1 = 8;
+		gmxx_tx_spi_max.s.max2 = 4;
+		gmxx_tx_spi_max.s.slice = 0;
+		cvmx_write_csr(CVMX_GMXX_TX_SPI_MAX(interface),
+			       gmxx_tx_spi_max.u64);
+
+		gmxx_tx_spi_thresh.u64 = 0;
+		gmxx_tx_spi_thresh.s.thresh = 4;
+		cvmx_write_csr(CVMX_GMXX_TX_SPI_THRESH(interface),
+			       gmxx_tx_spi_thresh.u64);
+
+		gmxx_tx_spi_ctl.u64 = 0;
+		gmxx_tx_spi_ctl.s.tpa_clr = 0;
+		gmxx_tx_spi_ctl.s.cont_pkt = 0;
+		cvmx_write_csr(CVMX_GMXX_TX_SPI_CTL(interface),
+			       gmxx_tx_spi_ctl.u64);
+
+		/* STX0 Training Control */
+		stxx_spi4_dat.u64 = 0;
+		/* Minimum needed by dynamic alignment */
+		stxx_spi4_dat.s.alpha = 32;
+		stxx_spi4_dat.s.max_t = 0xFFFF; /* Minimum interval is 0x20 */
+		cvmx_write_csr(CVMX_STXX_SPI4_DAT(interface),
+			       stxx_spi4_dat.u64);
+
+		/* STX0 Calendar Table. This round-robins through all ports */
+		port = 0;
+		index = 0;
+		while (port < num_ports) {
+			union cvmx_stxx_spi4_calx stxx_spi4_calx;
+			stxx_spi4_calx.u64 = 0;
+			stxx_spi4_calx.s.prt0 = port++;
+			stxx_spi4_calx.s.prt1 = port++;
+			stxx_spi4_calx.s.prt2 = port++;
+			stxx_spi4_calx.s.prt3 = port++;
+			stxx_spi4_calx.s.oddpar =
+			    ~(cvmx_dpop(stxx_spi4_calx.u64) & 1);
+			cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
+				       stxx_spi4_calx.u64);
+			index++;
+		}
+		stxx_spi4_stat.u64 = 0;
+		stxx_spi4_stat.s.len = num_ports;
+		stxx_spi4_stat.s.m = 1;
+		cvmx_write_csr(CVMX_STXX_SPI4_STAT(interface),
+			       stxx_spi4_stat.u64);
+	}
+
+	return 0;
+}
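+
+/*
+ * Parity note with a worked example: cvmx_dpop() is a population
+ * count, so oddpar = ~(cvmx_dpop(entry) & 1) sets the parity bit
+ * exactly when the rest of the entry holds an even number of 1 bits.
+ * A calendar entry carrying ports 0, 1, 2 and 3, for instance, has
+ * popcount 4, so oddpar becomes 1 and the stored word carries odd
+ * parity overall.
+ */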
+
+/**
+ * Callback to perform clock detection
+ *
+ * @interface: The identifier of the packet interface to configure and
+ *		    use as a SPI interface.
+ * @mode:      The operating mode for the SPI interface. The interface
+ *		    can operate as a full duplex (both Tx and Rx data paths
+ *		    active) or as a halfplex (either the Tx data path is
+ *		    active or the Rx data path is active, but not both).
+ * @timeout:   Timeout to wait for clock synchronization in seconds
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+	int clock_transitions;
+	union cvmx_spxx_clk_stat stat;
+	uint64_t timeout_time;
+	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
+
+	/*
+	 * Regardless of operating mode, both Tx and Rx clocks must be
+	 * present for the SPI interface to operate.
+	 */
+	cvmx_dprintf("SPI%d: Waiting to see TsClk...\n", interface);
+	timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+	/*
+	 * Require 100 clock transitions in order to avoid any noise
+	 * in the beginning.
+	 */
+	clock_transitions = 100;
+	do {
+		stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
+		if (stat.s.s4clk0 && stat.s.s4clk1 && clock_transitions) {
+			/*
+			 * We've seen a clock transition, so decrement
+			 * the number we still need.
+			 */
+			clock_transitions--;
+			cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
+			stat.s.s4clk0 = 0;
+			stat.s.s4clk1 = 0;
+		}
+		if (cvmx_get_cycle() > timeout_time) {
+			cvmx_dprintf("SPI%d: Timeout\n", interface);
+			return -1;
+		}
+	} while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);
+
+	cvmx_dprintf("SPI%d: Waiting to see RsClk...\n", interface);
+	timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+	/*
+	 * Require 100 clock transitions in order to avoid any noise in the
+	 * beginning.
+	 */
+	clock_transitions = 100;
+	do {
+		stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
+		if (stat.s.d4clk0 && stat.s.d4clk1 && clock_transitions) {
+			/*
+			 * We've seen a clock transition, so decrement
+			 * the number we still need
+			 */
+			clock_transitions--;
+			cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
+			stat.s.d4clk0 = 0;
+			stat.s.d4clk1 = 0;
+		}
+		if (cvmx_get_cycle() > timeout_time) {
+			cvmx_dprintf("SPI%d: Timeout\n", interface);
+			return -1;
+		}
+	} while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);
+
+	return 0;
+}
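+
+/*
+ * Note on the detection loops above: writing the just-read value back
+ * to SPXX_CLK_STAT clears the transition bits (they appear to be
+ * write-1-to-clear, judging from this read-then-write-back pattern),
+ * so each iteration that sees both bits set counts one observed
+ * transition; 100 of them are required to filter out startup noise.
+ */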
+
+/**
+ * Callback to perform link training
+ *
+ * @interface: The identifier of the packet interface to configure and
+ *		    use as a SPI interface.
+ * @mode:      The operating mode for the SPI interface. The interface
+ *		    can operate as a full duplex (both Tx and Rx data paths
+ *		    active) or as a halfplex (either the Tx data path is
+ *		    active or the Rx data path is active, but not both).
+ * @timeout:   Timeout to wait for link to be trained (in seconds)
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+	union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
+	union cvmx_spxx_clk_stat stat;
+	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
+	uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+	int rx_training_needed;
+
+	/* SRX0 & STX0 Inf0 Links are configured - begin training */
+	union cvmx_spxx_clk_ctl spxx_clk_ctl;
+	spxx_clk_ctl.u64 = 0;
+	spxx_clk_ctl.s.seetrn = 0;
+	spxx_clk_ctl.s.clkdly = 0x10;
+	spxx_clk_ctl.s.runbist = 0;
+	spxx_clk_ctl.s.statdrv = 0;
+	/* This should always be on the opposite edge as statdrv */
+	spxx_clk_ctl.s.statrcv = 1;
+	spxx_clk_ctl.s.sndtrn = 1;
+	spxx_clk_ctl.s.drptrn = 1;
+	spxx_clk_ctl.s.rcvtrn = 1;
+	spxx_clk_ctl.s.srxdlck = 1;
+	cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+	cvmx_wait(1000 * MS);
+
+	/* SRX0 clear the boot bit */
+	spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface));
+	spxx_trn4_ctl.s.clr_boot = 1;
+	cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
+
+	/* Wait for the training sequence to complete */
+	cvmx_dprintf("SPI%d: Waiting for training\n", interface);
+	cvmx_wait(1000 * MS);
+	/* Wait a really long time here */
+	timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;
+	/*
+	 * The HRM says we must wait for 34 + 16 * MAXDIST training sequences.
+	 * We'll be pessimistic and wait for a lot more.
+	 */
+	rx_training_needed = 500;
+	do {
+		stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
+		if (stat.s.srxtrn && rx_training_needed) {
+			rx_training_needed--;
+			cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
+			stat.s.srxtrn = 0;
+		}
+		if (cvmx_get_cycle() > timeout_time) {
+			cvmx_dprintf("SPI%d: Timeout\n", interface);
+			return -1;
+		}
+	} while (stat.s.srxtrn == 0);
+
+	return 0;
+}
+
+/**
+ * Callback to perform calendar data synchronization
+ *
+ * @interface: The identifier of the packet interface to configure and
+ *		    use as a SPI interface.
+ * @mode:      The operating mode for the SPI interface. The interface
+ *		    can operate as a full duplex (both Tx and Rx data paths
+ *		    active) or as a halfplex (either the Tx data path is
+ *		    active or the Rx data path is active, but not both).
+ * @timeout:   Timeout to wait for calendar data in seconds
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
+	if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
+		/* SRX0 interface should be good, send calendar data */
+		union cvmx_srxx_com_ctl srxx_com_ctl;
+		cvmx_dprintf
+		    ("SPI%d: Rx is synchronized, start sending calendar data\n",
+		     interface);
+		srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
+		srxx_com_ctl.s.inf_en = 1;
+		srxx_com_ctl.s.st_en = 1;
+		cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
+	}
+
+	if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
+		/*
+		 * STX0 has achieved sync, and the correspondent board
+		 * should be sending calendar data. Enable the STX0
+		 * STAT receiver.
+		 */
+		union cvmx_spxx_clk_stat stat;
+		uint64_t timeout_time;
+		union cvmx_stxx_com_ctl stxx_com_ctl;
+		stxx_com_ctl.u64 = 0;
+		stxx_com_ctl.s.st_en = 1;
+		cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
+
+		/* Waiting for calendar sync on STX0 STAT */
+		cvmx_dprintf("SPI%d: Waiting to sync on STX[%d] STAT\n",
+			     interface, interface);
+		timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+		/* SPX0_CLK_STAT[STXCAL] should be 1 (bit 10) */
+		do {
+			stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
+			if (cvmx_get_cycle() > timeout_time) {
+				cvmx_dprintf("SPI%d: Timeout\n", interface);
+				return -1;
+			}
+		} while (stat.s.stxcal == 0);
+	}
+
+	return 0;
+}
+
+/**
+ * Callback to handle interface up
+ *
+ * @interface: The identifier of the packet interface to configure and
+ *		    use as a SPI interface.
+ * @mode:      The operating mode for the SPI interface. The interface
+ *		    can operate as a full duplex (both Tx and Rx data paths
+ *		    active) or as a halfplex (either the Tx data path is
+ *		    active or the Rx data path is active, but not both).
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode)
+{
+	union cvmx_gmxx_rxx_frm_min gmxx_rxx_frm_min;
+	union cvmx_gmxx_rxx_frm_max gmxx_rxx_frm_max;
+	union cvmx_gmxx_rxx_jabber gmxx_rxx_jabber;
+
+	if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
+		union cvmx_srxx_com_ctl srxx_com_ctl;
+		srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
+		srxx_com_ctl.s.inf_en = 1;
+		cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
+		cvmx_dprintf("SPI%d: Rx is now up\n", interface);
+	}
+
+	if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
+		union cvmx_stxx_com_ctl stxx_com_ctl;
+		stxx_com_ctl.u64 = cvmx_read_csr(CVMX_STXX_COM_CTL(interface));
+		stxx_com_ctl.s.inf_en = 1;
+		cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
+		cvmx_dprintf("SPI%d: Tx is now up\n", interface);
+	}
+
+	gmxx_rxx_frm_min.u64 = 0;
+	gmxx_rxx_frm_min.s.len = 64;
+	cvmx_write_csr(CVMX_GMXX_RXX_FRM_MIN(0, interface),
+		       gmxx_rxx_frm_min.u64);
+	gmxx_rxx_frm_max.u64 = 0;
+	gmxx_rxx_frm_max.s.len = 64 * 1024 - 4;
+	cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(0, interface),
+		       gmxx_rxx_frm_max.u64);
+	gmxx_rxx_jabber.u64 = 0;
+	gmxx_rxx_jabber.s.cnt = 64 * 1024 - 4;
+	cvmx_write_csr(CVMX_GMXX_RXX_JABBER(0, interface), gmxx_rxx_jabber.u64);
+
+	return 0;
+}
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
new file mode 100644
index 0000000..30ecba1
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
@@ -0,0 +1,53 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * This module provides system/board/application information obtained
+ * by the bootloader.
+ */
+#include <linux/export.h>
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-sysinfo.h>
+
+/*
+ * This structure defines the private state maintained by the sysinfo module.
+ */
+static struct cvmx_sysinfo sysinfo;	   /* system information */
+
+/*
+ * Returns the application information as obtained
+ * by the bootloader.  This provides the core mask of the cores
+ * running the same application image, as well as the physical
+ * memory regions available to the core.
+ */
+struct cvmx_sysinfo *cvmx_sysinfo_get(void)
+{
+	return &sysinfo;
+}
+EXPORT_SYMBOL(cvmx_sysinfo_get);
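+
+/*
+ * Usage sketch: the executive code commonly derives delays from the
+ * reported core clock, e.g. spinning for roughly 10 ms:
+ *
+ *	uint64_t ms = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
+ *
+ *	cvmx_wait(10 * ms);
+ */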
+
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/octeon-model.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/octeon-model.c
new file mode 100644
index 0000000..3410523
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -0,0 +1,511 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#include <asm/octeon/octeon.h>
+
+enum octeon_feature_bits __octeon_feature_bits __read_mostly;
+EXPORT_SYMBOL_GPL(__octeon_feature_bits);
+
+/**
+ * Read a byte of fuse data
+ * @byte_addr:	 address to read
+ *
+ * Returns the fuse data byte
+ */
+static uint8_t __init cvmx_fuse_read_byte(int byte_addr)
+{
+	union cvmx_mio_fus_rcmd read_cmd;
+
+	read_cmd.u64 = 0;
+	read_cmd.s.addr = byte_addr;
+	read_cmd.s.pend = 1;
+	cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
+	while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD))
+	       && read_cmd.s.pend)
+		;
+	return read_cmd.s.dat;
+}
+
+/*
+ * Version of octeon_model_get_string() that takes the buffer as an
+ * argument, because static/global variables don't work when running
+ * early in U-Boot from flash.
+ */
+static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
+							 char *buffer)
+{
+	const char *family;
+	const char *core_model;
+	char pass[4];
+	int clock_mhz;
+	const char *suffix;
+	int num_cores;
+	union cvmx_mio_fus_dat2 fus_dat2;
+	union cvmx_mio_fus_dat3 fus_dat3;
+	char fuse_model[10];
+	uint32_t fuse_data = 0;
+	uint64_t l2d_fus3 = 0;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+		l2d_fus3 = (cvmx_read_csr(CVMX_L2D_FUS3) >> 34) & 0x3;
+	fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
+	fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
+	num_cores = cvmx_octeon_num_cores();
+
+	/* Make sure the non-existent devices look disabled */
+	switch ((chip_id >> 8) & 0xff) {
+	case 6:		/* CN50XX */
+	case 2:		/* CN30XX */
+		fus_dat3.s.nodfa_dte = 1;
+		fus_dat3.s.nozip = 1;
+		break;
+	case 4:		/* CN57XX or CN56XX */
+		fus_dat3.s.nodfa_dte = 1;
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * Make a guess at the suffix:
+	 *  NSP = everything
+	 *  EXP = no crypto
+	 *  SCP = no DFA, no zip
+	 *  CP  = no DFA, no crypto, no zip
+	 */
+	if (fus_dat3.s.nodfa_dte) {
+		if (fus_dat2.s.nocrypto)
+			suffix = "CP";
+		else
+			suffix = "SCP";
+	} else if (fus_dat2.s.nocrypto)
+		suffix = "EXP";
+	else
+		suffix = "NSP";
+
+	if (!fus_dat2.s.nocrypto)
+		__octeon_feature_bits |= OCTEON_HAS_CRYPTO;
+
+	/*
+	 * Assume pass number is encoded using <5:3><2:0>. Exceptions
+	 * will be fixed later.
+	 */
+	sprintf(pass, "%d.%d", (int)((chip_id >> 3) & 7) + 1, (int)chip_id & 7);
+
+	/*
+	 * Use the number of cores to determine the last 2 digits of
+	 * the model number. There are some exceptions that are fixed
+	 * later.
+	 */
+	switch (num_cores) {
+	case 48:
+		core_model = "90";
+		break;
+	case 44:
+		core_model = "88";
+		break;
+	case 40:
+		core_model = "85";
+		break;
+	case 32:
+		core_model = "80";
+		break;
+	case 24:
+		core_model = "70";
+		break;
+	case 16:
+		core_model = "60";
+		break;
+	case 15:
+		core_model = "58";
+		break;
+	case 14:
+		core_model = "55";
+		break;
+	case 13:
+		core_model = "52";
+		break;
+	case 12:
+		core_model = "50";
+		break;
+	case 11:
+		core_model = "48";
+		break;
+	case 10:
+		core_model = "45";
+		break;
+	case 9:
+		core_model = "42";
+		break;
+	case 8:
+		core_model = "40";
+		break;
+	case 7:
+		core_model = "38";
+		break;
+	case 6:
+		core_model = "34";
+		break;
+	case 5:
+		core_model = "32";
+		break;
+	case 4:
+		core_model = "30";
+		break;
+	case 3:
+		core_model = "25";
+		break;
+	case 2:
+		core_model = "20";
+		break;
+	case 1:
+		core_model = "10";
+		break;
+	default:
+		core_model = "XX";
+		break;
+	}
+
+	/* Now figure out the family, the first two digits */
+	switch ((chip_id >> 8) & 0xff) {
+	case 0:		/* CN38XX, CN37XX or CN36XX */
+		if (l2d_fus3) {
+			/*
+			 * For some unknown reason, the 16 core one is
+			 * called 37 instead of 36.
+			 */
+			if (num_cores >= 16)
+				family = "37";
+			else
+				family = "36";
+		} else
+			family = "38";
+		/*
+		 * This series of chips didn't follow the standard
+		 * pass numbering.
+		 */
+		switch (chip_id & 0xf) {
+		case 0:
+			strcpy(pass, "1.X");
+			break;
+		case 1:
+			strcpy(pass, "2.X");
+			break;
+		case 3:
+			strcpy(pass, "3.X");
+			break;
+		default:
+			strcpy(pass, "X.X");
+			break;
+		}
+		break;
+	case 1:		/* CN31XX or CN3020 */
+		if ((chip_id & 0x10) || l2d_fus3)
+			family = "30";
+		else
+			family = "31";
+		/*
+		 * This series of chips didn't follow the standard
+		 * pass numbering.
+		 */
+		switch (chip_id & 0xf) {
+		case 0:
+			strcpy(pass, "1.0");
+			break;
+		case 2:
+			strcpy(pass, "1.1");
+			break;
+		default:
+			strcpy(pass, "X.X");
+			break;
+		}
+		break;
+	case 2:		/* CN3010 or CN3005 */
+		family = "30";
+		/* A chip with half cache is an 05 */
+		if (l2d_fus3)
+			core_model = "05";
+		/*
+		 * This series of chips didn't follow the standard
+		 * pass numbering.
+		 */
+		switch (chip_id & 0xf) {
+		case 0:
+			strcpy(pass, "1.0");
+			break;
+		case 2:
+			strcpy(pass, "1.1");
+			break;
+		default:
+			strcpy(pass, "X.X");
+			break;
+		}
+		break;
+	case 3:		/* CN58XX */
+		family = "58";
+		/* Special case. 4 core, half cache (CP with half cache) */
+		if ((num_cores == 4) && l2d_fus3 && !strncmp(suffix, "CP", 2))
+			core_model = "29";
+
+		/* Pass 1 uses different encodings for pass numbers */
+		if ((chip_id & 0xFF) < 0x8) {
+			switch (chip_id & 0x3) {
+			case 0:
+				strcpy(pass, "1.0");
+				break;
+			case 1:
+				strcpy(pass, "1.1");
+				break;
+			case 3:
+				strcpy(pass, "1.2");
+				break;
+			default:
+				strcpy(pass, "1.X");
+				break;
+			}
+		}
+		break;
+	case 4:		/* CN57XX, CN56XX, CN55XX, CN54XX */
+		if (fus_dat2.cn56xx.raid_en) {
+			if (l2d_fus3)
+				family = "55";
+			else
+				family = "57";
+			if (fus_dat2.cn56xx.nocrypto)
+				suffix = "SP";
+			else
+				suffix = "SSP";
+		} else {
+			if (fus_dat2.cn56xx.nocrypto)
+				suffix = "CP";
+			else {
+				suffix = "NSP";
+				if (fus_dat3.s.nozip)
+					suffix = "SCP";
+
+				if (fus_dat3.cn56xx.bar2_en)
+					suffix = "NSPB2";
+			}
+			if (l2d_fus3)
+				family = "54";
+			else
+				family = "56";
+		}
+		break;
+	case 6:		/* CN50XX */
+		family = "50";
+		break;
+	case 7:		/* CN52XX */
+		if (l2d_fus3)
+			family = "51";
+		else
+			family = "52";
+		break;
+	case 0x93:		/* CN61XX */
+		family = "61";
+		if (fus_dat2.cn61xx.nocrypto && fus_dat2.cn61xx.dorm_crypto)
+			suffix = "AP";
+		if (fus_dat2.cn61xx.nocrypto)
+			suffix = "CP";
+		else if (fus_dat2.cn61xx.dorm_crypto)
+			suffix = "DAP";
+		else if (fus_dat3.cn61xx.nozip)
+			suffix = "SCP";
+		break;
+	case 0x90:		/* CN63XX */
+		family = "63";
+		if (fus_dat3.s.l2c_crip == 2)
+			family = "62";
+		if (num_cores == 6)	/* Other core counts match generic */
+			core_model = "35";
+		if (fus_dat2.cn63xx.nocrypto)
+			suffix = "CP";
+		else if (fus_dat2.cn63xx.dorm_crypto)
+			suffix = "DAP";
+		else if (fus_dat3.cn63xx.nozip)
+			suffix = "SCP";
+		else
+			suffix = "AAP";
+		break;
+	case 0x92:		/* CN66XX */
+		family = "66";
+		if (num_cores == 6)	/* Other core counts match generic */
+			core_model = "35";
+		if (fus_dat2.cn66xx.nocrypto && fus_dat2.cn66xx.dorm_crypto)
+			suffix = "AP";
+		if (fus_dat2.cn66xx.nocrypto)
+			suffix = "CP";
+		else if (fus_dat2.cn66xx.dorm_crypto)
+			suffix = "DAP";
+		else if (fus_dat3.cn66xx.nozip)
+			suffix = "SCP";
+		else
+			suffix = "AAP";
+		break;
+	case 0x91:		/* CN68XX */
+		family = "68";
+		if (fus_dat2.cn68xx.nocrypto && fus_dat3.cn68xx.nozip)
+			suffix = "CP";
+		else if (fus_dat2.cn68xx.dorm_crypto)
+			suffix = "DAP";
+		else if (fus_dat3.cn68xx.nozip)
+			suffix = "SCP";
+		else if (fus_dat2.cn68xx.nocrypto)
+			suffix = "SP";
+		else
+			suffix = "AAP";
+		break;
+	case 0x94:		/* CNF71XX */
+		family = "F71";
+		if (fus_dat3.cnf71xx.nozip)
+			suffix = "SCP";
+		else
+			suffix = "AAP";
+		break;
+	case 0x95:		/* CN78XX */
+		if (num_cores == 6)	/* Other core counts match generic */
+			core_model = "35";
+		if (OCTEON_IS_MODEL(OCTEON_CN76XX))
+			family = "76";
+		else
+			family = "78";
+		if (fus_dat3.cn78xx.l2c_crip == 2)
+			family = "77";
+		if (fus_dat3.cn78xx.nozip
+		    && fus_dat3.cn78xx.nodfa_dte
+		    && fus_dat3.cn78xx.nohna_dte) {
+			if (fus_dat3.cn78xx.nozip &&
+				!fus_dat2.cn78xx.raid_en &&
+				fus_dat3.cn78xx.nohna_dte) {
+				suffix = "CP";
+			} else {
+				suffix = "SCP";
+			}
+		} else if (fus_dat2.cn78xx.raid_en == 0)
+			suffix = "HCP";
+		else
+			suffix = "AAP";
+		break;
+	case 0x96:		/* CN70XX */
+		family = "70";
+		if (cvmx_read_csr(CVMX_MIO_FUS_PDF) & (0x1ULL << 32))
+			family = "71";
+		if (fus_dat2.cn70xx.nocrypto)
+			suffix = "CP";
+		else if (fus_dat3.cn70xx.nodfa_dte)
+			suffix = "SCP";
+		else
+			suffix = "AAP";
+		break;
+	case 0x97:		/* CN73XX */
+		if (num_cores == 6)	/* Other core counts match generic */
+			core_model = "35";
+		family = "73";
+		if (fus_dat3.cn73xx.l2c_crip == 2)
+			family = "72";
+		if (fus_dat3.cn73xx.nozip
+				&& fus_dat3.cn73xx.nodfa_dte
+				&& fus_dat3.cn73xx.nohna_dte) {
+			if (!fus_dat2.cn73xx.raid_en)
+				suffix = "CP";
+			else
+				suffix = "SCP";
+		} else
+			suffix = "AAP";
+		break;
+	case 0x98:		/* CN75XX */
+		family = "F75";
+		if (fus_dat3.cn78xx.nozip
+		    && fus_dat3.cn78xx.nodfa_dte
+		    && fus_dat3.cn78xx.nohna_dte)
+			suffix = "SCP";
+		else
+			suffix = "AAP";
+		break;
+	default:
+		family = "XX";
+		core_model = "XX";
+		strcpy(pass, "X.X");
+		suffix = "XXX";
+		break;
+	}
+
+	clock_mhz = octeon_get_clock_rate() / 1000000;
+	if (family[0] != '3') {
+		int fuse_base = 384 / 8;
+		if (family[0] == '6')
+			fuse_base = 832 / 8;
+
+		/* Check for model in fuses, overrides normal decode */
+		/* This is _not_ valid for Octeon CN3XXX models */
+		fuse_data |= cvmx_fuse_read_byte(fuse_base + 3);
+		fuse_data = fuse_data << 8;
+		fuse_data |= cvmx_fuse_read_byte(fuse_base + 2);
+		fuse_data = fuse_data << 8;
+		fuse_data |= cvmx_fuse_read_byte(fuse_base + 1);
+		fuse_data = fuse_data << 8;
+		fuse_data |= cvmx_fuse_read_byte(fuse_base);
+		if (fuse_data & 0x7ffff) {
+			int model = fuse_data & 0x3fff;
+			int suffix = (fuse_data >> 14) & 0x1f;
+			if (suffix && model) {
+				/* Have both number and suffix in fuses, so use both */
+				sprintf(fuse_model, "%d%c", model, 'A' + suffix - 1);
+				core_model = "";
+				family = fuse_model;
+			} else if (suffix && !model) {
+				/* Only have suffix, so add suffix to 'normal' model number */
+				sprintf(fuse_model, "%s%c", core_model, 'A' + suffix - 1);
+				core_model = fuse_model;
+			} else {
+				/* Don't have suffix, so just use model from fuses */
+				sprintf(fuse_model, "%d", model);
+				core_model = "";
+				family = fuse_model;
+			}
+		}
+	}
+	sprintf(buffer, "CN%s%sp%s-%d-%s", family, core_model, pass, clock_mhz, suffix);
+	return buffer;
+}
+
+/**
+ * Given the chip processor ID from COP0, this function returns a
+ * string representing the chip model number. The string is of the
+ * form CNXXXXpX.X-FREQ-SUFFIX.
+ * - XXXX = The chip model number
+ * - X.X = Chip pass number
+ * - FREQ = Current frequency in MHz
+ * - SUFFIX = NSP, EXP, SCP, SSP, or CP
+ *
+ * @chip_id: Chip ID
+ *
+ * Returns Model string
+ */
+const char *__init octeon_model_get_string(uint32_t chip_id)
+{
+	static char buffer[32];
+	return octeon_model_get_string_buffer(chip_id, buffer);
+}
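+
+/*
+ * Decode sketch (the chip_id value is hypothetical): with the default
+ * <5:3><2:0> pass encoding, a chip_id whose low bits are 0x09 yields
+ * pass ((0x09 >> 3) & 7) + 1 = 2 and minor 0x09 & 7 = 1, i.e. "2.1".
+ * Callers typically hand in the COP0 processor ID:
+ *
+ *	pr_info("Model: %s\n", octeon_model_get_string(read_c0_prid()));
+ */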
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/flash_setup.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/flash_setup.c
new file mode 100644
index 0000000..a5e8f4a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/flash_setup.c
@@ -0,0 +1,143 @@
+/*
+ *   Octeon Bootbus flash setup
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007, 2008 Cavium Networks
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/semaphore.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/of_platform.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/octeon/octeon.h>
+
+static struct map_info flash_map;
+static struct mtd_info *mymtd;
+static const char *part_probe_types[] = {
+	"cmdlinepart",
+#ifdef CONFIG_MTD_REDBOOT_PARTS
+	"RedBoot",
+#endif
+	NULL
+};
+
+static map_word octeon_flash_map_read(struct map_info *map, unsigned long ofs)
+{
+	map_word r;
+
+	down(&octeon_bootbus_sem);
+	r = inline_map_read(map, ofs);
+	up(&octeon_bootbus_sem);
+
+	return r;
+}
+
+static void octeon_flash_map_write(struct map_info *map, const map_word datum,
+				   unsigned long ofs)
+{
+	down(&octeon_bootbus_sem);
+	inline_map_write(map, datum, ofs);
+	up(&octeon_bootbus_sem);
+}
+
+static void octeon_flash_map_copy_from(struct map_info *map, void *to,
+				       unsigned long from, ssize_t len)
+{
+	down(&octeon_bootbus_sem);
+	inline_map_copy_from(map, to, from, len);
+	up(&octeon_bootbus_sem);
+}
+
+static void octeon_flash_map_copy_to(struct map_info *map, unsigned long to,
+				     const void *from, ssize_t len)
+{
+	down(&octeon_bootbus_sem);
+	inline_map_copy_to(map, to, from, len);
+	up(&octeon_bootbus_sem);
+}
+
+/**
+ * Module/driver initialization.
+ *
+ * Returns zero on success.
+ */
+static int octeon_flash_probe(struct platform_device *pdev)
+{
+	union cvmx_mio_boot_reg_cfgx region_cfg;
+	u32 cs;
+	int r;
+	struct device_node *np = pdev->dev.of_node;
+
+	r = of_property_read_u32(np, "reg", &cs);
+	if (r)
+		return r;
+
+	/*
+	 * Read the bootbus region setup to determine the base
+	 * address of the flash.
+	 */
+	region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
+	if (region_cfg.s.en) {
+		/*
+		 * The bootloader always configures the flash so that the
+		 * entire device sits below 0x1fc00000. This way the flash
+		 * aliases to 0x1fc00000 for booting. Software can access
+		 * the full flash at the true address, while core boot can
+		 * access 4MB.
+		 */
+		/* Use this name so old part lines work */
+		flash_map.name = "phys_mapped_flash";
+		flash_map.phys = region_cfg.s.base << 16;
+		flash_map.size = 0x1fc00000 - flash_map.phys;
+		/* 8-bit bus (0 + 1) or 16-bit bus (1 + 1) */
+		flash_map.bankwidth = region_cfg.s.width + 1;
+		flash_map.virt = ioremap(flash_map.phys, flash_map.size);
+		pr_notice("Bootbus flash: Setting flash for %luMB flash at 0x%08llx\n",
+			  flash_map.size >> 20, flash_map.phys);
+		WARN_ON(!map_bankwidth_supported(flash_map.bankwidth));
+		flash_map.read = octeon_flash_map_read;
+		flash_map.write = octeon_flash_map_write;
+		flash_map.copy_from = octeon_flash_map_copy_from;
+		flash_map.copy_to = octeon_flash_map_copy_to;
+		mymtd = do_map_probe("cfi_probe", &flash_map);
+		if (mymtd) {
+			mymtd->owner = THIS_MODULE;
+			mtd_device_parse_register(mymtd, part_probe_types,
+						  NULL, NULL, 0);
+		} else {
+			pr_err("Failed to register MTD device for flash\n");
+		}
+	}
+	return 0;
+}
+
+static const struct of_device_id of_flash_match[] = {
+	{
+		.compatible	= "cfi-flash",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, of_flash_match);
+
+static struct platform_driver of_flash_driver = {
+	.driver = {
+		.name = "octeon-of-flash",
+		.of_match_table = of_flash_match,
+	},
+	.probe		= octeon_flash_probe,
+};
+
+static int octeon_flash_init(void)
+{
+	return platform_driver_register(&of_flash_driver);
+}
+late_initcall(octeon_flash_init);
+
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/oct_ilm.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/oct_ilm.c
new file mode 100644
index 0000000..2d68a39
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/oct_ilm.c
@@ -0,0 +1,205 @@
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-ciu-defs.h>
+#include <asm/octeon/cvmx.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+
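+/*
+ * Use CIU timer 3; each expiry raises OCTEON_IRQ_TIMER0 + TIMER_NUM,
+ * and the handler records how late the interrupt was serviced.
+ */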
+#define TIMER_NUM 3
+
+static bool reset_stats;
+
+struct latency_info {
+	u64 io_interval;
+	u64 cpu_interval;
+	u64 timer_start1;
+	u64 timer_start2;
+	u64 max_latency;
+	u64 min_latency;
+	u64 latency_sum;
+	u64 average_latency;
+	u64 interrupt_cnt;
+};
+
+static struct latency_info li;
+static struct dentry *dir;
+
+static int show_latency(struct seq_file *m, void *v)
+{
+	u64 cpuclk, avg, max, min;
+	struct latency_info curr_li = li;
+
+	cpuclk = octeon_get_clock_rate();
+
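+	/* Convert cycle counts to nanoseconds: ns = cycles * 10^9 / Hz. */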
+	max = (curr_li.max_latency * 1000000000) / cpuclk;
+	min = (curr_li.min_latency * 1000000000) / cpuclk;
+	avg = (curr_li.latency_sum * 1000000000) / (cpuclk * curr_li.interrupt_cnt);
+
+	seq_printf(m, "cnt: %10llu, avg: %7llu ns, max: %7llu ns, min: %7llu ns\n",
+		   curr_li.interrupt_cnt, avg, max, min);
+	return 0;
+}
+
+static int oct_ilm_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_latency, NULL);
+}
+
+static const struct file_operations oct_ilm_ops = {
+	.open = oct_ilm_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int reset_statistics(void *data, u64 value)
+{
+	reset_stats = true;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n");
+
+static int init_debugfs(void)
+{
+	struct dentry *show_dentry;
+	dir = debugfs_create_dir("oct_ilm", NULL);
+	if (!dir) {
+		pr_err("oct_ilm: failed to create debugfs entry oct_ilm\n");
+		return -1;
+	}
+
+	show_dentry = debugfs_create_file("statistics", 0444, dir, NULL,
+					  &oct_ilm_ops);
+	if (!show_dentry) {
+		pr_err("oct_ilm: failed to create debugfs entry oct_ilm/statistics\n");
+		return -1;
+	}
+
+	show_dentry = debugfs_create_file("reset", 0222, dir, NULL,
+					  &reset_statistics_ops);
+	if (!show_dentry) {
+		pr_err("oct_ilm: failed to create debugfs entry oct_ilm/reset\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static void init_latency_info(struct latency_info *li, int startup)
+{
+	/*
+	 * Interval in milliseconds after which the interrupt will be
+	 * triggered.
+	 */
+	int interval = 1;
+
+	if (startup) {
+		/*
+		 * Compute how far the I/O clock and CPU clock advance in
+		 * 'interval' milliseconds.
+		 */
+		li->io_interval = (octeon_get_io_clock_rate() * interval) / 1000;
+		li->cpu_interval = (octeon_get_clock_rate() * interval) / 1000;
+	}
+	li->timer_start1 = 0;
+	li->timer_start2 = 0;
+	li->max_latency = 0;
+	li->min_latency = (u64)-1;
+	li->latency_sum = 0;
+	li->interrupt_cnt = 0;
+}
+
+static void start_timer(int timer, u64 interval)
+{
+	union cvmx_ciu_timx timx;
+	unsigned long flags;
+
+	timx.u64 = 0;
+	timx.s.one_shot = 1;
+	timx.s.len = interval;
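+	/*
+	 * Keep interrupts off so the two timestamps tightly bracket the
+	 * CSR write that actually starts the timer.
+	 */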
+	raw_local_irq_save(flags);
+	li.timer_start1 = read_c0_cvmcount();
+	cvmx_write_csr(CVMX_CIU_TIMX(timer), timx.u64);
+	/* Read it back to ensure the write has completed. */
+	timx.u64 = cvmx_read_csr(CVMX_CIU_TIMX(timer));
+	li.timer_start2 = read_c0_cvmcount();
+	raw_local_irq_restore(flags);
+}
+
+static irqreturn_t cvm_oct_ciu_timer_interrupt(int cpl, void *dev_id)
+{
+	u64 last_latency;
+	u64 last_int_cnt;
+
+	if (reset_stats) {
+		init_latency_info(&li, 0);
+		reset_stats = false;
+	} else {
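+		/*
+		 * The timer was due at timer_start1 + cpu_interval (CPU
+		 * cycles); anything past that is interrupt latency.
+		 */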
+		last_int_cnt = read_c0_cvmcount();
+		last_latency = last_int_cnt - (li.timer_start1 + li.cpu_interval);
+		li.interrupt_cnt++;
+		li.latency_sum += last_latency;
+		if (last_latency > li.max_latency)
+			li.max_latency = last_latency;
+		if (last_latency < li.min_latency)
+			li.min_latency = last_latency;
+	}
+	start_timer(TIMER_NUM, li.io_interval);
+	return IRQ_HANDLED;
+}
+
+static void disable_timer(int timer)
+{
+	union cvmx_ciu_timx timx;
+
+	/* Zero the whole register: one_shot = 0 and len = 0 stop the timer. */
+	timx.u64 = 0;
+	cvmx_write_csr(CVMX_CIU_TIMX(timer), timx.u64);
+	/* Read it back to force immediate write of the timer register. */
+	timx.u64 = cvmx_read_csr(CVMX_CIU_TIMX(timer));
+}
+
+static __init int oct_ilm_module_init(void)
+{
+	int rc;
+	int irq = OCTEON_IRQ_TIMER0 + TIMER_NUM;
+
+	rc = init_debugfs();
+	if (rc) {
+		WARN(1, "Could not create debugfs entries");
+		return rc;
+	}
+
+	rc = request_irq(irq, cvm_oct_ciu_timer_interrupt, IRQF_NO_THREAD,
+			 "oct_ilm", NULL);
+	if (rc) {
+		WARN(1, "Could not acquire IRQ %d", irq);
+		goto err_irq;
+	}
+
+	init_latency_info(&li, 1);
+	start_timer(TIMER_NUM, li.io_interval);
+
+	return 0;
+err_irq:
+	debugfs_remove_recursive(dir);
+	return rc;
+}
+
+static __exit void oct_ilm_module_exit(void)
+{
+	disable_timer(TIMER_NUM);
+	debugfs_remove_recursive(dir);
+	free_irq(OCTEON_IRQ_TIMER0 + TIMER_NUM, NULL);
+}
+
+module_exit(oct_ilm_module_exit);
+module_init(oct_ilm_module_init);
+MODULE_AUTHOR("Venkat Subbiah, Cavium");
+MODULE_DESCRIPTION("Measures interrupt latency on Octeon chips.");
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/octeon-irq.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/octeon-irq.c
new file mode 100644
index 0000000..a27b3d7
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/octeon-irq.c
@@ -0,0 +1,2979 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2016 Cavium, Inc.
+ */
+
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/bitops.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-ciu2-defs.h>
+#include <asm/octeon/cvmx-ciu3-defs.h>
+
+static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
+static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
+static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
+static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip2);
+
+static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip3);
+static DEFINE_PER_CPU(struct octeon_ciu3_info *, octeon_ciu3_info);
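+/* Number of CIU3 mailbox interrupt sources available to each core. */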
+#define CIU3_MBOX_PER_CORE 10
+
+/*
+ * The 8 most significant bits of the intsn identify the interrupt major block.
+ * Each major block might use its own interrupt domain. Thus 256 domains are
+ * needed.
+ */
+#define MAX_CIU3_DOMAINS		256
+
+typedef irq_hw_number_t (*octeon_ciu3_intsn2hw_t)(struct irq_domain *, unsigned int);
+
+/* Information for each ciu3 in the system */
+struct octeon_ciu3_info {
+	u64			ciu3_addr;
+	int			node;
+	struct irq_domain	*domain[MAX_CIU3_DOMAINS];
+	octeon_ciu3_intsn2hw_t	intsn2hw[MAX_CIU3_DOMAINS];
+};
+
+/* Each ciu3 in the system uses its own data (one ciu3 per node) */
+static struct octeon_ciu3_info	*octeon_ciu3_info_per_node[4];
+
+struct octeon_irq_ciu_domain_data {
+	int num_sum;  /* number of sum registers (2 or 3). */
+};
+
+/* Register offsets from ciu3_addr */
+#define CIU3_CONST		0x220
+#define CIU3_IDT_CTL(_idt)	((_idt) * 8 + 0x110000)
+#define CIU3_IDT_PP(_idt, _idx)	((_idt) * 32 + (_idx) * 8 + 0x120000)
+#define CIU3_IDT_IO(_idt)	((_idt) * 8 + 0x130000)
+#define CIU3_DEST_PP_INT(_pp_ip) ((_pp_ip) * 8 + 0x200000)
+#define CIU3_DEST_IO_INT(_io)	((_io) * 8 + 0x210000)
+#define CIU3_ISC_CTL(_intsn)	((_intsn) * 8 + 0x80000000)
+#define CIU3_ISC_W1C(_intsn)	((_intsn) * 8 + 0x90000000)
+#define CIU3_ISC_W1S(_intsn)	((_intsn) * 8 + 0xa0000000)
+
+static __read_mostly int octeon_irq_ciu_to_irq[8][64];
+
+struct octeon_ciu_chip_data {
+	union {
+		struct {		/* only used for ciu3 */
+			u64 ciu3_addr;
+			unsigned int intsn;
+		};
+		struct {		/* only used for ciu/ciu2 */
+			u8 line;
+			u8 bit;
+		};
+	};
+	int gpio_line;
+	int current_cpu;	/* Next CPU expected to take this irq */
+	int ciu_node; /* NUMA node number of the CIU */
+};
+
+struct octeon_core_chip_data {
+	struct mutex core_irq_mutex;
+	bool current_en;
+	bool desired_en;
+	u8 bit;
+};
+
+#define MIPS_CORE_IRQ_LINES 8
+
+static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
+
+static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
+				      struct irq_chip *chip,
+				      irq_flow_handler_t handler)
+{
+	struct octeon_ciu_chip_data *cd;
+
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd)
+		return -ENOMEM;
+
+	irq_set_chip_and_handler(irq, chip, handler);
+
+	cd->line = line;
+	cd->bit = bit;
+	cd->gpio_line = gpio_line;
+
+	irq_set_chip_data(irq, cd);
+	octeon_irq_ciu_to_irq[line][bit] = irq;
+	return 0;
+}
+
+static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
+{
+	struct irq_data *data = irq_get_irq_data(irq);
+	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+	irq_set_chip_data(irq, NULL);
+	kfree(cd);
+}
+
+static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
+					int irq, int line, int bit)
+{
+	return irq_domain_associate(domain, irq, line << 6 | bit);
+}
+
+static int octeon_coreid_for_cpu(int cpu)
+{
+#ifdef CONFIG_SMP
+	return cpu_logical_map(cpu);
+#else
+	return cvmx_get_core_num();
+#endif
+}
+
+static int octeon_cpu_for_coreid(int coreid)
+{
+#ifdef CONFIG_SMP
+	return cpu_number_map(coreid);
+#else
+	return smp_processor_id();
+#endif
+}
+
+static void octeon_irq_core_ack(struct irq_data *data)
+{
+	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+	unsigned int bit = cd->bit;
+
+	/*
+	 * We don't need to disable IRQs to make these atomic since
+	 * they are already disabled earlier in the low level
+	 * interrupt code.
+	 */
+	clear_c0_status(0x100 << bit);
+	/* The two user interrupts must be cleared manually. */
+	if (bit < 2)
+		clear_c0_cause(0x100 << bit);
+}
+
+static void octeon_irq_core_eoi(struct irq_data *data)
+{
+	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+	/*
+	 * We don't need to disable IRQs to make these atomic since
+	 * they are already disabled earlier in the low level
+	 * interrupt code.
+	 */
+	set_c0_status(0x100 << cd->bit);
+}
+
+static void octeon_irq_core_set_enable_local(void *arg)
+{
+	struct irq_data *data = arg;
+	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+	unsigned int mask = 0x100 << cd->bit;
+
+	/*
+	 * Interrupts are already disabled, so these are atomic.
+	 */
+	if (cd->desired_en)
+		set_c0_status(mask);
+	else
+		clear_c0_status(mask);
+}
+
+static void octeon_irq_core_disable(struct irq_data *data)
+{
+	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+	cd->desired_en = false;
+}
+
+static void octeon_irq_core_enable(struct irq_data *data)
+{
+	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+	cd->desired_en = true;
+}
+
+static void octeon_irq_core_bus_lock(struct irq_data *data)
+{
+	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+	mutex_lock(&cd->core_irq_mutex);
+}
+
+static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
+{
+	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+	if (cd->desired_en != cd->current_en) {
+		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);
+
+		cd->current_en = cd->desired_en;
+	}
+
+	mutex_unlock(&cd->core_irq_mutex);
+}
+
+static struct irq_chip octeon_irq_chip_core = {
+	.name = "Core",
+	.irq_enable = octeon_irq_core_enable,
+	.irq_disable = octeon_irq_core_disable,
+	.irq_ack = octeon_irq_core_ack,
+	.irq_eoi = octeon_irq_core_eoi,
+	.irq_bus_lock = octeon_irq_core_bus_lock,
+	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,
+
+	.irq_cpu_online = octeon_irq_core_eoi,
+	.irq_cpu_offline = octeon_irq_core_ack,
+	.flags = IRQCHIP_ONOFFLINE_ENABLED,
+};
+
+static void __init octeon_irq_init_core(void)
+{
+	int i;
+	int irq;
+	struct octeon_core_chip_data *cd;
+
+	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
+		cd = &octeon_irq_core_chip_data[i];
+		cd->current_en = false;
+		cd->desired_en = false;
+		cd->bit = i;
+		mutex_init(&cd->core_irq_mutex);
+
+		irq = OCTEON_IRQ_SW0 + i;
+		irq_set_chip_data(irq, cd);
+		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
+					 handle_percpu_irq);
+	}
+}
+
+static int next_cpu_for_irq(struct irq_data *data)
+{
+#ifdef CONFIG_SMP
+	int cpu;
+	struct cpumask *mask = irq_data_get_affinity_mask(data);
+	int weight = cpumask_weight(mask);
+	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
+
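+	/*
+	 * Round-robin: advance from the last used CPU to the next
+	 * online CPU in the affinity mask.
+	 */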
+	if (weight > 1) {
+		cpu = cd->current_cpu;
+		for (;;) {
+			cpu = cpumask_next(cpu, mask);
+			if (cpu >= nr_cpu_ids) {
+				cpu = -1;
+				continue;
+			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
+				break;
+			}
+		}
+	} else if (weight == 1) {
+		cpu = cpumask_first(mask);
+	} else {
+		cpu = smp_processor_id();
+	}
+	cd->current_cpu = cpu;
+	return cpu;
+#else
+	return smp_processor_id();
+#endif
+}
+
+static void octeon_irq_ciu_enable(struct irq_data *data)
+{
+	int cpu = next_cpu_for_irq(data);
+	int coreid = octeon_coreid_for_cpu(cpu);
+	unsigned long *pen;
+	unsigned long flags;
+	struct octeon_ciu_chip_data *cd;
+	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	raw_spin_lock_irqsave(lock, flags);
+	if (cd->line == 0) {
+		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+		__set_bit(cd->bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
+		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
+	} else {
+		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+		__set_bit(cd->bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
+		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
+	}
+	raw_spin_unlock_irqrestore(lock, flags);
+}
+
+static void octeon_irq_ciu_enable_local(struct irq_data *data)
+{
+	unsigned long *pen;
+	unsigned long flags;
+	struct octeon_ciu_chip_data *cd;
+	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	raw_spin_lock_irqsave(lock, flags);
+	if (cd->line == 0) {
+		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
+		__set_bit(cd->bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
+		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
+	} else {
+		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
+		__set_bit(cd->bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
+		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
+	}
+	raw_spin_unlock_irqrestore(lock, flags);
+}
+
+static void octeon_irq_ciu_disable_local(struct irq_data *data)
+{
+	unsigned long *pen;
+	unsigned long flags;
+	struct octeon_ciu_chip_data *cd;
+	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	raw_spin_lock_irqsave(lock, flags);
+	if (cd->line == 0) {
+		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
+		__clear_bit(cd->bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
+		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
+	} else {
+		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
+		__clear_bit(cd->bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
+		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
+	}
+	raw_spin_unlock_irqrestore(lock, flags);
+}
+
+static void octeon_irq_ciu_disable_all(struct irq_data *data)
+{
+	unsigned long flags;
+	unsigned long *pen;
+	int cpu;
+	struct octeon_ciu_chip_data *cd;
+	raw_spinlock_t *lock;
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	for_each_online_cpu(cpu) {
+		int coreid = octeon_coreid_for_cpu(cpu);
+		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
+		if (cd->line == 0)
+			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+		else
+			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+
+		raw_spin_lock_irqsave(lock, flags);
+		__clear_bit(cd->bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
+		if (cd->line == 0)
+			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
+		else
+			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
+		raw_spin_unlock_irqrestore(lock, flags);
+	}
+}
+
+static void octeon_irq_ciu_enable_all(struct irq_data *data)
+{
+	unsigned long flags;
+	unsigned long *pen;
+	int cpu;
+	struct octeon_ciu_chip_data *cd;
+	raw_spinlock_t *lock;
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	for_each_online_cpu(cpu) {
+		int coreid = octeon_coreid_for_cpu(cpu);
+		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
+		if (cd->line == 0)
+			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+		else
+			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+
+		raw_spin_lock_irqsave(lock, flags);
+		__set_bit(cd->bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
+		if (cd->line == 0)
+			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
+		else
+			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
+		raw_spin_unlock_irqrestore(lock, flags);
+	}
+}
+
+/*
+ * Enable the irq on the next core in the affinity set for chips that
+ * have the EN*_W1{S,C} registers.
+ */
+static void octeon_irq_ciu_enable_v2(struct irq_data *data)
+{
+	u64 mask;
+	int cpu = next_cpu_for_irq(data);
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	/*
+	 * Called under the desc lock, so these should never get out
+	 * of sync.
+	 */
+	if (cd->line == 0) {
+		int index = octeon_coreid_for_cpu(cpu) * 2;
+		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+	} else {
+		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+	}
+}
+
+/*
+ * Enable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
+{
+	u64 mask;
+	int cpu = next_cpu_for_irq(data);
+	int index = octeon_coreid_for_cpu(cpu);
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
+}
+
+/*
+ * Disable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
+{
+	u64 mask;
+	int cpu = next_cpu_for_irq(data);
+	int index = octeon_coreid_for_cpu(cpu);
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
+}
+
+static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
+{
+	u64 mask;
+	int cpu = next_cpu_for_irq(data);
+	int index = octeon_coreid_for_cpu(cpu);
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
+}
+
+static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
+{
+	int cpu;
+	struct octeon_ciu_chip_data *cd;
+	u64 mask;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	for_each_online_cpu(cpu) {
+		int coreid = octeon_coreid_for_cpu(cpu);
+
+		cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
+	}
+}
+
+/*
+ * Enable the irq on the current CPU for chips that
+ * have the EN*_W1{S,C} registers.
+ */
+static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
+{
+	u64 mask;
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	if (cd->line == 0) {
+		int index = cvmx_get_core_num() * 2;
+		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+	} else {
+		int index = cvmx_get_core_num() * 2 + 1;
+		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+	}
+}
+
+static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
+{
+	u64 mask;
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	if (cd->line == 0) {
+		int index = cvmx_get_core_num() * 2;
+		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+	} else {
+		int index = cvmx_get_core_num() * 2 + 1;
+		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+	}
+}
+
+/*
+ * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
+ */
+static void octeon_irq_ciu_ack(struct irq_data *data)
+{
+	u64 mask;
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	if (cd->line == 0) {
+		int index = cvmx_get_core_num() * 2;
+		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
+	} else {
+		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
+	}
+}
+
+/*
+ * Disable the irq on the all cores for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
+{
+	int cpu;
+	u64 mask;
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	if (cd->line == 0) {
+		for_each_online_cpu(cpu) {
+			int index = octeon_coreid_for_cpu(cpu) * 2;
+			clear_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+		}
+	} else {
+		for_each_online_cpu(cpu) {
+			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+			clear_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+		}
+	}
+}
+
+/*
+ * Enable the irq on the all cores for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
+{
+	int cpu;
+	u64 mask;
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	if (cd->line == 0) {
+		for_each_online_cpu(cpu) {
+			int index = octeon_coreid_for_cpu(cpu) * 2;
+			set_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+		}
+	} else {
+		for_each_online_cpu(cpu) {
+			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+			set_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+		}
+	}
+}
+
+static int octeon_irq_ciu_set_type(struct irq_data *data, unsigned int t)
+{
+	irqd_set_trigger_type(data, t);
+
+	if (t & IRQ_TYPE_EDGE_BOTH)
+		irq_set_handler_locked(data, handle_edge_irq);
+	else
+		irq_set_handler_locked(data, handle_level_irq);
+
+	return IRQ_SET_MASK_OK;
+}
+
+static void octeon_irq_gpio_setup(struct irq_data *data)
+{
+	union cvmx_gpio_bit_cfgx cfg;
+	struct octeon_ciu_chip_data *cd;
+	u32 t = irqd_get_trigger_type(data);
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	cfg.u64 = 0;
+	cfg.s.int_en = 1;
+	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
+	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;
+
+	/* 140 ns glitch filter */
+	cfg.s.fil_cnt = 7;
+	cfg.s.fil_sel = 3;
+
+	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
+}
+
+static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
+{
+	octeon_irq_gpio_setup(data);
+	octeon_irq_ciu_enable_v2(data);
+}
+
+static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
+{
+	octeon_irq_gpio_setup(data);
+	octeon_irq_ciu_enable(data);
+}
+
+static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
+{
+	irqd_set_trigger_type(data, t);
+	octeon_irq_gpio_setup(data);
+
+	if (t & IRQ_TYPE_EDGE_BOTH)
+		irq_set_handler_locked(data, handle_edge_irq);
+	else
+		irq_set_handler_locked(data, handle_level_irq);
+
+	return IRQ_SET_MASK_OK;
+}
+
+static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
+{
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
+
+	octeon_irq_ciu_disable_all_v2(data);
+}
+
+static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
+{
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
+
+	octeon_irq_ciu_disable_all(data);
+}
+
+static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
+{
+	struct octeon_ciu_chip_data *cd;
+	u64 mask;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->gpio_line);
+
+	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
+}
+
+#ifdef CONFIG_SMP
+
+static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
+{
+	int cpu = smp_processor_id();
+	cpumask_t new_affinity;
+	struct cpumask *mask = irq_data_get_affinity_mask(data);
+
+	if (!cpumask_test_cpu(cpu, mask))
+		return;
+
+	if (cpumask_weight(mask) > 1) {
+		/*
+		 * It has multi-CPU affinity, so just remove this CPU
+		 * from the affinity set.
+		 */
+		cpumask_copy(&new_affinity, mask);
+		cpumask_clear_cpu(cpu, &new_affinity);
+	} else {
+		/* Otherwise, put it on lowest numbered online CPU. */
+		cpumask_clear(&new_affinity);
+		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
+	}
+	irq_set_affinity_locked(data, &new_affinity, false);
+}
+
+static int octeon_irq_ciu_set_affinity(struct irq_data *data,
+				       const struct cpumask *dest, bool force)
+{
+	int cpu;
+	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+	unsigned long flags;
+	struct octeon_ciu_chip_data *cd;
+	unsigned long *pen;
+	raw_spinlock_t *lock;
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	/*
+	 * For non-v2 CIU, we will allow only single CPU affinity.
+	 * This removes the need to do locking in the .ack/.eoi
+	 * functions.
+	 */
+	if (cpumask_weight(dest) != 1)
+		return -EINVAL;
+
+	if (!enable_one)
+		return 0;
+
+	for_each_online_cpu(cpu) {
+		int coreid = octeon_coreid_for_cpu(cpu);
+
+		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
+		raw_spin_lock_irqsave(lock, flags);
+
+		if (cd->line == 0)
+			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+		else
+			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+
+		if (cpumask_test_cpu(cpu, dest) && enable_one) {
+			enable_one = 0;
+			__set_bit(cd->bit, pen);
+		} else {
+			__clear_bit(cd->bit, pen);
+		}
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
+
+		if (cd->line == 0)
+			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
+		else
+			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
+
+		raw_spin_unlock_irqrestore(lock, flags);
+	}
+	return 0;
+}
+
+/*
+ * Set affinity for the irq for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
+					  const struct cpumask *dest,
+					  bool force)
+{
+	int cpu;
+	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+	u64 mask;
+	struct octeon_ciu_chip_data *cd;
+
+	if (!enable_one)
+		return 0;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << cd->bit;
+
+	if (cd->line == 0) {
+		for_each_online_cpu(cpu) {
+			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+			int index = octeon_coreid_for_cpu(cpu) * 2;
+			if (cpumask_test_cpu(cpu, dest) && enable_one) {
+				enable_one = false;
+				set_bit(cd->bit, pen);
+				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+			} else {
+				clear_bit(cd->bit, pen);
+				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+			}
+		}
+	} else {
+		for_each_online_cpu(cpu) {
+			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+			if (cpumask_test_cpu(cpu, dest) && enable_one) {
+				enable_one = false;
+				set_bit(cd->bit, pen);
+				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+			} else {
+				clear_bit(cd->bit, pen);
+				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+			}
+		}
+	}
+	return 0;
+}
+
+static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
+					    const struct cpumask *dest,
+					    bool force)
+{
+	int cpu;
+	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+	u64 mask;
+	struct octeon_ciu_chip_data *cd;
+
+	if (!enable_one)
+		return 0;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << cd->bit;
+
+	for_each_online_cpu(cpu) {
+		int index = octeon_coreid_for_cpu(cpu);
+
+		if (cpumask_test_cpu(cpu, dest) && enable_one) {
+			enable_one = false;
+			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
+		} else {
+			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
+		}
+	}
+	return 0;
+}
+#endif
+
+static unsigned int edge_startup(struct irq_data *data)
+{
+	/*
+	 * Ack any pending edge-irq at startup, so there is an _edge_
+	 * to fire on when the event reappears.
+	 */
+	data->chip->irq_ack(data);
+	data->chip->irq_enable(data);
+	return 0;
+}
+
+/*
+ * Newer octeon chips have support for lockless CIU operation.
+ */
+static struct irq_chip octeon_irq_chip_ciu_v2 = {
+	.name = "CIU",
+	.irq_enable = octeon_irq_ciu_enable_v2,
+	.irq_disable = octeon_irq_ciu_disable_all_v2,
+	.irq_mask = octeon_irq_ciu_disable_local_v2,
+	.irq_unmask = octeon_irq_ciu_enable_v2,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
+	.name = "CIU",
+	.irq_enable = octeon_irq_ciu_enable_v2,
+	.irq_disable = octeon_irq_ciu_disable_all_v2,
+	.irq_ack = octeon_irq_ciu_ack,
+	.irq_mask = octeon_irq_ciu_disable_local_v2,
+	.irq_unmask = octeon_irq_ciu_enable_v2,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+/*
+ * Newer octeon chips have support for lockless CIU operation.
+ */
+static struct irq_chip octeon_irq_chip_ciu_sum2 = {
+	.name = "CIU",
+	.irq_enable = octeon_irq_ciu_enable_sum2,
+	.irq_disable = octeon_irq_ciu_disable_all_sum2,
+	.irq_mask = octeon_irq_ciu_disable_local_sum2,
+	.irq_unmask = octeon_irq_ciu_enable_sum2,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
+	.name = "CIU",
+	.irq_enable = octeon_irq_ciu_enable_sum2,
+	.irq_disable = octeon_irq_ciu_disable_all_sum2,
+	.irq_ack = octeon_irq_ciu_ack_sum2,
+	.irq_mask = octeon_irq_ciu_disable_local_sum2,
+	.irq_unmask = octeon_irq_ciu_enable_sum2,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu = {
+	.name = "CIU",
+	.irq_enable = octeon_irq_ciu_enable,
+	.irq_disable = octeon_irq_ciu_disable_all,
+	.irq_mask = octeon_irq_ciu_disable_local,
+	.irq_unmask = octeon_irq_ciu_enable,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_edge = {
+	.name = "CIU",
+	.irq_enable = octeon_irq_ciu_enable,
+	.irq_disable = octeon_irq_ciu_disable_all,
+	.irq_ack = octeon_irq_ciu_ack,
+	.irq_mask = octeon_irq_ciu_disable_local,
+	.irq_unmask = octeon_irq_ciu_enable,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+/* The mbox versions don't do any affinity or round-robin. */
+static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
+	.name = "CIU-M",
+	.irq_enable = octeon_irq_ciu_enable_all_v2,
+	.irq_disable = octeon_irq_ciu_disable_all_v2,
+	.irq_ack = octeon_irq_ciu_disable_local_v2,
+	.irq_eoi = octeon_irq_ciu_enable_local_v2,
+
+	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
+	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
+	.flags = IRQCHIP_ONOFFLINE_ENABLED,
+};
+
+static struct irq_chip octeon_irq_chip_ciu_mbox = {
+	.name = "CIU-M",
+	.irq_enable = octeon_irq_ciu_enable_all,
+	.irq_disable = octeon_irq_ciu_disable_all,
+	.irq_ack = octeon_irq_ciu_disable_local,
+	.irq_eoi = octeon_irq_ciu_enable_local,
+
+	.irq_cpu_online = octeon_irq_ciu_enable_local,
+	.irq_cpu_offline = octeon_irq_ciu_disable_local,
+	.flags = IRQCHIP_ONOFFLINE_ENABLED,
+};
+
+static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
+	.name = "CIU-GPIO",
+	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
+	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
+	.irq_ack = octeon_irq_ciu_gpio_ack,
+	.irq_mask = octeon_irq_ciu_disable_local_v2,
+	.irq_unmask = octeon_irq_ciu_enable_v2,
+	.irq_set_type = octeon_irq_ciu_gpio_set_type,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+	.flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static struct irq_chip octeon_irq_chip_ciu_gpio = {
+	.name = "CIU-GPIO",
+	.irq_enable = octeon_irq_ciu_enable_gpio,
+	.irq_disable = octeon_irq_ciu_disable_gpio,
+	.irq_mask = octeon_irq_ciu_disable_local,
+	.irq_unmask = octeon_irq_ciu_enable,
+	.irq_ack = octeon_irq_ciu_gpio_ack,
+	.irq_set_type = octeon_irq_ciu_gpio_set_type,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+	.flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+/*
+ * Watchdog interrupts are special.  They are associated with a single
+ * core, so we hardwire the affinity to that core.
+ */
+static void octeon_irq_ciu_wd_enable(struct irq_data *data)
+{
+	unsigned long flags;
+	unsigned long *pen;
+	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
+	int cpu = octeon_cpu_for_coreid(coreid);
+	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
+
+	raw_spin_lock_irqsave(lock, flags);
+	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+	__set_bit(coreid, pen);
+	/*
+	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
+	 * the irq.
+	 */
+	wmb();
+	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
+	raw_spin_unlock_irqrestore(lock, flags);
+}
+
+/*
+ * Watchdog interrupts are special.  They are associated with a single
+ * core, so we hardwire the affinity to that core.
+ */
+static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
+{
+	int coreid = data->irq - OCTEON_IRQ_WDOG0;
+	int cpu = octeon_cpu_for_coreid(coreid);
+
+	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
+}
+
+static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
+	.name = "CIU-W",
+	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
+	.irq_disable = octeon_irq_ciu_disable_all_v2,
+	.irq_mask = octeon_irq_ciu_disable_local_v2,
+	.irq_unmask = octeon_irq_ciu_enable_local_v2,
+};
+
+static struct irq_chip octeon_irq_chip_ciu_wd = {
+	.name = "CIU-W",
+	.irq_enable = octeon_irq_ciu_wd_enable,
+	.irq_disable = octeon_irq_ciu_disable_all,
+	.irq_mask = octeon_irq_ciu_disable_local,
+	.irq_unmask = octeon_irq_ciu_enable_local,
+};
+
+static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
+{
+	bool edge = false;
+
+	if (line == 0)
+		switch (bit) {
+		case 48 ... 49: /* GMX DRP */
+		case 50: /* IPD_DRP */
+		case 52 ... 55: /* Timers */
+		case 58: /* MPI */
+			edge = true;
+			break;
+		default:
+			break;
+		}
+	else /* line == 1 */
+		switch (bit) {
+		case 47: /* PTP */
+			edge = true;
+			break;
+		default:
+			break;
+		}
+	return edge;
+}
+
+struct octeon_irq_gpio_domain_data {
+	unsigned int base_hwirq;
+};
+
+static int octeon_irq_gpio_xlat(struct irq_domain *d,
+				struct device_node *node,
+				const u32 *intspec,
+				unsigned int intsize,
+				unsigned long *out_hwirq,
+				unsigned int *out_type)
+{
+	unsigned int type;
+	unsigned int pin;
+	unsigned int trigger;
+
+	if (irq_domain_get_of_node(d) != node)
+		return -EINVAL;
+
+	if (intsize < 2)
+		return -EINVAL;
+
+	pin = intspec[0];
+	if (pin >= 16)
+		return -EINVAL;
+
+	trigger = intspec[1];
+
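+	/*
+	 * Standard DT trigger flags: 1 = rising, 2 = falling,
+	 * 4 = level-high, 8 = level-low.
+	 */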
+	switch (trigger) {
+	case 1:
+		type = IRQ_TYPE_EDGE_RISING;
+		break;
+	case 2:
+		type = IRQ_TYPE_EDGE_FALLING;
+		break;
+	case 4:
+		type = IRQ_TYPE_LEVEL_HIGH;
+		break;
+	case 8:
+		type = IRQ_TYPE_LEVEL_LOW;
+		break;
+	default:
+		pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
+		       node->name,
+		       trigger);
+		type = IRQ_TYPE_LEVEL_LOW;
+		break;
+	}
+	*out_type = type;
+	*out_hwirq = pin;
+
+	return 0;
+}
+
+static int octeon_irq_ciu_xlat(struct irq_domain *d,
+			       struct device_node *node,
+			       const u32 *intspec,
+			       unsigned int intsize,
+			       unsigned long *out_hwirq,
+			       unsigned int *out_type)
+{
+	unsigned int ciu, bit;
+	struct octeon_irq_ciu_domain_data *dd = d->host_data;
+
+	ciu = intspec[0];
+	bit = intspec[1];
+
+	if (ciu >= dd->num_sum || bit > 63)
+		return -EINVAL;
+
+	*out_hwirq = (ciu << 6) | bit;
+	*out_type = 0;
+
+	return 0;
+}
+
+static struct irq_chip *octeon_irq_ciu_chip;
+static struct irq_chip *octeon_irq_ciu_chip_edge;
+static struct irq_chip *octeon_irq_gpio_chip;
+
+static int octeon_irq_ciu_map(struct irq_domain *d,
+			      unsigned int virq, irq_hw_number_t hw)
+{
+	int rv;
+	unsigned int line = hw >> 6;
+	unsigned int bit = hw & 63;
+	struct octeon_irq_ciu_domain_data *dd = d->host_data;
+
+	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
+		return -EINVAL;
+
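+	/*
+	 * Line 2 (SUM2) has dedicated chips; lines 0 and 1 use the
+	 * chips selected at init time.
+	 */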
+	if (line == 2) {
+		if (octeon_irq_ciu_is_edge(line, bit))
+			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+				&octeon_irq_chip_ciu_sum2_edge,
+				handle_edge_irq);
+		else
+			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+				&octeon_irq_chip_ciu_sum2,
+				handle_level_irq);
+	} else {
+		if (octeon_irq_ciu_is_edge(line, bit))
+			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+				octeon_irq_ciu_chip_edge,
+				handle_edge_irq);
+		else
+			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+				octeon_irq_ciu_chip,
+				handle_level_irq);
+	}
+	return rv;
+}
+
+static int octeon_irq_gpio_map(struct irq_domain *d,
+			       unsigned int virq, irq_hw_number_t hw)
+{
+	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
+	unsigned int line, bit;
+	int r;
+
+	line = (hw + gpiod->base_hwirq) >> 6;
+	bit = (hw + gpiod->base_hwirq) & 63;
+	if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
+		octeon_irq_ciu_to_irq[line][bit] != 0)
+		return -EINVAL;
+
+	/*
+	 * Default to handle_level_irq. If the DT contains a different
+	 * trigger type, it will call the irq_set_type callback and
+	 * the handler gets updated.
+	 */
+	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
+				       octeon_irq_gpio_chip, handle_level_irq);
+	return r;
+}
+
+static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
+	.map = octeon_irq_ciu_map,
+	.unmap = octeon_irq_free_cd,
+	.xlate = octeon_irq_ciu_xlat,
+};
+
+static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
+	.map = octeon_irq_gpio_map,
+	.unmap = octeon_irq_free_cd,
+	.xlate = octeon_irq_gpio_xlat,
+};
+
+static void octeon_irq_ip2_ciu(void)
+{
+	const unsigned long core_id = cvmx_get_core_num();
+	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));
+
+	ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
+	if (likely(ciu_sum)) {
+		int bit = fls64(ciu_sum) - 1;
+		int irq = octeon_irq_ciu_to_irq[0][bit];
+		if (likely(irq))
+			do_IRQ(irq);
+		else
+			spurious_interrupt();
+	} else {
+		spurious_interrupt();
+	}
+}
+
+static void octeon_irq_ip3_ciu(void)
+{
+	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);
+
+	ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
+	if (likely(ciu_sum)) {
+		int bit = fls64(ciu_sum) - 1;
+		int irq = octeon_irq_ciu_to_irq[1][bit];
+		if (likely(irq))
+			do_IRQ(irq);
+		else
+			spurious_interrupt();
+	} else {
+		spurious_interrupt();
+	}
+}
+
+static void octeon_irq_ip4_ciu(void)
+{
+	int coreid = cvmx_get_core_num();
+	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
+	u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));
+
+	ciu_sum &= ciu_en;
+	if (likely(ciu_sum)) {
+		int bit = fls64(ciu_sum) - 1;
+		int irq = octeon_irq_ciu_to_irq[2][bit];
+
+		if (likely(irq))
+			do_IRQ(irq);
+		else
+			spurious_interrupt();
+	} else {
+		spurious_interrupt();
+	}
+}
+
+static bool octeon_irq_use_ip4;
+
+static void octeon_irq_local_enable_ip4(void *arg)
+{
+	set_c0_status(STATUSF_IP4);
+}
+
+static void octeon_irq_ip4_mask(void)
+{
+	clear_c0_status(STATUSF_IP4);
+	spurious_interrupt();
+}
+
+static void (*octeon_irq_ip2)(void);
+static void (*octeon_irq_ip3)(void);
+static void (*octeon_irq_ip4)(void);
+
+void (*octeon_irq_setup_secondary)(void);
+
+void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
+{
+	octeon_irq_ip4 = h;
+	octeon_irq_use_ip4 = true;
+	on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
+}
+
+static void octeon_irq_percpu_enable(void)
+{
+	irq_cpu_online();
+}
+
+static void octeon_irq_init_ciu_percpu(void)
+{
+	int coreid = cvmx_get_core_num();
+
+	__this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
+	__this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
+	wmb();
+	raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
+	/*
+	 * Disable All CIU Interrupts. The ones we need will be
+	 * enabled later.  Read the SUM register so we know the write
+	 * completed.
+	 */
+	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
+	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
+	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
+	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
+	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
+}
+
+static void octeon_irq_init_ciu2_percpu(void)
+{
+	u64 regx, ipx;
+	int coreid = cvmx_get_core_num();
+	u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);
+
+	/*
+	 * Disable All CIU2 Interrupts. The ones we need will be
+	 * enabled later.  Read the SUM register so we know the write
+	 * completed.
+	 *
+	 * There are 9 registers and 3 IPX levels with strides 0x1000
+	 * and 0x200 respectively.  Use loops to clear them.
+	 */
+	for (regx = 0; regx <= 0x8000; regx += 0x1000) {
+		for (ipx = 0; ipx <= 0x400; ipx += 0x200)
+			cvmx_write_csr(base + regx + ipx, 0);
+	}
+
+	cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
+}
+
+static void octeon_irq_setup_secondary_ciu(void)
+{
+	octeon_irq_init_ciu_percpu();
+	octeon_irq_percpu_enable();
+
+	/* Enable the CIU lines */
+	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
+	if (octeon_irq_use_ip4)
+		set_c0_status(STATUSF_IP4);
+	else
+		clear_c0_status(STATUSF_IP4);
+}
+
+static void octeon_irq_setup_secondary_ciu2(void)
+{
+	octeon_irq_init_ciu2_percpu();
+	octeon_irq_percpu_enable();
+
+	/* Enable the CIU lines */
+	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
+	if (octeon_irq_use_ip4)
+		set_c0_status(STATUSF_IP4);
+	else
+		clear_c0_status(STATUSF_IP4);
+}
+
+static int __init octeon_irq_init_ciu(
+	struct device_node *ciu_node, struct device_node *parent)
+{
+	unsigned int i, r;
+	struct irq_chip *chip;
+	struct irq_chip *chip_edge;
+	struct irq_chip *chip_mbox;
+	struct irq_chip *chip_wd;
+	struct irq_domain *ciu_domain = NULL;
+	struct octeon_irq_ciu_domain_data *dd;
+
+	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+	if (!dd)
+		return -ENOMEM;
+
+	octeon_irq_init_ciu_percpu();
+	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
+
+	octeon_irq_ip2 = octeon_irq_ip2_ciu;
+	octeon_irq_ip3 = octeon_irq_ip3_ciu;
+	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) &&
+	    !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+		octeon_irq_ip4 =  octeon_irq_ip4_ciu;
+		dd->num_sum = 3;
+		octeon_irq_use_ip4 = true;
+	} else {
+		octeon_irq_ip4 = octeon_irq_ip4_mask;
+		dd->num_sum = 2;
+		octeon_irq_use_ip4 = false;
+	}
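+	/* Chips with the EN*_W1{S,C} registers can use the lockless "v2" flows. */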
+	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
+	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
+	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
+	    OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
+		chip = &octeon_irq_chip_ciu_v2;
+		chip_edge = &octeon_irq_chip_ciu_v2_edge;
+		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
+		chip_wd = &octeon_irq_chip_ciu_wd_v2;
+		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
+	} else {
+		chip = &octeon_irq_chip_ciu;
+		chip_edge = &octeon_irq_chip_ciu_edge;
+		chip_mbox = &octeon_irq_chip_ciu_mbox;
+		chip_wd = &octeon_irq_chip_ciu_wd;
+		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
+	}
+	octeon_irq_ciu_chip = chip;
+	octeon_irq_ciu_chip_edge = chip_edge;
+
+	/* Mips internal */
+	octeon_irq_init_core();
+
+	ciu_domain = irq_domain_add_tree(
+		ciu_node, &octeon_irq_domain_ciu_ops, dd);
+	irq_set_default_host(ciu_domain);
+
+	/* CIU_0 */
+	for (i = 0; i < 16; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
+		if (r)
+			goto err;
+	}
+
+	r = octeon_irq_set_ciu_mapping(
+		OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
+	if (r)
+		goto err;
+	r = octeon_irq_set_ciu_mapping(
+		OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
+	if (r)
+		goto err;
+
+	for (i = 0; i < 4; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
+		if (r)
+			goto err;
+	}
+	for (i = 0; i < 4; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+		if (r)
+			goto err;
+	}
+
+	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
+	if (r)
+		goto err;
+
+	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
+	if (r)
+		goto err;
+
+	for (i = 0; i < 4; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
+		if (r)
+			goto err;
+	}
+
+	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
+	if (r)
+		goto err;
+
+	/* CIU_1 */
+	for (i = 0; i < 16; i++) {
+		r = octeon_irq_set_ciu_mapping(
+			i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
+			handle_level_irq);
+		if (r)
+			goto err;
+	}
+
+	/* Enable the CIU lines */
+	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
+	if (octeon_irq_use_ip4)
+		set_c0_status(STATUSF_IP4);
+	else
+		clear_c0_status(STATUSF_IP4);
+
+	return 0;
+err:
+	return r;
+}
+
+static int __init octeon_irq_init_gpio(
+	struct device_node *gpio_node, struct device_node *parent)
+{
+	struct octeon_irq_gpio_domain_data *gpiod;
+	u32 interrupt_cells;
+	unsigned int base_hwirq;
+	int r;
+
+	r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
+	if (r)
+		return r;
+
+	if (interrupt_cells == 1) {
+		u32 v;
+
+		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
+		if (r) {
+			pr_warn("No \"interrupts\" property.\n");
+			return r;
+		}
+		base_hwirq = v;
+	} else if (interrupt_cells == 2) {
+		u32 v0, v1;
+
+		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
+		if (r) {
+			pr_warn("No \"interrupts\" property.\n");
+			return r;
+		}
+		r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
+		if (r) {
+			pr_warn("No \"interrupts\" property.\n");
+			return r;
+		}
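+		/* Encode as (line << 6) | bit, matching the CIU hwirq layout. */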
+		base_hwirq = (v0 << 6) | v1;
+	} else {
+		pr_warn("Bad \"#interrupt-cells\" property: %u\n",
+			interrupt_cells);
+		return -EINVAL;
+	}
+
+	gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
+	if (gpiod) {
+		/* gpio domain host_data is the base hwirq number. */
+		gpiod->base_hwirq = base_hwirq;
+		irq_domain_add_linear(
+			gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
+	} else {
+		pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Clear the OF_POPULATED flag that was set by of_irq_init()
+	 * so that all GPIO devices will be probed.
+	 */
+	of_node_clear_flag(gpio_node, OF_POPULATED);
+
+	return 0;
+}
+
+/*
+ * Watchdog interrupts are special.  They are associated with a single
+ * core, so we hardwire the affinity to that core.
+ */
+static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
+{
+	u64 mask;
+	u64 en_addr;
+	int coreid = data->irq - OCTEON_IRQ_WDOG0;
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+		(0x1000ull * cd->line);
+	cvmx_write_csr(en_addr, mask);
+}
+
+static void octeon_irq_ciu2_enable(struct irq_data *data)
+{
+	u64 mask;
+	u64 en_addr;
+	int cpu = next_cpu_for_irq(data);
+	int coreid = octeon_coreid_for_cpu(cpu);
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+		(0x1000ull * cd->line);
+	cvmx_write_csr(en_addr, mask);
+}
+
+static void octeon_irq_ciu2_enable_local(struct irq_data *data)
+{
+	u64 mask;
+	u64 en_addr;
+	int coreid = cvmx_get_core_num();
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+		(0x1000ull * cd->line);
+	cvmx_write_csr(en_addr, mask);
+}
+
+static void octeon_irq_ciu2_disable_local(struct irq_data *data)
+{
+	u64 mask;
+	u64 en_addr;
+	int coreid = cvmx_get_core_num();
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
+		(0x1000ull * cd->line);
+	cvmx_write_csr(en_addr, mask);
+}
+
+static void octeon_irq_ciu2_ack(struct irq_data *data)
+{
+	u64 mask;
+	u64 en_addr;
+	int coreid = cvmx_get_core_num();
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
+	cvmx_write_csr(en_addr, mask);
+}
+
+static void octeon_irq_ciu2_disable_all(struct irq_data *data)
+{
+	int cpu;
+	u64 mask;
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	for_each_online_cpu(cpu) {
+		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
+			octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
+		cvmx_write_csr(en_addr, mask);
+	}
+}
+
+static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
+{
+	int cpu;
+	u64 mask;
+
+	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
+
+	for_each_online_cpu(cpu) {
+		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
+			octeon_coreid_for_cpu(cpu));
+		cvmx_write_csr(en_addr, mask);
+	}
+}
+
+static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
+{
+	int cpu;
+	u64 mask;
+
+	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
+
+	for_each_online_cpu(cpu) {
+		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
+			octeon_coreid_for_cpu(cpu));
+		cvmx_write_csr(en_addr, mask);
+	}
+}
+
+static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
+{
+	u64 mask;
+	u64 en_addr;
+	int coreid = cvmx_get_core_num();
+
+	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
+	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
+	cvmx_write_csr(en_addr, mask);
+}
+
+static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
+{
+	u64 mask;
+	u64 en_addr;
+	int coreid = cvmx_get_core_num();
+
+	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
+	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
+	cvmx_write_csr(en_addr, mask);
+}
+
+#ifdef CONFIG_SMP
+static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
+					const struct cpumask *dest, bool force)
+{
+	int cpu;
+	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+	u64 mask;
+	struct octeon_ciu_chip_data *cd;
+
+	if (!enable_one)
+		return 0;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << cd->bit;
+
+	for_each_online_cpu(cpu) {
+		u64 en_addr;
+		if (cpumask_test_cpu(cpu, dest) && enable_one) {
+			enable_one = false;
+			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
+				octeon_coreid_for_cpu(cpu)) +
+				(0x1000ull * cd->line);
+		} else {
+			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
+				octeon_coreid_for_cpu(cpu)) +
+				(0x1000ull * cd->line);
+		}
+		cvmx_write_csr(en_addr, mask);
+	}
+
+	return 0;
+}
+#endif
+
+static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
+{
+	octeon_irq_gpio_setup(data);
+	octeon_irq_ciu2_enable(data);
+}
+
+static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
+{
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
+
+	octeon_irq_ciu2_disable_all(data);
+}
+
+static struct irq_chip octeon_irq_chip_ciu2 = {
+	.name = "CIU2-E",
+	.irq_enable = octeon_irq_ciu2_enable,
+	.irq_disable = octeon_irq_ciu2_disable_all,
+	.irq_mask = octeon_irq_ciu2_disable_local,
+	.irq_unmask = octeon_irq_ciu2_enable,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu2_edge = {
+	.name = "CIU2-E",
+	.irq_enable = octeon_irq_ciu2_enable,
+	.irq_disable = octeon_irq_ciu2_disable_all,
+	.irq_ack = octeon_irq_ciu2_ack,
+	.irq_mask = octeon_irq_ciu2_disable_local,
+	.irq_unmask = octeon_irq_ciu2_enable,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu2_mbox = {
+	.name = "CIU2-M",
+	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
+	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
+	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
+	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,
+
+	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
+	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
+	.flags = IRQCHIP_ONOFFLINE_ENABLED,
+};
+
+static struct irq_chip octeon_irq_chip_ciu2_wd = {
+	.name = "CIU2-W",
+	.irq_enable = octeon_irq_ciu2_wd_enable,
+	.irq_disable = octeon_irq_ciu2_disable_all,
+	.irq_mask = octeon_irq_ciu2_disable_local,
+	.irq_unmask = octeon_irq_ciu2_enable_local,
+};
+
+static struct irq_chip octeon_irq_chip_ciu2_gpio = {
+	.name = "CIU-GPIO",
+	.irq_enable = octeon_irq_ciu2_enable_gpio,
+	.irq_disable = octeon_irq_ciu2_disable_gpio,
+	.irq_ack = octeon_irq_ciu_gpio_ack,
+	.irq_mask = octeon_irq_ciu2_disable_local,
+	.irq_unmask = octeon_irq_ciu2_enable,
+	.irq_set_type = octeon_irq_ciu_gpio_set_type,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+	.flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
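+/*
+ * Translate a two-cell interrupt specifier into a hwirq: the first
+ * cell selects the CIU2 line and the second the bit within it, so
+ * hwirq = (line << 6) | bit (64 bits per line).
+ */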
+static int octeon_irq_ciu2_xlat(struct irq_domain *d,
+				struct device_node *node,
+				const u32 *intspec,
+				unsigned int intsize,
+				unsigned long *out_hwirq,
+				unsigned int *out_type)
+{
+	unsigned int ciu, bit;
+
+	ciu = intspec[0];
+	bit = intspec[1];
+
+	*out_hwirq = (ciu << 6) | bit;
+	*out_type = 0;
+
+	return 0;
+}
+
+static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
+{
+	bool edge = false;
+
+	if (line == 3) /* MIO */
+		switch (bit) {
+		case 2:	 /* IPD_DRP */
+		case 8 ... 11: /* Timers */
+		case 48: /* PTP */
+			edge = true;
+			break;
+		default:
+			break;
+		}
+	else if (line == 6) /* PKT */
+		switch (bit) {
+		case 52 ... 53: /* ILK_DRP */
+		case 8 ... 12:	/* GMX_DRP */
+			edge = true;
+			break;
+		default:
+			break;
+		}
+	return edge;
+}
+
+static int octeon_irq_ciu2_map(struct irq_domain *d,
+			       unsigned int virq, irq_hw_number_t hw)
+{
+	unsigned int line = hw >> 6;
+	unsigned int bit = hw & 63;
+
+	/*
+	 * Don't map irq if it is reserved for GPIO.
+	 * (Line 7 holds the GPIO lines.)
+	 */
+	if (line == 7)
+		return 0;
+
+	if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
+		return -EINVAL;
+
+	if (octeon_irq_ciu2_is_edge(line, bit))
+		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+					   &octeon_irq_chip_ciu2_edge,
+					   handle_edge_irq);
+	else
+		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+					   &octeon_irq_chip_ciu2,
+					   handle_level_irq);
+
+	return 0;
+}
+
+static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
+	.map = octeon_irq_ciu2_map,
+	.unmap = octeon_irq_free_cd,
+	.xlate = octeon_irq_ciu2_xlat,
+};
+
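+/*
+ * IP2 dispatch: the per-core SUM register identifies the highest
+ * pending CIU2 line; that line's SRC register identifies the pending
+ * bit, which octeon_irq_ciu_to_irq[][] maps to a Linux irq.
+ */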
+static void octeon_irq_ciu2(void)
+{
+	int line;
+	int bit;
+	int irq;
+	u64 src_reg, src, sum;
+	const unsigned long core_id = cvmx_get_core_num();
+
+	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;
+
+	if (unlikely(!sum))
+		goto spurious;
+
+	line = fls64(sum) - 1;
+	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
+	src = cvmx_read_csr(src_reg);
+
+	if (unlikely(!src))
+		goto spurious;
+
+	bit = fls64(src) - 1;
+	irq = octeon_irq_ciu_to_irq[line][bit];
+	if (unlikely(!irq))
+		goto spurious;
+
+	do_IRQ(irq);
+	goto out;
+
+spurious:
+	spurious_interrupt();
+out:
+	/*
+	 * CN68XX pass 1.x has an erratum where accessing the ACK registers
+	 * can stop interrupts from propagating.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
+	else
+		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
+	return;
+}
+
+static void octeon_irq_ciu2_mbox(void)
+{
+	int line;
+
+	const unsigned long core_id = cvmx_get_core_num();
+	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;
+
+	if (unlikely(!sum))
+		goto spurious;
+
+	line = fls64(sum) - 1;
+
+	do_IRQ(OCTEON_IRQ_MBOX0 + line);
+	goto out;
+
+spurious:
+	spurious_interrupt();
+out:
+	/*
+	 * CN68XX pass 1.x has an erratum where accessing the ACK registers
+	 * can stop interrupts from propagating.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
+	else
+		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
+	return;
+}
+
+static int __init octeon_irq_init_ciu2(
+	struct device_node *ciu_node, struct device_node *parent)
+{
+	unsigned int i, r;
+	struct irq_domain *ciu_domain = NULL;
+
+	octeon_irq_init_ciu2_percpu();
+	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;
+
+	octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
+	octeon_irq_ip2 = octeon_irq_ciu2;
+	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
+	octeon_irq_ip4 = octeon_irq_ip4_mask;
+
+	/* MIPS internal */
+	octeon_irq_init_core();
+
+	ciu_domain = irq_domain_add_tree(
+		ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
+	irq_set_default_host(ciu_domain);
+
+	/* CIU2 */
+	for (i = 0; i < 64; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
+		if (r)
+			goto err;
+	}
+
+	for (i = 0; i < 32; i++) {
+		r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
+			&octeon_irq_chip_ciu2_wd, handle_level_irq);
+		if (r)
+			goto err;
+	}
+
+	for (i = 0; i < 4; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
+		if (r)
+			goto err;
+	}
+
+	for (i = 0; i < 4; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
+		if (r)
+			goto err;
+	}
+
+	for (i = 0; i < 4; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
+		if (r)
+			goto err;
+	}
+
+	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
+	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
+	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
+	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
+
+	/* Enable the CIU lines */
+	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
+	clear_c0_status(STATUSF_IP4);
+	return 0;
+err:
+	return r;
+}
+
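+/*
+ * The CIB is a simple secondary interrupt controller described by a
+ * RAW/EN register pair; the lock serializes read-modify-write updates
+ * of the shared EN register.
+ */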
+struct octeon_irq_cib_host_data {
+	raw_spinlock_t lock;
+	u64 raw_reg;
+	u64 en_reg;
+	int max_bits;
+};
+
+struct octeon_irq_cib_chip_data {
+	struct octeon_irq_cib_host_data *host_data;
+	int bit;
+};
+
+static void octeon_irq_cib_enable(struct irq_data *data)
+{
+	unsigned long flags;
+	u64 en;
+	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
+	struct octeon_irq_cib_host_data *host_data = cd->host_data;
+
+	raw_spin_lock_irqsave(&host_data->lock, flags);
+	en = cvmx_read_csr(host_data->en_reg);
+	en |= 1ull << cd->bit;
+	cvmx_write_csr(host_data->en_reg, en);
+	raw_spin_unlock_irqrestore(&host_data->lock, flags);
+}
+
+static void octeon_irq_cib_disable(struct irq_data *data)
+{
+	unsigned long flags;
+	u64 en;
+	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
+	struct octeon_irq_cib_host_data *host_data = cd->host_data;
+
+	raw_spin_lock_irqsave(&host_data->lock, flags);
+	en = cvmx_read_csr(host_data->en_reg);
+	en &= ~(1ull << cd->bit);
+	cvmx_write_csr(host_data->en_reg, en);
+	raw_spin_unlock_irqrestore(&host_data->lock, flags);
+}
+
+static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
+{
+	irqd_set_trigger_type(data, t);
+	return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip octeon_irq_chip_cib = {
+	.name = "CIB",
+	.irq_enable = octeon_irq_cib_enable,
+	.irq_disable = octeon_irq_cib_disable,
+	.irq_mask = octeon_irq_cib_disable,
+	.irq_unmask = octeon_irq_cib_enable,
+	.irq_set_type = octeon_irq_cib_set_type,
+};
+
+static int octeon_irq_cib_xlat(struct irq_domain *d,
+				   struct device_node *node,
+				   const u32 *intspec,
+				   unsigned int intsize,
+				   unsigned long *out_hwirq,
+				   unsigned int *out_type)
+{
+	unsigned int type = 0;
+
+	if (intsize == 2)
+		type = intspec[1];
+
+	switch (type) {
+	case 0: /* unofficial value, but we might as well let it work. */
+	case 4: /* official value for level triggering. */
+		*out_type = IRQ_TYPE_LEVEL_HIGH;
+		break;
+	case 1: /* official value for edge triggering. */
+		*out_type = IRQ_TYPE_EDGE_RISING;
+		break;
+	default: /* Nothing else is acceptable. */
+		return -EINVAL;
+	}
+
+	*out_hwirq = intspec[0];
+
+	return 0;
+}
+
+static int octeon_irq_cib_map(struct irq_domain *d,
+			      unsigned int virq, irq_hw_number_t hw)
+{
+	struct octeon_irq_cib_host_data *host_data = d->host_data;
+	struct octeon_irq_cib_chip_data *cd;
+
+	if (hw >= host_data->max_bits) {
+		pr_err("ERROR: %s mapping %u is too big!\n",
+		       irq_domain_get_of_node(d)->name, (unsigned)hw);
+		return -EINVAL;
+	}
+
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd)
+		return -ENOMEM;
+
+	cd->host_data = host_data;
+	cd->bit = hw;
+
+	irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
+				 handle_simple_irq);
+	irq_set_chip_data(virq, cd);
+	return 0;
+}
+
+static struct irq_domain_ops octeon_irq_domain_cib_ops = {
+	.map = octeon_irq_cib_map,
+	.unmap = octeon_irq_free_cd,
+	.xlate = octeon_irq_cib_xlat,
+};
+
+/* Chained handler: demultiplex pending (en & raw) CIB bits to their irqs. */
+static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
+{
+	u64 en;
+	u64 raw;
+	u64 bits;
+	int i;
+	int irq;
+	struct irq_domain *cib_domain = data;
+	struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;
+
+	en = cvmx_read_csr(host_data->en_reg);
+	raw = cvmx_read_csr(host_data->raw_reg);
+
+	bits = en & raw;
+
+	for (i = 0; i < host_data->max_bits; i++) {
+		if ((bits & 1ull << i) == 0)
+			continue;
+		irq = irq_find_mapping(cib_domain, i);
+		if (!irq) {
+			unsigned long flags;
+
+			pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
+				i, host_data->raw_reg);
+			raw_spin_lock_irqsave(&host_data->lock, flags);
+			en = cvmx_read_csr(host_data->en_reg);
+			en &= ~(1ull << i);
+			cvmx_write_csr(host_data->en_reg, en);
+			cvmx_write_csr(host_data->raw_reg, 1ull << i);
+			raw_spin_unlock_irqrestore(&host_data->lock, flags);
+		} else {
+			struct irq_desc *desc = irq_to_desc(irq);
+			struct irq_data *irq_data = irq_desc_get_irq_data(desc);
+			/* If edge, acknowledge the bit we will be sending. */
+			if (irqd_get_trigger_type(irq_data) &
+				IRQ_TYPE_EDGE_BOTH)
+				cvmx_write_csr(host_data->raw_reg, 1ull << i);
+			generic_handle_irq_desc(desc);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int __init octeon_irq_init_cib(struct device_node *ciu_node,
+				      struct device_node *parent)
+{
+	const __be32 *addr;
+	u32 val;
+	struct octeon_irq_cib_host_data *host_data;
+	int parent_irq;
+	int r;
+	struct irq_domain *cib_domain;
+
+	parent_irq = irq_of_parse_and_map(ciu_node, 0);
+	if (!parent_irq) {
+		pr_err("ERROR: Couldn't acquire parent_irq for %s\n",
+			ciu_node->name);
+		return -EINVAL;
+	}
+
+	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
+	if (!host_data)
+		return -ENOMEM;
+	raw_spin_lock_init(&host_data->lock);
+
+	addr = of_get_address(ciu_node, 0, NULL, NULL);
+	if (!addr) {
+		pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name);
+		return -EINVAL;
+	}
+	host_data->raw_reg = (u64)phys_to_virt(
+		of_translate_address(ciu_node, addr));
+
+	addr = of_get_address(ciu_node, 1, NULL, NULL);
+	if (!addr) {
+		pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name);
+		return -EINVAL;
+	}
+	host_data->en_reg = (u64)phys_to_virt(
+		of_translate_address(ciu_node, addr));
+
+	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
+	if (r) {
+		pr_err("ERROR: Couldn't read cavium,max-bits from %s\n",
+			ciu_node->name);
+		return r;
+	}
+	host_data->max_bits = val;
+
+	cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
+					   &octeon_irq_domain_cib_ops,
+					   host_data);
+	if (!cib_domain) {
+		pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
+		return -ENOMEM;
+	}
+
+	cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
+	cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */
+
+	r = request_irq(parent_irq, octeon_irq_cib_handler,
+			IRQF_NO_THREAD, "cib", cib_domain);
+	if (r) {
+		pr_err("request_irq cib failed %d\n", r);
+		return r;
+	}
+	pr_info("CIB interrupt controller probed: %llx %d\n",
+		host_data->raw_reg, host_data->max_bits);
+	return 0;
+}
+
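+/*
+ * CIU3 interrupt source numbers (intsn) are 20 bits, with the major
+ * block in bits 12..19.  Block 0x04 is the software (mbox) block,
+ * which is handled separately and therefore rejected here.
+ */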
+int octeon_irq_ciu3_xlat(struct irq_domain *d,
+			 struct device_node *node,
+			 const u32 *intspec,
+			 unsigned int intsize,
+			 unsigned long *out_hwirq,
+			 unsigned int *out_type)
+{
+	struct octeon_ciu3_info *ciu3_info = d->host_data;
+	unsigned int hwirq, type, intsn_major;
+	union cvmx_ciu3_iscx_ctl isc;
+
+	if (intsize < 2)
+		return -EINVAL;
+	hwirq = intspec[0];
+	type = intspec[1];
+
+	if (hwirq >= (1 << 20))
+		return -EINVAL;
+
+	intsn_major = hwirq >> 12;
+	switch (intsn_major) {
+	case 0x04: /* Software handled separately. */
+		return -EINVAL;
+	default:
+		break;
+	}
+
+	isc.u64 = cvmx_read_csr(ciu3_info->ciu3_addr + CIU3_ISC_CTL(hwirq));
+	if (!isc.s.imp)
+		return -EINVAL;
+
+	switch (type) {
+	case 4: /* official value for level triggering. */
+		*out_type = IRQ_TYPE_LEVEL_HIGH;
+		break;
+	case 0: /* unofficial value, but we might as well let it work. */
+	case 1: /* official value for edge triggering. */
+		*out_type = IRQ_TYPE_EDGE_RISING;
+		break;
+	default: /* Nothing else is acceptable. */
+		return -EINVAL;
+	}
+
+	*out_hwirq = hwirq;
+
+	return 0;
+}
+
+void octeon_irq_ciu3_enable(struct irq_data *data)
+{
+	int cpu;
+	union cvmx_ciu3_iscx_ctl isc_ctl;
+	union cvmx_ciu3_iscx_w1c isc_w1c;
+	u64 isc_ctl_addr;
+
+	struct octeon_ciu_chip_data *cd;
+
+	cpu = next_cpu_for_irq(data);
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	isc_w1c.u64 = 0;
+	isc_w1c.s.en = 1;
+	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
+
+	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
+	isc_ctl.u64 = 0;
+	isc_ctl.s.en = 1;
+	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
+	cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
+	cvmx_read_csr(isc_ctl_addr);
+}
+
+void octeon_irq_ciu3_disable(struct irq_data *data)
+{
+	u64 isc_ctl_addr;
+	union cvmx_ciu3_iscx_w1c isc_w1c;
+
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	isc_w1c.u64 = 0;
+	isc_w1c.s.en = 1;
+
+	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
+	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
+	cvmx_write_csr(isc_ctl_addr, 0);
+	cvmx_read_csr(isc_ctl_addr);
+}
+
+void octeon_irq_ciu3_ack(struct irq_data *data)
+{
+	u64 isc_w1c_addr;
+	union cvmx_ciu3_iscx_w1c isc_w1c;
+	struct octeon_ciu_chip_data *cd;
+	u32 trigger_type = irqd_get_trigger_type(data);
+
+	/*
+	 * We use a single irq_chip, so we have to do nothing to ack a
+	 * level interrupt.
+	 */
+	if (!(trigger_type & IRQ_TYPE_EDGE_BOTH))
+		return;
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	isc_w1c.u64 = 0;
+	isc_w1c.s.raw = 1;
+
+	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
+	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+	cvmx_read_csr(isc_w1c_addr);
+}
+
+void octeon_irq_ciu3_mask(struct irq_data *data)
+{
+	union cvmx_ciu3_iscx_w1c isc_w1c;
+	u64 isc_w1c_addr;
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	isc_w1c.u64 = 0;
+	isc_w1c.s.en = 1;
+
+	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
+	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+	cvmx_read_csr(isc_w1c_addr);
+}
+
+void octeon_irq_ciu3_mask_ack(struct irq_data *data)
+{
+	union cvmx_ciu3_iscx_w1c isc_w1c;
+	u64 isc_w1c_addr;
+	struct octeon_ciu_chip_data *cd;
+	u32 trigger_type = irqd_get_trigger_type(data);
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	isc_w1c.u64 = 0;
+	isc_w1c.s.en = 1;
+
+	/*
+	 * We use a single irq_chip, so only ack an edge (!level)
+	 * interrupt.
+	 */
+	if (trigger_type & IRQ_TYPE_EDGE_BOTH)
+		isc_w1c.s.raw = 1;
+
+	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
+	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+	cvmx_read_csr(isc_w1c_addr);
+}
+
+#ifdef CONFIG_SMP
+int octeon_irq_ciu3_set_affinity(struct irq_data *data,
+				 const struct cpumask *dest, bool force)
+{
+	union cvmx_ciu3_iscx_ctl isc_ctl;
+	union cvmx_ciu3_iscx_w1c isc_w1c;
+	u64 isc_ctl_addr;
+	int cpu;
+	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+	if (!cpumask_subset(dest, cpumask_of_node(cd->ciu_node)))
+		return -EINVAL;
+
+	if (!enable_one)
+		return IRQ_SET_MASK_OK;
+
+	cd = irq_data_get_irq_chip_data(data);
+	cpu = cpumask_first(dest);
+	if (cpu >= nr_cpu_ids)
+		cpu = smp_processor_id();
+	cd->current_cpu = cpu;
+
+	isc_w1c.u64 = 0;
+	isc_w1c.s.en = 1;
+	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
+
+	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
+	isc_ctl.u64 = 0;
+	isc_ctl.s.en = 1;
+	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
+	cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
+	cvmx_read_csr(isc_ctl_addr);
+
+	return IRQ_SET_MASK_OK;
+}
+#endif
+
+static struct irq_chip octeon_irq_chip_ciu3 = {
+	.name = "CIU3",
+	.irq_startup = edge_startup,
+	.irq_enable = octeon_irq_ciu3_enable,
+	.irq_disable = octeon_irq_ciu3_disable,
+	.irq_ack = octeon_irq_ciu3_ack,
+	.irq_mask = octeon_irq_ciu3_mask,
+	.irq_mask_ack = octeon_irq_ciu3_mask_ack,
+	.irq_unmask = octeon_irq_ciu3_enable,
+	.irq_set_type = octeon_irq_ciu_set_type,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu3_set_affinity,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq,
+			 irq_hw_number_t hw, struct irq_chip *chip)
+{
+	struct octeon_ciu3_info *ciu3_info = d->host_data;
+	struct octeon_ciu_chip_data *cd = kzalloc_node(sizeof(*cd), GFP_KERNEL,
+						       ciu3_info->node);
+	if (!cd)
+		return -ENOMEM;
+	cd->intsn = hw;
+	cd->current_cpu = -1;
+	cd->ciu3_addr = ciu3_info->ciu3_addr;
+	cd->ciu_node = ciu3_info->node;
+	irq_set_chip_and_handler(virq, chip, handle_edge_irq);
+	irq_set_chip_data(virq, cd);
+
+	return 0;
+}
+
+static int octeon_irq_ciu3_map(struct irq_domain *d,
+			       unsigned int virq, irq_hw_number_t hw)
+{
+	return octeon_irq_ciu3_mapx(d, virq, hw, &octeon_irq_chip_ciu3);
+}
+
+static struct irq_domain_ops octeon_dflt_domain_ciu3_ops = {
+	.map = octeon_irq_ciu3_map,
+	.unmap = octeon_irq_free_cd,
+	.xlate = octeon_irq_ciu3_xlat,
+};
+
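+/*
+ * IP2 dispatch for CIU3: read this core's DEST_PP_INT register
+ * (index 3 * core for IP2), recover the pending intsn, and hand it to
+ * the irq domain registered for its major block.
+ */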
+static void octeon_irq_ciu3_ip2(void)
+{
+	union cvmx_ciu3_destx_pp_int dest_pp_int;
+	struct octeon_ciu3_info *ciu3_info;
+	u64 ciu3_addr;
+
+	ciu3_info = __this_cpu_read(octeon_ciu3_info);
+	ciu3_addr = ciu3_info->ciu3_addr;
+
+	dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(3 * cvmx_get_local_core_num()));
+
+	if (likely(dest_pp_int.s.intr)) {
+		irq_hw_number_t intsn = dest_pp_int.s.intsn;
+		irq_hw_number_t hw;
+		struct irq_domain *domain;
+		/* Get the domain to use from the major block */
+		int block = intsn >> 12;
+		int ret;
+
+		domain = ciu3_info->domain[block];
+		if (ciu3_info->intsn2hw[block])
+			hw = ciu3_info->intsn2hw[block](domain, intsn);
+		else
+			hw = intsn;
+
+		ret = handle_domain_irq(domain, hw, NULL);
+		if (ret < 0) {
+			union cvmx_ciu3_iscx_w1c isc_w1c;
+			u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
+
+			isc_w1c.u64 = 0;
+			isc_w1c.s.en = 1;
+			cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+			cvmx_read_csr(isc_w1c_addr);
+			spurious_interrupt();
+		}
+	} else {
+		spurious_interrupt();
+	}
+}
+
+/*
+ * There are 10 mailboxes per core, numbered from zero.
+ * The base mailbox for a core is core * 10.
+ */
+static unsigned int octeon_irq_ciu3_base_mbox_intsn(int core)
+{
+	/* SW (mbox) are 0x04 in bits 12..19 */
+	return 0x04000 + CIU3_MBOX_PER_CORE * core;
+}
+
+static unsigned int octeon_irq_ciu3_mbox_intsn_for_core(int core, unsigned int mbox)
+{
+	return octeon_irq_ciu3_base_mbox_intsn(core) + mbox;
+}
+
+static unsigned int octeon_irq_ciu3_mbox_intsn_for_cpu(int cpu, unsigned int mbox)
+{
+	int local_core = octeon_coreid_for_cpu(cpu) & 0x3f;
+
+	return octeon_irq_ciu3_mbox_intsn_for_core(local_core, mbox);
+}
+
+static void octeon_irq_ciu3_mbox(void)
+{
+	union cvmx_ciu3_destx_pp_int dest_pp_int;
+	struct octeon_ciu3_info *ciu3_info;
+	u64 ciu3_addr;
+	int core = cvmx_get_local_core_num();
+
+	ciu3_info = __this_cpu_read(octeon_ciu3_info);
+	ciu3_addr = ciu3_info->ciu3_addr;
+
+	dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(1 + 3 * core));
+
+	if (likely(dest_pp_int.s.intr)) {
+		irq_hw_number_t intsn = dest_pp_int.s.intsn;
+		int mbox = intsn - octeon_irq_ciu3_base_mbox_intsn(core);
+
+		if (likely(mbox >= 0 && mbox < CIU3_MBOX_PER_CORE)) {
+			do_IRQ(mbox + OCTEON_IRQ_MBOX0);
+		} else {
+			union cvmx_ciu3_iscx_w1c isc_w1c;
+			u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
+
+			isc_w1c.u64 = 0;
+			isc_w1c.s.en = 1;
+			cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+			cvmx_read_csr(isc_w1c_addr);
+			spurious_interrupt();
+		}
+	} else {
+		spurious_interrupt();
+	}
+}
+
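+/*
+ * Raise a mailbox interrupt on a target cpu by setting (W1S) the RAW
+ * bit of that cpu's mailbox intsn.
+ */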
+void octeon_ciu3_mbox_send(int cpu, unsigned int mbox)
+{
+	struct octeon_ciu3_info *ciu3_info;
+	unsigned int intsn;
+	union cvmx_ciu3_iscx_w1s isc_w1s;
+	u64 isc_w1s_addr;
+
+	if (WARN_ON_ONCE(mbox >= CIU3_MBOX_PER_CORE))
+		return;
+
+	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
+	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
+	isc_w1s_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1S(intsn);
+
+	isc_w1s.u64 = 0;
+	isc_w1s.s.raw = 1;
+
+	cvmx_write_csr(isc_w1s_addr, isc_w1s.u64);
+	cvmx_read_csr(isc_w1s_addr);
+}
+
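+/*
+ * (Re)program one cpu's mailbox ISC: first clear the enable bit (W1C)
+ * and zero the CTL register, then, when enabling, point the CTL at the
+ * cpu's IP3 IDT.
+ */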
+static void octeon_irq_ciu3_mbox_set_enable(struct irq_data *data, int cpu, bool en)
+{
+	struct octeon_ciu3_info *ciu3_info;
+	unsigned int intsn;
+	u64 isc_ctl_addr, isc_w1c_addr;
+	union cvmx_ciu3_iscx_ctl isc_ctl;
+	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
+	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
+	isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
+	isc_ctl_addr = ciu3_info->ciu3_addr + CIU3_ISC_CTL(intsn);
+
+	isc_ctl.u64 = 0;
+	isc_ctl.s.en = 1;
+
+	cvmx_write_csr(isc_w1c_addr, isc_ctl.u64);
+	cvmx_write_csr(isc_ctl_addr, 0);
+	if (en) {
+		unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);
+
+		isc_ctl.u64 = 0;
+		isc_ctl.s.en = 1;
+		isc_ctl.s.idt = idt;
+		cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
+	}
+	cvmx_read_csr(isc_ctl_addr);
+}
+
+static void octeon_irq_ciu3_mbox_enable(struct irq_data *data)
+{
+	int cpu;
+	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+	WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
+
+	for_each_online_cpu(cpu)
+		octeon_irq_ciu3_mbox_set_enable(data, cpu, true);
+}
+
+static void octeon_irq_ciu3_mbox_disable(struct irq_data *data)
+{
+	int cpu;
+	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+	WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
+
+	for_each_online_cpu(cpu)
+		octeon_irq_ciu3_mbox_set_enable(data, cpu, false);
+}
+
+static void octeon_irq_ciu3_mbox_ack(struct irq_data *data)
+{
+	struct octeon_ciu3_info *ciu3_info;
+	unsigned int intsn;
+	u64 isc_w1c_addr;
+	union cvmx_ciu3_iscx_w1c isc_w1c;
+	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+	intsn = octeon_irq_ciu3_mbox_intsn_for_core(cvmx_get_local_core_num(), mbox);
+
+	isc_w1c.u64 = 0;
+	isc_w1c.s.raw = 1;
+
+	ciu3_info = __this_cpu_read(octeon_ciu3_info);
+	isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
+	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+	cvmx_read_csr(isc_w1c_addr);
+}
+
+static void octeon_irq_ciu3_mbox_cpu_online(struct irq_data *data)
+{
+	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), true);
+}
+
+static void octeon_irq_ciu3_mbox_cpu_offline(struct irq_data *data)
+{
+	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), false);
+}
+
+static int octeon_irq_ciu3_alloc_resources(struct octeon_ciu3_info *ciu3_info)
+{
+	u64 b = ciu3_info->ciu3_addr;
+	int idt_ip2, idt_ip3, idt_ip4;
+	int unused_idt2;
+	int core = cvmx_get_local_core_num();
+	int i;
+
+	__this_cpu_write(octeon_ciu3_info, ciu3_info);
+
+	/*
+	 * There are four IDTs per core, numbered from 1 because zero is
+	 * reserved.  The base IDT for a core is 4 * core + 1.
+	 */
+	idt_ip2 = core * 4 + 1;
+	idt_ip3 = core * 4 + 2;
+	idt_ip4 = core * 4 + 3;
+	unused_idt2 = core * 4 + 4;
+	__this_cpu_write(octeon_irq_ciu3_idt_ip2, idt_ip2);
+	__this_cpu_write(octeon_irq_ciu3_idt_ip3, idt_ip3);
+
+	/* ip2 interrupts for this CPU */
+	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip2), 0);
+	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip2, 0), 1ull << core);
+	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip2), 0);
+
+	/* ip3 interrupts for this CPU */
+	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip3), 1);
+	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip3, 0), 1ull << core);
+	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip3), 0);
+
+	/* ip4 interrupts for this CPU */
+	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip4), 2);
+	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip4, 0), 0);
+	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip4), 0);
+
+	cvmx_write_csr(b + CIU3_IDT_CTL(unused_idt2), 0);
+	cvmx_write_csr(b + CIU3_IDT_PP(unused_idt2, 0), 0);
+	cvmx_write_csr(b + CIU3_IDT_IO(unused_idt2), 0);
+
+	for (i = 0; i < CIU3_MBOX_PER_CORE; i++) {
+		unsigned int intsn = octeon_irq_ciu3_mbox_intsn_for_core(core, i);
+
+		cvmx_write_csr(b + CIU3_ISC_W1C(intsn), 2);
+		cvmx_write_csr(b + CIU3_ISC_CTL(intsn), 0);
+	}
+
+	return 0;
+}
+
+static void octeon_irq_setup_secondary_ciu3(void)
+{
+	struct octeon_ciu3_info *ciu3_info;
+
+	ciu3_info = octeon_ciu3_info_per_node[cvmx_get_node_num()];
+	octeon_irq_ciu3_alloc_resources(ciu3_info);
+	irq_cpu_online();
+
+	/* Enable the CIU lines */
+	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
+	if (octeon_irq_use_ip4)
+		set_c0_status(STATUSF_IP4);
+	else
+		clear_c0_status(STATUSF_IP4);
+}
+
+static struct irq_chip octeon_irq_chip_ciu3_mbox = {
+	.name = "CIU3-M",
+	.irq_enable = octeon_irq_ciu3_mbox_enable,
+	.irq_disable = octeon_irq_ciu3_mbox_disable,
+	.irq_ack = octeon_irq_ciu3_mbox_ack,
+
+	.irq_cpu_online = octeon_irq_ciu3_mbox_cpu_online,
+	.irq_cpu_offline = octeon_irq_ciu3_mbox_cpu_offline,
+	.flags = IRQCHIP_ONOFFLINE_ENABLED,
+};
+
+static int __init octeon_irq_init_ciu3(struct device_node *ciu_node,
+				       struct device_node *parent)
+{
+	int i;
+	int node;
+	struct irq_domain *domain;
+	struct octeon_ciu3_info *ciu3_info;
+	const __be32 *zero_addr;
+	u64 base_addr;
+	union cvmx_ciu3_const consts;
+
+	node = 0; /* of_node_to_nid(ciu_node); */
+	ciu3_info = kzalloc_node(sizeof(*ciu3_info), GFP_KERNEL, node);
+
+	if (!ciu3_info)
+		return -ENOMEM;
+
+	zero_addr = of_get_address(ciu_node, 0, NULL, NULL);
+	if (WARN_ON(!zero_addr))
+		return -EINVAL;
+
+	base_addr = of_translate_address(ciu_node, zero_addr);
+	base_addr = (u64)phys_to_virt(base_addr);
+
+	ciu3_info->ciu3_addr = base_addr;
+	ciu3_info->node = node;
+
+	consts.u64 = cvmx_read_csr(base_addr + CIU3_CONST);
+
+	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu3;
+
+	octeon_irq_ip2 = octeon_irq_ciu3_ip2;
+	octeon_irq_ip3 = octeon_irq_ciu3_mbox;
+	octeon_irq_ip4 = octeon_irq_ip4_mask;
+
+	if (node == cvmx_get_node_num()) {
+		/* MIPS internal */
+		octeon_irq_init_core();
+
+		/* Only do per CPU things if it is the CIU of the boot node. */
+		i = irq_alloc_descs_from(OCTEON_IRQ_MBOX0, 8, node);
+		WARN_ON(i < 0);
+
+		for (i = 0; i < 8; i++)
+			irq_set_chip_and_handler(i + OCTEON_IRQ_MBOX0,
+						 &octeon_irq_chip_ciu3_mbox, handle_percpu_irq);
+	}
+
+	/*
+	 * Initialize all domains to use the default domain. Specific major
+	 * blocks will overwrite the default domain as needed.
+	 */
+	domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops,
+				     ciu3_info);
+	for (i = 0; i < MAX_CIU3_DOMAINS; i++)
+		ciu3_info->domain[i] = domain;
+
+	octeon_ciu3_info_per_node[node] = ciu3_info;
+
+	if (node == cvmx_get_node_num()) {
+		/* Only do per CPU things if it is the CIU of the boot node. */
+		octeon_irq_ciu3_alloc_resources(ciu3_info);
+		if (node == 0)
+			irq_set_default_host(domain);
+
+		octeon_irq_use_ip4 = false;
+		/* Enable the CIU lines */
+		set_c0_status(STATUSF_IP2 | STATUSF_IP3);
+		clear_c0_status(STATUSF_IP4);
+	}
+
+	return 0;
+}
+
+static struct of_device_id ciu_types[] __initdata = {
+	{.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
+	{.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
+	{.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
+	{.compatible = "cavium,octeon-7890-ciu3", .data = octeon_irq_init_ciu3},
+	{.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
+	{}
+};
+
+void __init arch_init_irq(void)
+{
+#ifdef CONFIG_SMP
+	/* Set the default affinity to the boot cpu. */
+	cpumask_clear(irq_default_affinity);
+	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+#endif
+	of_irq_init(ciu_types);
+}
+
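+/*
+ * Top-level interrupt dispatch: loop while any enabled interrupt is
+ * pending (Cause & Status & IM), servicing in priority order IP2,
+ * IP3, IP4, then any remaining CPU interrupt lines.
+ */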
+asmlinkage void plat_irq_dispatch(void)
+{
+	unsigned long cop0_cause;
+	unsigned long cop0_status;
+
+	while (1) {
+		cop0_cause = read_c0_cause();
+		cop0_status = read_c0_status();
+		cop0_cause &= cop0_status;
+		cop0_cause &= ST0_IM;
+
+		if (cop0_cause & STATUSF_IP2)
+			octeon_irq_ip2();
+		else if (cop0_cause & STATUSF_IP3)
+			octeon_irq_ip3();
+		else if (cop0_cause & STATUSF_IP4)
+			octeon_irq_ip4();
+		else if (cop0_cause)
+			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
+		else
+			break;
+	}
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+void octeon_fixup_irqs(void)
+{
+	irq_cpu_offline();
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block)
+{
+	struct octeon_ciu3_info *ciu3_info;
+
+	ciu3_info = octeon_ciu3_info_per_node[node & CVMX_NODE_MASK];
+	return ciu3_info->domain[block];
+}
+EXPORT_SYMBOL(octeon_irq_get_block_domain);
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/octeon-memcpy.S b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/octeon-memcpy.S
new file mode 100644
index 0000000..0a7c983
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -0,0 +1,482 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Unified implementation of memcpy, memmove and the __copy_user backend.
+ *
+ * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
+ * Copyright (C) 2002 Broadcom, Inc.
+ *   memcpy/copy_user author: Mark Vandevoorde
+ *
+ * Mnemonic names for arguments to memcpy/__copy_user
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
+#define dst a0
+#define src a1
+#define len a2
+
+/*
+ * Spec
+ *
+ * memcpy copies len bytes from src to dst and sets v0 to dst.
+ * It assumes that
+ *   - src and dst don't overlap
+ *   - src is readable
+ *   - dst is writable
+ * memcpy uses the standard calling convention
+ *
+ * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
+ * the number of uncopied bytes due to an exception caused by a read or write.
+ * __copy_user assumes that src and dst don't overlap, and that the call is
+ * implementing one of the following:
+ *   copy_to_user
+ *     - src is readable  (no exceptions when reading src)
+ *   copy_from_user
+ *     - dst is writable  (no exceptions when writing dst)
+ * __copy_user uses a non-standard calling convention; see
+ * arch/mips/include/asm/uaccess.h
+ *
+ * When an exception happens on a load, the handler must
+ * ensure that all of the destination buffer is overwritten to prevent
+ * leaking information to user mode programs.
+ */
+
+/*
+ * Implementation
+ */
+
+/*
+ * The exception handler for loads requires that:
+ *  1- AT contain the address of the byte just past the end of the source
+ *     of the copy,
+ *  2- src_entry <= src < AT, and
+ *  3- (dst - src) == (dst_entry - src_entry),
+ * The _entry suffix denotes values when __copy_user was called.
+ *
+ * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
+ * (2) is met by incrementing src by the number of bytes copied
+ * (3) is met by not doing loads between a pair of increments of dst and src
+ *
+ * The exception handlers for stores adjust len (if necessary) and return.
+ * These handlers do not need to overwrite any data.
+ *
+ * For __rmemcpy and memmove an exception is always a kernel bug, therefore
+ * they're not protected.
+ */
+
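+/*
+ * EXC(insn, addr, handler) emits the memory access at local label 9
+ * and adds a __ex_table entry so that a fault in the access is fixed
+ * up by the named handler.
+ */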
+#define EXC(inst_reg,addr,handler)		\
+9:	inst_reg, addr;				\
+	.section __ex_table,"a";		\
+	PTR	9b, handler;			\
+	.previous
+
+/*
+ * Only on the 64-bit kernel can we make use of 64-bit registers.
+ */
+
+#define LOAD   ld
+#define LOADL  ldl
+#define LOADR  ldr
+#define STOREL sdl
+#define STORER sdr
+#define STORE  sd
+#define ADD    daddu
+#define SUB    dsubu
+#define SRL    dsrl
+#define SRA    dsra
+#define SLL    dsll
+#define SLLV   dsllv
+#define SRLV   dsrlv
+#define NBYTES 8
+#define LOG_NBYTES 3
+
+/*
+ * As we share a code base with the mips32 tree (which uses the o32 ABI
+ * register definitions), we need to redefine the register definitions from
+ * the n64 ABI register naming to the o32 ABI register naming.
+ */
+#undef t0
+#undef t1
+#undef t2
+#undef t3
+#define t0	$8
+#define t1	$9
+#define t2	$10
+#define t3	$11
+#define t4	$12
+#define t5	$13
+#define t6	$14
+#define t7	$15
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST	LOADL
+#define STFIRST STORER
+#define STREST	STOREL
+#define SHIFT_DISCARD SLLV
+#else
+#define LDFIRST LOADL
+#define LDREST	LOADR
+#define STFIRST STOREL
+#define STREST	STORER
+#define SHIFT_DISCARD SRLV
+#endif
+
+#define FIRST(unit) ((unit)*NBYTES)
+#define REST(unit)  (FIRST(unit)+NBYTES-1)
+#define UNIT(unit)  FIRST(unit)
+
+#define ADDRMASK (NBYTES-1)
+
+	.text
+	.set	noreorder
+	.set	noat
+
+/*
+ * A combined memcpy/__copy_user
+ * __copy_user sets len to 0 for success; else to an upper bound of
+ * the number of uncopied bytes.
+ * memcpy sets v0 to dst.
+ */
+	.align	5
+LEAF(memcpy)					/* a0=dst a1=src a2=len */
+EXPORT_SYMBOL(memcpy)
+	move	v0, dst				/* return value */
+__memcpy:
+FEXPORT(__copy_user)
+EXPORT_SYMBOL(__copy_user)
+	/*
+	 * Note: dst & src may be unaligned, len may be 0
+	 * Temps
+	 */
+	#
+	# Octeon doesn't care if the destination is unaligned. The hardware
+	# can fix it faster than we can special case the assembly.
+	#
+	pref	0, 0(src)
+	sltu	t0, len, NBYTES		# Check if < 1 word
+	bnez	t0, copy_bytes_checklen
+	 and	t0, src, ADDRMASK	# Check if src unaligned
+	bnez	t0, src_unaligned
+	 sltu	t0, len, 4*NBYTES	# Check if < 4 words
+	bnez	t0, less_than_4units
+	 sltu	t0, len, 8*NBYTES	# Check if < 8 words
+	bnez	t0, less_than_8units
+	 sltu	t0, len, 16*NBYTES	# Check if < 16 words
+	bnez	t0, cleanup_both_aligned
+	 sltu	t0, len, 128+1		# Check if len < 129
+	bnez	t0, 1f			# Skip prefetch if len is too short
+	 sltu	t0, len, 256+1		# Check if len < 257
+	bnez	t0, 1f			# Skip prefetch if len is too short
+	 pref	0, 128(src)		# We must not prefetch invalid addresses
+	#
+	# This is where we loop if there is more than 128 bytes left
+2:	pref	0, 256(src)		# We must not prefetch invalid addresses
+	#
+	# This is where we loop if we can't prefetch anymore
+1:
+EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
+EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
+EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
+EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
+	SUB	len, len, 16*NBYTES
+EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p16u)
+EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p15u)
+EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p14u)
+EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p13u)
+EXC(	LOAD	t0, UNIT(4)(src),	l_exc_copy)
+EXC(	LOAD	t1, UNIT(5)(src),	l_exc_copy)
+EXC(	LOAD	t2, UNIT(6)(src),	l_exc_copy)
+EXC(	LOAD	t3, UNIT(7)(src),	l_exc_copy)
+EXC(	STORE	t0, UNIT(4)(dst),	s_exc_p12u)
+EXC(	STORE	t1, UNIT(5)(dst),	s_exc_p11u)
+EXC(	STORE	t2, UNIT(6)(dst),	s_exc_p10u)
+	ADD	src, src, 16*NBYTES
+EXC(	STORE	t3, UNIT(7)(dst),	s_exc_p9u)
+	ADD	dst, dst, 16*NBYTES
+EXC(	LOAD	t0, UNIT(-8)(src),	l_exc_copy_rewind16)
+EXC(	LOAD	t1, UNIT(-7)(src),	l_exc_copy_rewind16)
+EXC(	LOAD	t2, UNIT(-6)(src),	l_exc_copy_rewind16)
+EXC(	LOAD	t3, UNIT(-5)(src),	l_exc_copy_rewind16)
+EXC(	STORE	t0, UNIT(-8)(dst),	s_exc_p8u)
+EXC(	STORE	t1, UNIT(-7)(dst),	s_exc_p7u)
+EXC(	STORE	t2, UNIT(-6)(dst),	s_exc_p6u)
+EXC(	STORE	t3, UNIT(-5)(dst),	s_exc_p5u)
+EXC(	LOAD	t0, UNIT(-4)(src),	l_exc_copy_rewind16)
+EXC(	LOAD	t1, UNIT(-3)(src),	l_exc_copy_rewind16)
+EXC(	LOAD	t2, UNIT(-2)(src),	l_exc_copy_rewind16)
+EXC(	LOAD	t3, UNIT(-1)(src),	l_exc_copy_rewind16)
+EXC(	STORE	t0, UNIT(-4)(dst),	s_exc_p4u)
+EXC(	STORE	t1, UNIT(-3)(dst),	s_exc_p3u)
+EXC(	STORE	t2, UNIT(-2)(dst),	s_exc_p2u)
+EXC(	STORE	t3, UNIT(-1)(dst),	s_exc_p1u)
+	sltu	t0, len, 256+1		# See if we can prefetch more
+	beqz	t0, 2b
+	 sltu	t0, len, 128		# See if we can loop again
+	beqz	t0, 1b
+	 nop
+	#
+	# Jump here if there are less than 16*NBYTES left.
+	#
+cleanup_both_aligned:
+	beqz	len, done
+	 sltu	t0, len, 8*NBYTES
+	bnez	t0, less_than_8units
+	 nop
+EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
+EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
+EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
+EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
+	SUB	len, len, 8*NBYTES
+EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p8u)
+EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p7u)
+EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p6u)
+EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p5u)
+EXC(	LOAD	t0, UNIT(4)(src),	l_exc_copy)
+EXC(	LOAD	t1, UNIT(5)(src),	l_exc_copy)
+EXC(	LOAD	t2, UNIT(6)(src),	l_exc_copy)
+EXC(	LOAD	t3, UNIT(7)(src),	l_exc_copy)
+EXC(	STORE	t0, UNIT(4)(dst),	s_exc_p4u)
+EXC(	STORE	t1, UNIT(5)(dst),	s_exc_p3u)
+EXC(	STORE	t2, UNIT(6)(dst),	s_exc_p2u)
+EXC(	STORE	t3, UNIT(7)(dst),	s_exc_p1u)
+	ADD	src, src, 8*NBYTES
+	beqz	len, done
+	 ADD	dst, dst, 8*NBYTES
+	#
+	# Jump here if there are less than 8*NBYTES left.
+	#
+less_than_8units:
+	sltu	t0, len, 4*NBYTES
+	bnez	t0, less_than_4units
+	 nop
+EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
+EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
+EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
+EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
+	SUB	len, len, 4*NBYTES
+EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p4u)
+EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p3u)
+EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p2u)
+EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p1u)
+	ADD	src, src, 4*NBYTES
+	beqz	len, done
+	 ADD	dst, dst, 4*NBYTES
+	#
+	# Jump here if there are less than 4*NBYTES left. This means
+	# we may need to copy up to 3 NBYTES words.
+	#
+less_than_4units:
+	sltu	t0, len, 1*NBYTES
+	bnez	t0, copy_bytes_checklen
+	 nop
+	#
+	# 1) Copy NBYTES, then check length again
+	#
+EXC(	LOAD	t0, 0(src),		l_exc)
+	SUB	len, len, NBYTES
+	sltu	t1, len, 8
+EXC(	STORE	t0, 0(dst),		s_exc_p1u)
+	ADD	src, src, NBYTES
+	bnez	t1, copy_bytes_checklen
+	 ADD	dst, dst, NBYTES
+	#
+	# 2) Copy NBYTES, then check length again
+	#
+EXC(	LOAD	t0, 0(src),		l_exc)
+	SUB	len, len, NBYTES
+	sltu	t1, len, 8
+EXC(	STORE	t0, 0(dst),		s_exc_p1u)
+	ADD	src, src, NBYTES
+	bnez	t1, copy_bytes_checklen
+	 ADD	dst, dst, NBYTES
+	#
+	# 3) Copy NBYTES, then check length again
+	#
+EXC(	LOAD	t0, 0(src),		l_exc)
+	SUB	len, len, NBYTES
+	ADD	src, src, NBYTES
+	ADD	dst, dst, NBYTES
+	b copy_bytes_checklen
+EXC(	 STORE	t0, -8(dst),		s_exc_p1u)
+
+src_unaligned:
+#define rem t8
+	SRL	t0, len, LOG_NBYTES+2	 # +2 for 4 units/iter
+	beqz	t0, cleanup_src_unaligned
+	 and	rem, len, (4*NBYTES-1)	 # rem = len % 4*NBYTES
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+EXC(	LDFIRST t0, FIRST(0)(src),	l_exc)
+EXC(	LDFIRST t1, FIRST(1)(src),	l_exc_copy)
+	SUB	len, len, 4*NBYTES
+EXC(	LDREST	t0, REST(0)(src),	l_exc_copy)
+EXC(	LDREST	t1, REST(1)(src),	l_exc_copy)
+EXC(	LDFIRST t2, FIRST(2)(src),	l_exc_copy)
+EXC(	LDFIRST t3, FIRST(3)(src),	l_exc_copy)
+EXC(	LDREST	t2, REST(2)(src),	l_exc_copy)
+EXC(	LDREST	t3, REST(3)(src),	l_exc_copy)
+	ADD	src, src, 4*NBYTES
+EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p4u)
+EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p3u)
+EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p2u)
+EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p1u)
+	bne	len, rem, 1b
+	 ADD	dst, dst, 4*NBYTES
+
+cleanup_src_unaligned:
+	beqz	len, done
+	 and	rem, len, NBYTES-1  # rem = len % NBYTES
+	beq	rem, len, copy_bytes
+	 nop
+1:
+EXC(	LDFIRST t0, FIRST(0)(src),	l_exc)
+EXC(	LDREST	t0, REST(0)(src),	l_exc_copy)
+	SUB	len, len, NBYTES
+EXC(	STORE	t0, 0(dst),		s_exc_p1u)
+	ADD	src, src, NBYTES
+	bne	len, rem, 1b
+	 ADD	dst, dst, NBYTES
+
+copy_bytes_checklen:
+	beqz	len, done
+	 nop
+copy_bytes:
+	/* 0 < len < NBYTES  */
+#define COPY_BYTE(N)			\
+EXC(	lb	t0, N(src), l_exc);	\
+	SUB	len, len, 1;		\
+	beqz	len, done;		\
+EXC(	 sb	t0, N(dst), s_exc_p1)
+
+	COPY_BYTE(0)
+	COPY_BYTE(1)
+	COPY_BYTE(2)
+	COPY_BYTE(3)
+	COPY_BYTE(4)
+	COPY_BYTE(5)
+EXC(	lb	t0, NBYTES-2(src), l_exc)
+	SUB	len, len, 1
+	jr	ra
+EXC(	 sb	t0, NBYTES-2(dst), s_exc_p1)
+done:
+	jr	ra
+	 nop
+	END(memcpy)
+
+l_exc_copy_rewind16:
+	/* Rewind src and dst by 16*NBYTES for l_exc_copy */
+	SUB	src, src, 16*NBYTES
+	SUB	dst, dst, 16*NBYTES
+l_exc_copy:
+	/*
+	 * Copy bytes from src until faulting load address (or until a
+	 * lb faults)
+	 *
+	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
+	 * may be more than a byte beyond the last address.
+	 * Hence, the lb below may get an exception.
+	 *
+	 * Assumes src < THREAD_BUADDR($28)
+	 */
+	LOAD	t0, TI_TASK($28)
+	LOAD	t0, THREAD_BUADDR(t0)
+1:
+EXC(	lb	t1, 0(src),	l_exc)
+	ADD	src, src, 1
+	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
+	bne	src, t0, 1b
+	 ADD	dst, dst, 1
+l_exc:
+	LOAD	t0, TI_TASK($28)
+	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
+	SUB	len, AT, t0		# len number of uncopied bytes
+	jr	ra
+	 nop
+
+
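+/*
+ * Store-fault fixups: s_exc_pNu adds back the N*NBYTES bytes that were
+ * already subtracted from len but never stored, then returns.
+ */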
+#define SEXC(n)				\
+s_exc_p ## n ## u:			\
+	jr	ra;			\
+	 ADD	len, len, n*NBYTES
+
+SEXC(16)
+SEXC(15)
+SEXC(14)
+SEXC(13)
+SEXC(12)
+SEXC(11)
+SEXC(10)
+SEXC(9)
+SEXC(8)
+SEXC(7)
+SEXC(6)
+SEXC(5)
+SEXC(4)
+SEXC(3)
+SEXC(2)
+SEXC(1)
+
+s_exc_p1:
+	jr	ra
+	 ADD	len, len, 1
+s_exc:
+	jr	ra
+	 nop
+
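+/*
+ * memmove: if the regions do not overlap (dst + len <= src or
+ * src + len <= dst), fall through to the plain memcpy path; otherwise
+ * copy a byte at a time via __rmemcpy, backwards when src precedes dst.
+ */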
+	.align	5
+LEAF(memmove)
+EXPORT_SYMBOL(memmove)
+	ADD	t0, a0, a2
+	ADD	t1, a1, a2
+	sltu	t0, a1, t0			# dst + len <= src -> memcpy
+	sltu	t1, a0, t1			# dst >= src + len -> memcpy
+	and	t0, t1
+	beqz	t0, __memcpy
+	 move	v0, a0				/* return value */
+	beqz	a2, r_out
+	END(memmove)
+
+	/* fall through to __rmemcpy */
+LEAF(__rmemcpy)					/* a0=dst a1=src a2=len */
+	 sltu	t0, a1, a0
+	beqz	t0, r_end_bytes_up		# src >= dst
+	 nop
+	ADD	a0, a2				# dst = dst + len
+	ADD	a1, a2				# src = src + len
+
+r_end_bytes:
+	lb	t0, -1(a1)
+	SUB	a2, a2, 0x1
+	sb	t0, -1(a0)
+	SUB	a1, a1, 0x1
+	bnez	a2, r_end_bytes
+	 SUB	a0, a0, 0x1
+
+r_out:
+	jr	ra
+	 move	a2, zero
+
+r_end_bytes_up:
+	lb	t0, (a1)
+	SUB	a2, a2, 0x1
+	sb	t0, (a0)
+	ADD	a1, a1, 0x1
+	bnez	a2, r_end_bytes_up
+	 ADD	a0, a0, 0x1
+
+	jr	ra
+	 move	a2, zero
+	END(__rmemcpy)
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/octeon-platform.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/octeon-platform.c
new file mode 100644
index 0000000..e1e2411
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/octeon-platform.c
@@ -0,0 +1,1074 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2017 Cavium, Inc.
+ * Copyright (C) 2008 Wind River Systems
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/of_platform.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-helper-board.h>
+
+#ifdef CONFIG_USB
+#include <linux/usb/ehci_def.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
+#include <asm/octeon/cvmx-uctlx-defs.h>
+
+#define CVMX_UAHCX_EHCI_USBCMD	(CVMX_ADD_IO_SEG(0x00016F0000000010ull))
+#define CVMX_UAHCX_OHCI_USBCMD	(CVMX_ADD_IO_SEG(0x00016F0000000408ull))
+
+static DEFINE_MUTEX(octeon2_usb_clocks_mutex);
+
+static int octeon2_usb_clock_start_cnt;
+
+static int __init octeon2_usb_reset(void)
+{
+	union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
+	u32 ucmd;
+
+	if (!OCTEON_IS_OCTEON2())
+		return 0;
+
+	clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+	if (clk_rst_ctl.s.hrst) {
+		ucmd = cvmx_read64_uint32(CVMX_UAHCX_EHCI_USBCMD);
+		ucmd &= ~CMD_RUN;
+		cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);
+		mdelay(2);
+		ucmd |= CMD_RESET;
+		cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);
+		ucmd = cvmx_read64_uint32(CVMX_UAHCX_OHCI_USBCMD);
+		ucmd |= CMD_RUN;
+		cvmx_write64_uint32(CVMX_UAHCX_OHCI_USBCMD, ucmd);
+	}
+
+	return 0;
+}
+arch_initcall(octeon2_usb_reset);
+
+static void octeon2_usb_clocks_start(struct device *dev)
+{
+	u64 div;
+	union cvmx_uctlx_if_ena if_ena;
+	union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
+	union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status;
+	int i;
+	unsigned long io_clk_64_to_ns;
+	u32 clock_rate = 12000000;
+	bool is_crystal_clock = false;
+
+
+	mutex_lock(&octeon2_usb_clocks_mutex);
+
+	octeon2_usb_clock_start_cnt++;
+	if (octeon2_usb_clock_start_cnt != 1)
+		goto exit;
+
+	io_clk_64_to_ns = 64000000000ull / octeon_get_io_clock_rate();
+
+	if (dev->of_node) {
+		struct device_node *uctl_node;
+		const char *clock_type;
+
+		uctl_node = of_get_parent(dev->of_node);
+		if (!uctl_node) {
+			dev_err(dev, "No UCTL device node\n");
+			goto exit;
+		}
+		i = of_property_read_u32(uctl_node,
+					 "refclk-frequency", &clock_rate);
+		if (i) {
+			dev_err(dev, "No UCTL \"refclk-frequency\"\n");
+			goto exit;
+		}
+		i = of_property_read_string(uctl_node,
+					    "refclk-type", &clock_type);
+
+		if (!i && strcmp("crystal", clock_type) == 0)
+			is_crystal_clock = true;
+	}
+
+	/*
+	 * Step 1: Wait for the voltages to stabilize.  That has surely
+	 * happened before the kernel starts.
+	 *
+	 * Step 2: Enable SCLK of UCTL by writing UCTL0_IF_ENA[EN] = 1
+	 */
+	if_ena.u64 = 0;
+	if_ena.s.en = 1;
+	cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64);
+
+	for (i = 0; i <= 1; i++) {
+		port_ctl_status.u64 =
+			cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0));
+		/* Set txvreftune to 15 to obtain compliant 'eye' diagram. */
+		port_ctl_status.s.txvreftune = 15;
+		port_ctl_status.s.txrisetune = 1;
+		port_ctl_status.s.txpreemphasistune = 1;
+		cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0),
+			       port_ctl_status.u64);
+	}
+
+	/* Step 3: Configure the reference clock, PHY, and HCLK */
+	clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+
+	/*
+	 * If the UCTL looks like it has already been started, skip the
+	 * initialization; otherwise bus errors result.
+	 */
+	if (clk_rst_ctl.s.hrst)
+		goto end_clock;
+	/* 3a */
+	clk_rst_ctl.s.p_por = 1;
+	clk_rst_ctl.s.hrst = 0;
+	clk_rst_ctl.s.p_prst = 0;
+	clk_rst_ctl.s.h_clkdiv_rst = 0;
+	clk_rst_ctl.s.o_clkdiv_rst = 0;
+	clk_rst_ctl.s.h_clkdiv_en = 0;
+	clk_rst_ctl.s.o_clkdiv_en = 0;
+	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+	/* 3b */
+	clk_rst_ctl.s.p_refclk_sel = is_crystal_clock ? 0 : 1;
+	switch (clock_rate) {
+	default:
+		pr_err("Invalid UCTL clock rate of %u, using 12000000 instead\n",
+			clock_rate);
+		/* Fall through */
+	case 12000000:
+		clk_rst_ctl.s.p_refclk_div = 0;
+		break;
+	case 24000000:
+		clk_rst_ctl.s.p_refclk_div = 1;
+		break;
+	case 48000000:
+		clk_rst_ctl.s.p_refclk_div = 2;
+		break;
+	}
+	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+	/* 3c */
+	div = octeon_get_io_clock_rate() / 130000000ull;
+
+	switch (div) {
+	case 0:
+		div = 1;
+		break;
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+		break;
+	case 5:
+		div = 4;
+		break;
+	case 6:
+	case 7:
+		div = 6;
+		break;
+	case 8:
+	case 9:
+	case 10:
+	case 11:
+		div = 8;
+		break;
+	default:
+		div = 12;
+		break;
+	}
+	clk_rst_ctl.s.h_div = div;
+	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+	/* Read it back. */
+	clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+	clk_rst_ctl.s.h_clkdiv_en = 1;
+	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+	/* 3d */
+	clk_rst_ctl.s.h_clkdiv_rst = 1;
+	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+	/* 3e: delay 64 io clocks */
+	ndelay(io_clk_64_to_ns);
+
+	/*
+	 * Step 4: Program the power-on reset field in the UCTL
+	 * clock-reset-control register.
+	 */
+	clk_rst_ctl.s.p_por = 0;
+	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+	/* Step 5: Wait 3 ms for the PHY clock to start. */
+	mdelay(3);
+
+	/* Steps 6..9 are for ATE only and are skipped. */
+
+	/* Step 10: Configure the OHCI_CLK48 and OHCI_CLK12 clocks. */
+	/* 10a */
+	clk_rst_ctl.s.o_clkdiv_rst = 1;
+	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+	/* 10b */
+	clk_rst_ctl.s.o_clkdiv_en = 1;
+	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+	/* 10c */
+	ndelay(io_clk_64_to_ns);
+
+	/*
+	 * Step 11: Program the PHY reset field:
+	 * UCTL0_CLK_RST_CTL[P_PRST] = 1
+	 */
+	clk_rst_ctl.s.p_prst = 1;
+	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+	/* Step 11b */
+	udelay(1);
+
+	/* Step 11c */
+	clk_rst_ctl.s.p_prst = 0;
+	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+	/* Step 11d */
+	mdelay(1);
+
+	/* Step 11e */
+	clk_rst_ctl.s.p_prst = 1;
+	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+	/* Step 12: Wait 1 uS. */
+	udelay(1);
+
+	/* Step 13: Program the HRESET_N field: UCTL0_CLK_RST_CTL[HRST] = 1 */
+	clk_rst_ctl.s.hrst = 1;
+	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+end_clock:
+	/* Set uSOF cycle period to 60,000 bits. */
+	cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull);
+
+exit:
+	mutex_unlock(&octeon2_usb_clocks_mutex);
+}
+
+static void octeon2_usb_clocks_stop(void)
+{
+	mutex_lock(&octeon2_usb_clocks_mutex);
+	octeon2_usb_clock_start_cnt--;
+	mutex_unlock(&octeon2_usb_clocks_mutex);
+}
+
+static int octeon_ehci_power_on(struct platform_device *pdev)
+{
+	octeon2_usb_clocks_start(&pdev->dev);
+	return 0;
+}
+
+static void octeon_ehci_power_off(struct platform_device *pdev)
+{
+	octeon2_usb_clocks_stop();
+}
+
+static struct usb_ehci_pdata octeon_ehci_pdata = {
+	/* Octeon EHCI matches CPU endianness. */
+#ifdef __BIG_ENDIAN
+	.big_endian_mmio	= 1,
+#endif
+	/*
+	 * We can DMA from anywhere. But the descriptors must be in
+	 * the lower 4GB.
+	 */
+	.dma_mask_64	= 0,
+	.power_on	= octeon_ehci_power_on,
+	.power_off	= octeon_ehci_power_off,
+};
+
+static void __init octeon_ehci_hw_start(struct device *dev)
+{
+	union cvmx_uctlx_ehci_ctl ehci_ctl;
+
+	octeon2_usb_clocks_start(dev);
+
+	ehci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_EHCI_CTL(0));
+	/* Use 64-bit addressing. */
+	ehci_ctl.s.ehci_64b_addr_en = 1;
+	ehci_ctl.s.l2c_addr_msb = 0;
+#ifdef __BIG_ENDIAN
+	ehci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
+	ehci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
+#else
+	ehci_ctl.s.l2c_buff_emod = 0; /* not swapped. */
+	ehci_ctl.s.l2c_desc_emod = 0; /* not swapped. */
+	ehci_ctl.s.inv_reg_a2 = 1;
+#endif
+	cvmx_write_csr(CVMX_UCTLX_EHCI_CTL(0), ehci_ctl.u64);
+
+	octeon2_usb_clocks_stop();
+}
+
+static int __init octeon_ehci_device_init(void)
+{
+	struct platform_device *pd;
+	struct device_node *ehci_node;
+	int ret = 0;
+
+	ehci_node = of_find_node_by_name(NULL, "ehci");
+	if (!ehci_node)
+		return 0;
+
+	pd = of_find_device_by_node(ehci_node);
+	of_node_put(ehci_node);
+	if (!pd)
+		return 0;
+
+	pd->dev.platform_data = &octeon_ehci_pdata;
+	octeon_ehci_hw_start(&pd->dev);
+
+	return ret;
+}
+device_initcall(octeon_ehci_device_init);
+
+static int octeon_ohci_power_on(struct platform_device *pdev)
+{
+	octeon2_usb_clocks_start(&pdev->dev);
+	return 0;
+}
+
+static void octeon_ohci_power_off(struct platform_device *pdev)
+{
+	octeon2_usb_clocks_stop();
+}
+
+static struct usb_ohci_pdata octeon_ohci_pdata = {
+	/* Octeon OHCI matches CPU endianness. */
+#ifdef __BIG_ENDIAN
+	.big_endian_mmio	= 1,
+#endif
+	.power_on	= octeon_ohci_power_on,
+	.power_off	= octeon_ohci_power_off,
+};
+
+static void __init octeon_ohci_hw_start(struct device *dev)
+{
+	union cvmx_uctlx_ohci_ctl ohci_ctl;
+
+	octeon2_usb_clocks_start(dev);
+
+	ohci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_OHCI_CTL(0));
+	ohci_ctl.s.l2c_addr_msb = 0;
+#ifdef __BIG_ENDIAN
+	ohci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
+	ohci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
+#else
+	ohci_ctl.s.l2c_buff_emod = 0; /* not swapped. */
+	ohci_ctl.s.l2c_desc_emod = 0; /* not swapped. */
+	ohci_ctl.s.inv_reg_a2 = 1;
+#endif
+	cvmx_write_csr(CVMX_UCTLX_OHCI_CTL(0), ohci_ctl.u64);
+
+	octeon2_usb_clocks_stop();
+}
+
+static int __init octeon_ohci_device_init(void)
+{
+	struct platform_device *pd;
+	struct device_node *ohci_node;
+	int ret = 0;
+
+	ohci_node = of_find_node_by_name(NULL, "ohci");
+	if (!ohci_node)
+		return 0;
+
+	pd = of_find_device_by_node(ohci_node);
+	of_node_put(ohci_node);
+	if (!pd)
+		return 0;
+
+	pd->dev.platform_data = &octeon_ohci_pdata;
+	octeon_ohci_hw_start(&pd->dev);
+
+	return ret;
+}
+device_initcall(octeon_ohci_device_init);
+
+#endif /* CONFIG_USB */
+
+/* Octeon Random Number Generator.  */
+static int __init octeon_rng_device_init(void)
+{
+	struct platform_device *pd;
+	int ret = 0;
+
+	struct resource rng_resources[] = {
+		{
+			.flags	= IORESOURCE_MEM,
+			.start	= XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS),
+			.end	= XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS) + 0xf
+		}, {
+			.flags	= IORESOURCE_MEM,
+			.start	= cvmx_build_io_address(8, 0),
+			.end	= cvmx_build_io_address(8, 0) + 0x7
+		}
+	};
+
+	pd = platform_device_alloc("octeon_rng", -1);
+	if (!pd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = platform_device_add_resources(pd, rng_resources,
+					    ARRAY_SIZE(rng_resources));
+	if (ret)
+		goto fail;
+
+	ret = platform_device_add(pd);
+	if (ret)
+		goto fail;
+
+	return ret;
+fail:
+	platform_device_put(pd);
+
+out:
+	return ret;
+}
+device_initcall(octeon_rng_device_init);
+
+const struct of_device_id octeon_ids[] __initconst = {
+	{ .compatible = "simple-bus", },
+	{ .compatible = "cavium,octeon-6335-uctl", },
+	{ .compatible = "cavium,octeon-5750-usbn", },
+	{ .compatible = "cavium,octeon-3860-bootbus", },
+	{ .compatible = "cavium,mdio-mux", },
+	{ .compatible = "gpio-leds", },
+	{ .compatible = "cavium,octeon-7130-usb-uctl", },
+	{},
+};
+
+static bool __init octeon_has_88e1145(void)
+{
+	return !OCTEON_IS_MODEL(OCTEON_CN52XX) &&
+	       !OCTEON_IS_MODEL(OCTEON_CN6XXX) &&
+	       !OCTEON_IS_MODEL(OCTEON_CN56XX);
+}
+
+static void __init octeon_fdt_set_phy(int eth, int phy_addr)
+{
+	const __be32 *phy_handle;
+	const __be32 *alt_phy_handle;
+	const __be32 *reg;
+	u32 phandle;
+	int phy;
+	int alt_phy;
+	const char *p;
+	int current_len;
+	char new_name[20];
+
+	phy_handle = fdt_getprop(initial_boot_params, eth, "phy-handle", NULL);
+	if (!phy_handle)
+		return;
+
+	phandle = be32_to_cpup(phy_handle);
+	phy = fdt_node_offset_by_phandle(initial_boot_params, phandle);
+
+	alt_phy_handle = fdt_getprop(initial_boot_params, eth, "cavium,alt-phy-handle", NULL);
+	if (alt_phy_handle) {
+		u32 alt_phandle = be32_to_cpup(alt_phy_handle);
+
+		alt_phy = fdt_node_offset_by_phandle(initial_boot_params, alt_phandle);
+	} else {
+		alt_phy = -1;
+	}
+
+	if (phy_addr < 0 || phy < 0) {
+		/* Delete the PHY things */
+		fdt_nop_property(initial_boot_params, eth, "phy-handle");
+		/* This one may fail */
+		fdt_nop_property(initial_boot_params, eth, "cavium,alt-phy-handle");
+		if (phy >= 0)
+			fdt_nop_node(initial_boot_params, phy);
+		if (alt_phy >= 0)
+			fdt_nop_node(initial_boot_params, alt_phy);
+		return;
+	}
+
+	if (phy_addr >= 256 && alt_phy > 0) {
+		const struct fdt_property *phy_prop;
+		struct fdt_property *alt_prop;
+		fdt32_t phy_handle_name;
+
+		/* Use the alt phy node instead.*/
+		phy_prop = fdt_get_property(initial_boot_params, eth, "phy-handle", NULL);
+		phy_handle_name = phy_prop->nameoff;
+		fdt_nop_node(initial_boot_params, phy);
+		fdt_nop_property(initial_boot_params, eth, "phy-handle");
+		alt_prop = fdt_get_property_w(initial_boot_params, eth, "cavium,alt-phy-handle", NULL);
+		alt_prop->nameoff = phy_handle_name;
+		phy = alt_phy;
+	}
+
+	phy_addr &= 0xff;
+
+	if (octeon_has_88e1145()) {
+		fdt_nop_property(initial_boot_params, phy, "marvell,reg-init");
+		memset(new_name, 0, sizeof(new_name));
+		strcpy(new_name, "marvell,88e1145");
+		p = fdt_getprop(initial_boot_params, phy, "compatible",
+				&current_len);
+		if (p && current_len >= strlen(new_name))
+			fdt_setprop_inplace(initial_boot_params, phy,
+					"compatible", new_name, current_len);
+	}
+
+	reg = fdt_getprop(initial_boot_params, phy, "reg", NULL);
+	if (phy_addr == be32_to_cpup(reg))
+		return;
+
+	fdt_setprop_inplace_cell(initial_boot_params, phy, "reg", phy_addr);
+
+	snprintf(new_name, sizeof(new_name), "ethernet-phy@%x", phy_addr);
+
+	p = fdt_get_name(initial_boot_params, phy, &current_len);
+	if (p && current_len == strlen(new_name))
+		fdt_set_name(initial_boot_params, phy, new_name);
+	else
+		pr_err("Error: could not rename ethernet phy: <%s>\n", p);
+}
+
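+/*
+ * Fill in a node's "local-mac-address" from the running counter, but
+ * only when the property exists with the right length and does not
+ * already hold a valid address; on success the counter is advanced.
+ */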
+static void __init octeon_fdt_set_mac_addr(int n, u64 *pmac)
+{
+	const u8 *old_mac;
+	int old_len;
+	u8 new_mac[6];
+	u64 mac = *pmac;
+	int r;
+
+	old_mac = fdt_getprop(initial_boot_params, n, "local-mac-address",
+			      &old_len);
+	if (!old_mac || old_len != 6 || is_valid_ether_addr(old_mac))
+		return;
+
+	new_mac[0] = (mac >> 40) & 0xff;
+	new_mac[1] = (mac >> 32) & 0xff;
+	new_mac[2] = (mac >> 24) & 0xff;
+	new_mac[3] = (mac >> 16) & 0xff;
+	new_mac[4] = (mac >> 8) & 0xff;
+	new_mac[5] = mac & 0xff;
+
+	r = fdt_setprop_inplace(initial_boot_params, n, "local-mac-address",
+				new_mac, sizeof(new_mac));
+
+	if (r) {
+		pr_err("Setting \"local-mac-address\" failed %d\n", r);
+		return;
+	}
+	*pmac = mac + 1;
+}
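+
+/*
+ * Worked example for the byte extraction above (value invented for
+ * illustration): with *pmac == 0x001122334455ull the bytes written to
+ * "local-mac-address" are 00:11:22:33:44:55, after which *pmac is
+ * advanced to 0x001122334456 so the next interface gets the following
+ * address.
+ */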
+
+static void __init octeon_fdt_rm_ethernet(int node)
+{
+	const __be32 *phy_handle;
+
+	phy_handle = fdt_getprop(initial_boot_params, node, "phy-handle", NULL);
+	if (phy_handle) {
+		u32 ph = be32_to_cpup(phy_handle);
+		int p = fdt_node_offset_by_phandle(initial_boot_params, ph);
+
+		if (p >= 0)
+			fdt_nop_node(initial_boot_params, p);
+	}
+	fdt_nop_node(initial_boot_params, node);
+}
+
+static void __init octeon_fdt_pip_port(int iface, int i, int p, int max)
+{
+	char name_buffer[20];
+	int eth;
+	int phy_addr;
+	int ipd_port;
+
+	snprintf(name_buffer, sizeof(name_buffer), "ethernet@%x", p);
+	eth = fdt_subnode_offset(initial_boot_params, iface, name_buffer);
+	if (eth < 0)
+		return;
+	if (p > max) {
+		pr_debug("Deleting port %x:%x\n", i, p);
+		octeon_fdt_rm_ethernet(eth);
+		return;
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		ipd_port = (0x100 * i) + (0x10 * p) + 0x800;
+	else
+		ipd_port = 16 * i + p;
+
+	phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
+	octeon_fdt_set_phy(eth, phy_addr);
+}
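+
+/*
+ * IPD port numbering example (interface/port pair invented for
+ * illustration): on CN68XX, interface 1 port 2 yields
+ * 0x100 * 1 + 0x10 * 2 + 0x800 = 0x920, while the same pair on the
+ * other models yields 16 * 1 + 2 = 18.
+ */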
+
+static void __init octeon_fdt_pip_iface(int pip, int idx)
+{
+	char name_buffer[20];
+	int iface;
+	int p;
+	int count = 0;
+
+	snprintf(name_buffer, sizeof(name_buffer), "interface@%d", idx);
+	iface = fdt_subnode_offset(initial_boot_params, pip, name_buffer);
+	if (iface < 0)
+		return;
+
+	if (cvmx_helper_interface_enumerate(idx) == 0)
+		count = cvmx_helper_ports_on_interface(idx);
+
+	for (p = 0; p < 16; p++)
+		octeon_fdt_pip_port(iface, idx, p, count - 1);
+}
+
+void __init octeon_fill_mac_addresses(void)
+{
+	const char *alias_prop;
+	char name_buffer[20];
+	u64 mac_addr_base;
+	int aliases;
+	int pip;
+	int i;
+
+	aliases = fdt_path_offset(initial_boot_params, "/aliases");
+	if (aliases < 0)
+		return;
+
+	mac_addr_base =
+		((octeon_bootinfo->mac_addr_base[0] & 0xffull)) << 40 |
+		((octeon_bootinfo->mac_addr_base[1] & 0xffull)) << 32 |
+		((octeon_bootinfo->mac_addr_base[2] & 0xffull)) << 24 |
+		((octeon_bootinfo->mac_addr_base[3] & 0xffull)) << 16 |
+		((octeon_bootinfo->mac_addr_base[4] & 0xffull)) << 8 |
+		 (octeon_bootinfo->mac_addr_base[5] & 0xffull);
+
+	for (i = 0; i < 2; i++) {
+		int mgmt;
+
+		snprintf(name_buffer, sizeof(name_buffer), "mix%d", i);
+		alias_prop = fdt_getprop(initial_boot_params, aliases,
+					 name_buffer, NULL);
+		if (!alias_prop)
+			continue;
+		mgmt = fdt_path_offset(initial_boot_params, alias_prop);
+		if (mgmt < 0)
+			continue;
+		octeon_fdt_set_mac_addr(mgmt, &mac_addr_base);
+	}
+
+	alias_prop = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
+	if (!alias_prop)
+		return;
+
+	pip = fdt_path_offset(initial_boot_params, alias_prop);
+	if (pip < 0)
+		return;
+
+	for (i = 0; i <= 4; i++) {
+		int iface;
+		int p;
+
+		snprintf(name_buffer, sizeof(name_buffer), "interface@%d", i);
+		iface = fdt_subnode_offset(initial_boot_params, pip,
+					   name_buffer);
+		if (iface < 0)
+			continue;
+		for (p = 0; p < 16; p++) {
+			int eth;
+
+			snprintf(name_buffer, sizeof(name_buffer),
+				 "ethernet@%x", p);
+			eth = fdt_subnode_offset(initial_boot_params, iface,
+						 name_buffer);
+			if (eth < 0)
+				continue;
+			octeon_fdt_set_mac_addr(eth, &mac_addr_base);
+		}
+	}
+}
+
+int __init octeon_prune_device_tree(void)
+{
+	int i, max_port, uart_mask;
+	const char *pip_path;
+	const char *alias_prop;
+	char name_buffer[20];
+	int aliases;
+
+	if (fdt_check_header(initial_boot_params))
+		panic("Corrupt Device Tree.");
+
+	WARN(octeon_bootinfo->board_type == CVMX_BOARD_TYPE_CUST_DSR1000N,
+	     "Built-in DTB booting is deprecated on %s. Please switch to using an appended DTB.\n",
+	     cvmx_board_type_to_string(octeon_bootinfo->board_type));
+
+	aliases = fdt_path_offset(initial_boot_params, "/aliases");
+	if (aliases < 0) {
+		pr_err("Error: No /aliases node in device tree.\n");
+		return -EINVAL;
+	}
+
+	if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
+		max_port = 2;
+	else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))
+		max_port = 1;
+	else
+		max_port = 0;
+
+	if (octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC10E)
+		max_port = 0;
+
+	for (i = 0; i < 2; i++) {
+		int mgmt;
+
+		snprintf(name_buffer, sizeof(name_buffer),
+			 "mix%d", i);
+		alias_prop = fdt_getprop(initial_boot_params, aliases,
+					name_buffer, NULL);
+		if (alias_prop) {
+			mgmt = fdt_path_offset(initial_boot_params, alias_prop);
+			if (mgmt < 0)
+				continue;
+			if (i >= max_port) {
+				pr_debug("Deleting mix%d\n", i);
+				octeon_fdt_rm_ethernet(mgmt);
+				fdt_nop_property(initial_boot_params, aliases,
+						 name_buffer);
+			} else {
+				int phy_addr = cvmx_helper_board_get_mii_address(CVMX_HELPER_BOARD_MGMT_IPD_PORT + i);
+
+				octeon_fdt_set_phy(mgmt, phy_addr);
+			}
+		}
+	}
+
+	pip_path = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
+	if (pip_path) {
+		int pip = fdt_path_offset(initial_boot_params, pip_path);
+
+		if (pip >= 0)
+			for (i = 0; i <= 4; i++)
+				octeon_fdt_pip_iface(pip, i);
+	}
+
+	/* I2C */
+	if (OCTEON_IS_MODEL(OCTEON_CN52XX) ||
+	    OCTEON_IS_MODEL(OCTEON_CN63XX) ||
+	    OCTEON_IS_MODEL(OCTEON_CN68XX) ||
+	    OCTEON_IS_MODEL(OCTEON_CN56XX))
+		max_port = 2;
+	else
+		max_port = 1;
+
+	for (i = 0; i < 2; i++) {
+		int i2c;
+
+		snprintf(name_buffer, sizeof(name_buffer),
+			 "twsi%d", i);
+		alias_prop = fdt_getprop(initial_boot_params, aliases,
+					name_buffer, NULL);
+
+		if (alias_prop) {
+			i2c = fdt_path_offset(initial_boot_params, alias_prop);
+			if (i2c < 0)
+				continue;
+			if (i >= max_port) {
+				pr_debug("Deleting twsi%d\n", i);
+				fdt_nop_node(initial_boot_params, i2c);
+				fdt_nop_property(initial_boot_params, aliases,
+						 name_buffer);
+			}
+		}
+	}
+
+	/* SMI/MDIO */
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		max_port = 4;
+	else if (OCTEON_IS_MODEL(OCTEON_CN52XX) ||
+		 OCTEON_IS_MODEL(OCTEON_CN63XX) ||
+		 OCTEON_IS_MODEL(OCTEON_CN56XX))
+		max_port = 2;
+	else
+		max_port = 1;
+
+	for (i = 0; i < 2; i++) {
+		int i2c;
+
+		snprintf(name_buffer, sizeof(name_buffer),
+			 "smi%d", i);
+		alias_prop = fdt_getprop(initial_boot_params, aliases,
+					name_buffer, NULL);
+		if (alias_prop) {
+			i2c = fdt_path_offset(initial_boot_params, alias_prop);
+			if (i2c < 0)
+				continue;
+			if (i >= max_port) {
+				pr_debug("Deleting smi%d\n", i);
+				fdt_nop_node(initial_boot_params, i2c);
+				fdt_nop_property(initial_boot_params, aliases,
+						 name_buffer);
+			}
+		}
+	}
+
+	/* Serial */
+	uart_mask = 3;
+
+	/* Right now CN52XX is the only chip with a third uart */
+	if (OCTEON_IS_MODEL(OCTEON_CN52XX))
+		uart_mask |= 4; /* uart2 */
+
+	for (i = 0; i < 3; i++) {
+		int uart;
+
+		snprintf(name_buffer, sizeof(name_buffer),
+			 "uart%d", i);
+		alias_prop = fdt_getprop(initial_boot_params, aliases,
+					name_buffer, NULL);
+
+		if (alias_prop) {
+			uart = fdt_path_offset(initial_boot_params, alias_prop);
+			if (uart_mask & (1 << i)) {
+				__be32 f;
+
+				f = cpu_to_be32(octeon_get_io_clock_rate());
+				fdt_setprop_inplace(initial_boot_params,
+						    uart, "clock-frequency",
+						    &f, sizeof(f));
+				continue;
+			}
+			pr_debug("Deleting uart%d\n", i);
+			fdt_nop_node(initial_boot_params, uart);
+			fdt_nop_property(initial_boot_params, aliases,
+					 name_buffer);
+		}
+	}
+
+	/* Compact Flash */
+	alias_prop = fdt_getprop(initial_boot_params, aliases,
+				 "cf0", NULL);
+	if (alias_prop) {
+		union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
+		unsigned long base_ptr, region_base, region_size;
+		unsigned long region1_base = 0;
+		unsigned long region1_size = 0;
+		int cs, bootbus;
+		bool is_16bit = false;
+		bool is_true_ide = false;
+		__be32 new_reg[6];
+		__be32 *ranges;
+		int len;
+
+		int cf = fdt_path_offset(initial_boot_params, alias_prop);
+
+		base_ptr = 0;
+		if (octeon_bootinfo->major_version == 1
+			&& octeon_bootinfo->minor_version >= 1) {
+			if (octeon_bootinfo->compact_flash_common_base_addr)
+				base_ptr = octeon_bootinfo->compact_flash_common_base_addr;
+		} else {
+			base_ptr = 0x1d000800;
+		}
+
+		if (!base_ptr)
+			goto no_cf;
+
+		/* Find CS0 region. */
+		for (cs = 0; cs < 8; cs++) {
+			mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
+			region_base = mio_boot_reg_cfg.s.base << 16;
+			region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
+			if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
+				&& base_ptr < region_base + region_size) {
+				is_16bit = mio_boot_reg_cfg.s.width;
+				break;
+			}
+		}
+		if (cs >= 7) {
+			/* cs and cs + 1 are CS0 and CS1, both must be less than 8. */
+			goto no_cf;
+		}
+
+		if (!(base_ptr & 0xfffful)) {
+			/*
+			 * Boot loader signals availability of DMA (true_ide
+			 * mode) by setting low order bits of base_ptr to
+			 * zero.
+			 */
+
+			/* Assume that CS1 immediately follows. */
+			mio_boot_reg_cfg.u64 =
+				cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs + 1));
+			region1_base = mio_boot_reg_cfg.s.base << 16;
+			region1_size = (mio_boot_reg_cfg.s.size + 1) << 16;
+			if (!mio_boot_reg_cfg.s.en)
+				goto no_cf;
+			is_true_ide = true;
+
+		} else {
+			fdt_nop_property(initial_boot_params, cf, "cavium,true-ide");
+			fdt_nop_property(initial_boot_params, cf, "cavium,dma-engine-handle");
+			if (!is_16bit) {
+				__be32 width = cpu_to_be32(8);
+
+				fdt_setprop_inplace(initial_boot_params, cf,
+						"cavium,bus-width", &width, sizeof(width));
+			}
+		}
+		new_reg[0] = cpu_to_be32(cs);
+		new_reg[1] = cpu_to_be32(0);
+		new_reg[2] = cpu_to_be32(0x10000);
+		new_reg[3] = cpu_to_be32(cs + 1);
+		new_reg[4] = cpu_to_be32(0);
+		new_reg[5] = cpu_to_be32(0x10000);
+		fdt_setprop_inplace(initial_boot_params, cf,
+				    "reg",  new_reg, sizeof(new_reg));
+
+		bootbus = fdt_parent_offset(initial_boot_params, cf);
+		if (bootbus < 0)
+			goto no_cf;
+		ranges = fdt_getprop_w(initial_boot_params, bootbus, "ranges", &len);
+		if (!ranges || len < (5 * 8 * sizeof(__be32)))
+			goto no_cf;
+
+		ranges[(cs * 5) + 2] = cpu_to_be32(region_base >> 32);
+		ranges[(cs * 5) + 3] = cpu_to_be32(region_base & 0xffffffff);
+		ranges[(cs * 5) + 4] = cpu_to_be32(region_size);
+		if (is_true_ide) {
+			cs++;
+			ranges[(cs * 5) + 2] = cpu_to_be32(region1_base >> 32);
+			ranges[(cs * 5) + 3] = cpu_to_be32(region1_base & 0xffffffff);
+			ranges[(cs * 5) + 4] = cpu_to_be32(region1_size);
+		}
+		goto end_cf;
+no_cf:
+		fdt_nop_node(initial_boot_params, cf);
+
+end_cf:
+		;
+	}
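+
+	/*
+	 * Layout relied on by the "ranges" fixups above and in the LED
+	 * block below: each boot-bus range entry is five cells wide and
+	 * indexed by chip select, with cells 2 and 3 holding the upper
+	 * and lower halves of the 64-bit physical base and cell 4 the
+	 * size.  For cs == 2, for instance, the base lands in cells
+	 * 12 and 13 and the size in cell 14.
+	 */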
+
+	/* 8 char LED */
+	alias_prop = fdt_getprop(initial_boot_params, aliases,
+				 "led0", NULL);
+	if (alias_prop) {
+		union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
+		unsigned long base_ptr, region_base, region_size;
+		int cs, bootbus;
+		__be32 new_reg[6];
+		__be32 *ranges;
+		int len;
+		int led = fdt_path_offset(initial_boot_params, alias_prop);
+
+		base_ptr = octeon_bootinfo->led_display_base_addr;
+		if (base_ptr == 0)
+			goto no_led;
+		/* Find CS0 region. */
+		for (cs = 0; cs < 8; cs++) {
+			mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
+			region_base = mio_boot_reg_cfg.s.base << 16;
+			region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
+			if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
+				&& base_ptr < region_base + region_size)
+				break;
+		}
+
+		if (cs > 7)
+			goto no_led;
+
+		new_reg[0] = cpu_to_be32(cs);
+		new_reg[1] = cpu_to_be32(0x20);
+		new_reg[2] = cpu_to_be32(0x20);
+		new_reg[3] = cpu_to_be32(cs);
+		new_reg[4] = cpu_to_be32(0);
+		new_reg[5] = cpu_to_be32(0x20);
+		fdt_setprop_inplace(initial_boot_params, led,
+				    "reg",  new_reg, sizeof(new_reg));
+
+		bootbus = fdt_parent_offset(initial_boot_params, led);
+		if (bootbus < 0)
+			goto no_led;
+		ranges = fdt_getprop_w(initial_boot_params, bootbus, "ranges", &len);
+		if (!ranges || len < (5 * 8 * sizeof(__be32)))
+			goto no_led;
+
+		ranges[(cs * 5) + 2] = cpu_to_be32(region_base >> 32);
+		ranges[(cs * 5) + 3] = cpu_to_be32(region_base & 0xffffffff);
+		ranges[(cs * 5) + 4] = cpu_to_be32(region_size);
+		goto end_led;
+
+no_led:
+		fdt_nop_node(initial_boot_params, led);
+end_led:
+		;
+	}
+
+#ifdef CONFIG_USB
+	/* OHCI/UHCI USB */
+	alias_prop = fdt_getprop(initial_boot_params, aliases,
+				 "uctl", NULL);
+	if (alias_prop) {
+		int uctl = fdt_path_offset(initial_boot_params, alias_prop);
+
+		if (uctl >= 0 && (!OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
+				  octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC2E)) {
+			pr_debug("Deleting uctl\n");
+			fdt_nop_node(initial_boot_params, uctl);
+			fdt_nop_property(initial_boot_params, aliases, "uctl");
+		} else if (octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC10E ||
+			   octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC4E) {
+			/* Missing "refclk-type" defaults to crystal. */
+			fdt_nop_property(initial_boot_params, uctl, "refclk-type");
+		}
+	}
+
+	/* DWC2 USB */
+	alias_prop = fdt_getprop(initial_boot_params, aliases,
+				 "usbn", NULL);
+	if (alias_prop) {
+		int usbn = fdt_path_offset(initial_boot_params, alias_prop);
+
+		if (usbn >= 0 && (current_cpu_type() == CPU_CAVIUM_OCTEON2 ||
+				  !octeon_has_feature(OCTEON_FEATURE_USB))) {
+			pr_debug("Deleting usbn\n");
+			fdt_nop_node(initial_boot_params, usbn);
+			fdt_nop_property(initial_boot_params, aliases, "usbn");
+		} else  {
+			__be32 new_f[1];
+			enum cvmx_helper_board_usb_clock_types c;
+
+			c = __cvmx_helper_board_usb_get_clock_type();
+			switch (c) {
+			case USB_CLOCK_TYPE_REF_48:
+				new_f[0] = cpu_to_be32(48000000);
+				fdt_setprop_inplace(initial_boot_params, usbn,
+						    "refclk-frequency",  new_f, sizeof(new_f));
+				/* Fall through ...*/
+			case USB_CLOCK_TYPE_REF_12:
+				/* Missing "refclk-type" defaults to external. */
+				fdt_nop_property(initial_boot_params, usbn, "refclk-type");
+				break;
+			default:
+				break;
+			}
+		}
+	}
+#endif
+
+	return 0;
+}
+
+static int __init octeon_publish_devices(void)
+{
+	return of_platform_bus_probe(NULL, octeon_ids, NULL);
+}
+arch_initcall(octeon_publish_devices);
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/octeon-usb.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/octeon-usb.c
new file mode 100644
index 0000000..75189ff
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/octeon-usb.c
@@ -0,0 +1,555 @@
+/*
+ * XHCI HCD glue for Cavium Octeon III SOCs.
+ *
+ * Copyright (C) 2010-2017 Cavium Networks
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#include <asm/octeon/octeon.h>
+
+/* USB Control Register */
+union cvm_usbdrd_uctl_ctl {
+	uint64_t u64;
+	struct cvm_usbdrd_uctl_ctl_s {
+	/* 1 = BIST and set all USB RAMs to 0x0, 0 = BIST */
+	__BITFIELD_FIELD(uint64_t clear_bist:1,
+	/* 1 = Start BIST; cleared by hardware */
+	__BITFIELD_FIELD(uint64_t start_bist:1,
+	/* Reference clock select for SuperSpeed and HighSpeed PLLs:
+	 *	0x0 = Both PLLs use DLMC_REF_CLK0 for reference clock
+	 *	0x1 = Both PLLs use DLMC_REF_CLK1 for reference clock
+	 *	0x2 = SuperSpeed PLL uses DLMC_REF_CLK0 for reference clock &
+	 *	      HighSpeed PLL uses PLL_REF_CLK for reference clock
+	 *	0x3 = SuperSpeed PLL uses DLMC_REF_CLK1 for reference clock &
+	 *	      HighSpeed PLL uses PLL_REF_CLK for reference clock
+	 */
+	__BITFIELD_FIELD(uint64_t ref_clk_sel:2,
+	/* 1 = Spread-spectrum clock enable, 0 = SS clock disable */
+	__BITFIELD_FIELD(uint64_t ssc_en:1,
+	/* Spread-spectrum clock modulation range:
+	 *	0x0 = -4980 ppm downspread
+	 *	0x1 = -4492 ppm downspread
+	 *	0x2 = -4003 ppm downspread
+	 *	0x3 - 0x7 = Reserved
+	 */
+	__BITFIELD_FIELD(uint64_t ssc_range:3,
+	/* Enable non-standard oscillator frequencies:
+	 *	[55:53] = modulus - 1
+	 *	[52:47] = 2's complement push amount, 0 = Feature disabled
+	 */
+	__BITFIELD_FIELD(uint64_t ssc_ref_clk_sel:9,
+	/* Reference clock multiplier for non-standard frequencies:
+	 *	0x19 = 100MHz on DLMC_REF_CLK* if REF_CLK_SEL = 0x0 or 0x1
+	 *	0x28 = 125MHz on DLMC_REF_CLK* if REF_CLK_SEL = 0x0 or 0x1
+	 *	0x32 =  50MHz on DLMC_REF_CLK* if REF_CLK_SEL = 0x0 or 0x1
+	 *	Other Values = Reserved
+	 */
+	__BITFIELD_FIELD(uint64_t mpll_multiplier:7,
+	/* Enable reference clock to prescaler for SuperSpeed functionality.
+	 * Should always be set to "1"
+	 */
+	__BITFIELD_FIELD(uint64_t ref_ssp_en:1,
+	/* Divide the reference clock by 2 before entering the
+	 * REF_CLK_FSEL divider:
+	 *	If REF_CLK_SEL = 0x0 or 0x1, then only 0x0 is legal
+	 *	If REF_CLK_SEL = 0x2 or 0x3, then:
+	 *		0x1 = DLMC_REF_CLK* is 125MHz
+	 *		0x0 = DLMC_REF_CLK* is another supported frequency
+	 */
+	__BITFIELD_FIELD(uint64_t ref_clk_div2:1,
+	/* Select reference clock frequency for both PLL blocks:
+	 *	0x27 = REF_CLK_SEL is 0x0 or 0x1
+	 *	0x07 = REF_CLK_SEL is 0x2 or 0x3
+	 */
+	__BITFIELD_FIELD(uint64_t ref_clk_fsel:6,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_31_31:1,
+	/* Controller clock enable. */
+	__BITFIELD_FIELD(uint64_t h_clk_en:1,
+	/* Select bypass input to controller clock divider:
+	 *	0x0 = Use divided coprocessor clock from H_CLKDIV
+	 *	0x1 = Use clock from GPIO pins
+	 */
+	__BITFIELD_FIELD(uint64_t h_clk_byp_sel:1,
+	/* Reset controller clock divider. */
+	__BITFIELD_FIELD(uint64_t h_clkdiv_rst:1,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_27_27:1,
+	/* Clock divider select:
+	 *	0x0 = divide by 1
+	 *	0x1 = divide by 2
+	 *	0x2 = divide by 4
+	 *	0x3 = divide by 6
+	 *	0x4 = divide by 8
+	 *	0x5 = divide by 16
+	 *	0x6 = divide by 24
+	 *	0x7 = divide by 32
+	 */
+	__BITFIELD_FIELD(uint64_t h_clkdiv_sel:3,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_22_23:2,
+	/* USB3 port permanently attached: 0x0 = No, 0x1 = Yes */
+	__BITFIELD_FIELD(uint64_t usb3_port_perm_attach:1,
+	/* USB2 port permanently attached: 0x0 = No, 0x1 = Yes */
+	__BITFIELD_FIELD(uint64_t usb2_port_perm_attach:1,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_19_19:1,
+	/* Disable SuperSpeed PHY: 0x0 = No, 0x1 = Yes */
+	__BITFIELD_FIELD(uint64_t usb3_port_disable:1,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_17_17:1,
+	/* Disable HighSpeed PHY: 0x0 = No, 0x1 = Yes */
+	__BITFIELD_FIELD(uint64_t usb2_port_disable:1,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_15_15:1,
+	/* Enable PHY SuperSpeed block power: 0x0 = No, 0x1 = Yes */
+	__BITFIELD_FIELD(uint64_t ss_power_en:1,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_13_13:1,
+	/* Enable PHY HighSpeed block power: 0x0 = No, 0x1 = Yes */
+	__BITFIELD_FIELD(uint64_t hs_power_en:1,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_5_11:7,
+	/* Enable USB UCTL interface clock: 0x0 = No, 0x1 = Yes */
+	__BITFIELD_FIELD(uint64_t csclk_en:1,
+	/* Controller mode: 0x0 = Host, 0x1 = Device */
+	__BITFIELD_FIELD(uint64_t drd_mode:1,
+	/* PHY reset */
+	__BITFIELD_FIELD(uint64_t uphy_rst:1,
+	/* Software reset UAHC */
+	__BITFIELD_FIELD(uint64_t uahc_rst:1,
+	/* Software resets UCTL */
+	__BITFIELD_FIELD(uint64_t uctl_rst:1,
+	;)))))))))))))))))))))))))))))))))
+	} s;
+};
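+
+/*
+ * Minimal usage sketch for the register unions in this file (the
+ * uctl_ctl_reg value stands for whichever UCTL_CTL CSR address is being
+ * programmed): read the CSR into .u64, update the named bit-field, then
+ * write the whole word back:
+ *
+ *	union cvm_usbdrd_uctl_ctl ctl;
+ *
+ *	ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+ *	ctl.s.ssc_en = 1;
+ *	cvmx_write_csr(uctl_ctl_reg, ctl.u64);
+ *
+ * dwc3_octeon_clocks_start() below follows exactly this pattern.
+ */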
+
+/* UAHC Configuration Register */
+union cvm_usbdrd_uctl_host_cfg {
+	uint64_t u64;
+	struct cvm_usbdrd_uctl_host_cfg_s {
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_60_63:4,
+	/* Indicates minimum value of all received BELT values */
+	__BITFIELD_FIELD(uint64_t host_current_belt:12,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_38_47:10,
+	/* HS jitter adjustment */
+	__BITFIELD_FIELD(uint64_t fla:6,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_29_31:3,
+	/* Bus-master enable: 0x0 = Disabled (stall DMAs), 0x1 = enabled */
+	__BITFIELD_FIELD(uint64_t bme:1,
+	/* Overcurrent protection enable: 0x0 = unavailable, 0x1 = available */
+	__BITFIELD_FIELD(uint64_t oci_en:1,
+	/* Overcurrent sense selection:
+	 *	0x0 = Overcurrent indication from off-chip is active-low
+	 *	0x1 = Overcurrent indication from off-chip is active-high
+	 */
+	__BITFIELD_FIELD(uint64_t oci_active_high_en:1,
+	/* Port power control enable: 0x0 = unavailable, 0x1 = available */
+	__BITFIELD_FIELD(uint64_t ppc_en:1,
+	/* Port power control sense selection:
+	 *	0x0 = Port power to off-chip is active-low
+	 *	0x1 = Port power to off-chip is active-high
+	 */
+	__BITFIELD_FIELD(uint64_t ppc_active_high_en:1,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_0_23:24,
+	;)))))))))))
+	} s;
+};
+
+/* UCTL Shim Features Register */
+union cvm_usbdrd_uctl_shim_cfg {
+	uint64_t u64;
+	struct cvm_usbdrd_uctl_shim_cfg_s {
+	/* Out-of-bound UAHC register access: 0 = read, 1 = write */
+	__BITFIELD_FIELD(uint64_t xs_ncb_oob_wrn:1,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_60_62:3,
+	/* SRCID error log for out-of-bound UAHC register access:
+	 *	[59:58] = chipID
+	 *	[57] = Request source: 0 = core, 1 = NCB-device
+	 *	[56:51] = Core/NCB-device number, [56] always 0 for NCB devices
+	 *	[50:48] = SubID
+	 */
+	__BITFIELD_FIELD(uint64_t xs_ncb_oob_osrc:12,
+	/* Error log for bad UAHC DMA access: 0 = Read log, 1 = Write log */
+	__BITFIELD_FIELD(uint64_t xm_bad_dma_wrn:1,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_44_46:3,
+	/* Encoded error type for bad UAHC DMA */
+	__BITFIELD_FIELD(uint64_t xm_bad_dma_type:4,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_13_39:27,
+	/* Select the IOI read command used by DMA accesses */
+	__BITFIELD_FIELD(uint64_t dma_read_cmd:1,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_10_11:2,
+	/* Select endian format for DMA accesses to the L2c:
+	 *	0x0 = Little endian
+	 *	0x1 = Big endian
+	 *	0x2 = Reserved
+	 *	0x3 = Reserved
+	 */
+	__BITFIELD_FIELD(uint64_t dma_endian_mode:2,
+	/* Reserved */
+	__BITFIELD_FIELD(uint64_t reserved_2_7:6,
+	/* Select endian format for IOI CSR access to UAHC:
+	 *	0x0 = Little endian
+	 *	0x1 = Big endian
+	 *	0x2 = Reserved
+	 *	0x3 = Reserved
+	 */
+	__BITFIELD_FIELD(uint64_t csr_endian_mode:2,
+	;))))))))))))
+	} s;
+};
+
+#define OCTEON_H_CLKDIV_SEL		8
+#define OCTEON_MIN_H_CLK_RATE		150000000
+#define OCTEON_MAX_H_CLK_RATE		300000000
+
+static DEFINE_MUTEX(dwc3_octeon_clocks_mutex);
+static uint8_t clk_div[OCTEON_H_CLKDIV_SEL] = {1, 2, 4, 6, 8, 16, 24, 32};
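+
+/*
+ * Divider selection example (a 700 MHz I/O clock is assumed purely for
+ * illustration): 700/1 and 700/2 MHz exceed OCTEON_MAX_H_CLK_RATE, while
+ * 700/4 = 175 MHz falls within [150, 300] MHz, so H_CLKDIV_SEL would be
+ * set to index 2 of clk_div[].
+ */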
+
+static int dwc3_octeon_config_power(struct device *dev, u64 base)
+{
+#define UCTL_HOST_CFG	0xe0
+	union cvm_usbdrd_uctl_host_cfg uctl_host_cfg;
+	union cvmx_gpio_bit_cfgx gpio_bit;
+	uint32_t gpio_pwr[3];
+	int gpio, len, power_active_low;
+	struct device_node *node = dev->of_node;
+	int index = (base >> 24) & 1;
+
+	if (of_find_property(node, "power", &len) != NULL) {
+		if (len == 12) {
+			of_property_read_u32_array(node, "power", gpio_pwr, 3);
+			power_active_low = gpio_pwr[2] & 0x01;
+			gpio = gpio_pwr[1];
+		} else if (len == 8) {
+			of_property_read_u32_array(node, "power", gpio_pwr, 2);
+			power_active_low = 0;
+			gpio = gpio_pwr[1];
+		} else {
+			dev_err(dev, "invalid \"power\" property\n");
+			return -EINVAL;
+		}
+		if ((OCTEON_IS_MODEL(OCTEON_CN73XX) ||
+		    OCTEON_IS_MODEL(OCTEON_CNF75XX))
+		    && gpio <= 31) {
+			gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(gpio));
+			gpio_bit.s.tx_oe = 1;
+			gpio_bit.cn73xx.output_sel = (index == 0 ? 0x14 : 0x15);
+			cvmx_write_csr(CVMX_GPIO_BIT_CFGX(gpio), gpio_bit.u64);
+		} else if (gpio <= 15) {
+			gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(gpio));
+			gpio_bit.s.tx_oe = 1;
+			gpio_bit.cn70xx.output_sel = (index == 0 ? 0x14 : 0x19);
+			cvmx_write_csr(CVMX_GPIO_BIT_CFGX(gpio), gpio_bit.u64);
+		} else {
+			gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_XBIT_CFGX(gpio));
+			gpio_bit.s.tx_oe = 1;
+			gpio_bit.cn70xx.output_sel = (index == 0 ? 0x14 : 0x19);
+			cvmx_write_csr(CVMX_GPIO_XBIT_CFGX(gpio), gpio_bit.u64);
+		}
+
+		/* Enable XHCI power control and set if active high or low. */
+		uctl_host_cfg.u64 = cvmx_read_csr(base + UCTL_HOST_CFG);
+		uctl_host_cfg.s.ppc_en = 1;
+		uctl_host_cfg.s.ppc_active_high_en = !power_active_low;
+		cvmx_write_csr(base + UCTL_HOST_CFG, uctl_host_cfg.u64);
+	} else {
+		/* Disable XHCI power control and set if active high. */
+		uctl_host_cfg.u64 = cvmx_read_csr(base + UCTL_HOST_CFG);
+		uctl_host_cfg.s.ppc_en = 0;
+		uctl_host_cfg.s.ppc_active_high_en = 0;
+		cvmx_write_csr(base + UCTL_HOST_CFG, uctl_host_cfg.u64);
+		dev_warn(dev, "No \"power\" property; port power control disabled\n");
+	}
+	return 0;
+}
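+
+/*
+ * "power" property shapes accepted above (cell values invented for
+ * illustration): a three-cell form such as <&gpio 9 1> selects GPIO 9
+ * with active-low polarity, a two-cell form such as <&gpio 9> selects
+ * GPIO 9 as active-high, and any other length is rejected.
+ */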
+
+static int dwc3_octeon_clocks_start(struct device *dev, u64 base)
+{
+	union cvm_usbdrd_uctl_ctl uctl_ctl;
+	int ref_clk_sel = 2;
+	u64 div;
+	u32 clock_rate;
+	int mpll_mul;
+	int i;
+	u64 h_clk_rate;
+	u64 uctl_ctl_reg = base;
+
+	if (dev->of_node) {
+		const char *ss_clock_type;
+		const char *hs_clock_type;
+
+		i = of_property_read_u32(dev->of_node,
+					 "refclk-frequency", &clock_rate);
+		if (i) {
+			pr_err("No UCTL \"refclk-frequency\"\n");
+			return -EINVAL;
+		}
+		i = of_property_read_string(dev->of_node,
+					    "refclk-type-ss", &ss_clock_type);
+		if (i) {
+			pr_err("No UCTL \"refclk-type-ss\"\n");
+			return -EINVAL;
+		}
+		i = of_property_read_string(dev->of_node,
+					    "refclk-type-hs", &hs_clock_type);
+		if (i) {
+			pr_err("No UCTL \"refclk-type-hs\"\n");
+			return -EINVAL;
+		}
+		if (strcmp("dlmc_ref_clk0", ss_clock_type) == 0) {
+			if (strcmp(hs_clock_type, "dlmc_ref_clk0") == 0)
+				ref_clk_sel = 0;
+			else if (strcmp(hs_clock_type, "pll_ref_clk") == 0)
+				ref_clk_sel = 2;
+			else
+				pr_err("Invalid HS clock type %s, using pll_ref_clk instead\n",
+				       hs_clock_type);
+		} else if (strcmp(ss_clock_type, "dlmc_ref_clk1") == 0) {
+			if (strcmp(hs_clock_type, "dlmc_ref_clk1") == 0)
+				ref_clk_sel = 1;
+			else if (strcmp(hs_clock_type, "pll_ref_clk") == 0)
+				ref_clk_sel = 3;
+			else {
+				pr_err("Invalid HS clock type %s, using pll_ref_clk instead\n",
+				       hs_clock_type);
+				ref_clk_sel = 3;
+			}
+		} else
+			pr_err("Invalid SS clock type %s, using dlmc_ref_clk0 instead\n",
+			       ss_clock_type);
+
+		if ((ref_clk_sel == 0 || ref_clk_sel == 1) &&
+				  (clock_rate != 100000000))
+			pr_err("Invalid UCTL clock rate of %u, using 100000000 instead\n",
+			       clock_rate);
+
+	} else {
+		pr_err("No USB UCTL device node\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Step 1: Wait for all voltages to be stable...that surely
+	 *         happened before starting the kernel. SKIP
+	 */
+
+	/* Step 2: Select GPIO for overcurrent indication, if desired. SKIP */
+
+	/* Step 3: Assert all resets. */
+	uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+	uctl_ctl.s.uphy_rst = 1;
+	uctl_ctl.s.uahc_rst = 1;
+	uctl_ctl.s.uctl_rst = 1;
+	cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+	/* Step 4a: Reset the clock dividers. */
+	uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+	uctl_ctl.s.h_clkdiv_rst = 1;
+	cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+	/* Step 4b: Select controller clock frequency. */
+	for (div = 0; div < OCTEON_H_CLKDIV_SEL; div++) {
+		h_clk_rate = octeon_get_io_clock_rate() / clk_div[div];
+		if (h_clk_rate <= OCTEON_MAX_H_CLK_RATE &&
+				 h_clk_rate >= OCTEON_MIN_H_CLK_RATE)
+			break;
+	}
+	uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+	uctl_ctl.s.h_clkdiv_sel = div;
+	uctl_ctl.s.h_clk_en = 1;
+	cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+	uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+	if ((div != uctl_ctl.s.h_clkdiv_sel) || (!uctl_ctl.s.h_clk_en)) {
+		dev_err(dev, "dwc3 controller clock init failure.\n");
+		return -EINVAL;
+	}
+
+	/* Step 4c: Deassert the controller clock divider reset. */
+	uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+	uctl_ctl.s.h_clkdiv_rst = 0;
+	cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+	/* Step 5a: Reference clock configuration. */
+	uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+	uctl_ctl.s.ref_clk_sel = ref_clk_sel;
+	uctl_ctl.s.ref_clk_fsel = 0x07;
+	uctl_ctl.s.ref_clk_div2 = 0;
+	switch (clock_rate) {
+	default:
+		dev_err(dev, "Invalid ref_clk %u, using 100000000 instead\n",
+			clock_rate);
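+		/* Fall through. */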
+	case 100000000:
+		mpll_mul = 0x19;
+		if (ref_clk_sel < 2)
+			uctl_ctl.s.ref_clk_fsel = 0x27;
+		break;
+	case 50000000:
+		mpll_mul = 0x32;
+		break;
+	case 125000000:
+		mpll_mul = 0x28;
+		break;
+	}
+	uctl_ctl.s.mpll_multiplier = mpll_mul;
+
+	/* Step 5b: Configure and enable spread-spectrum for SuperSpeed. */
+	uctl_ctl.s.ssc_en = 1;
+
+	/* Step 5c: Enable SuperSpeed. */
+	uctl_ctl.s.ref_ssp_en = 1;
+
+	/* Step 5d: Configure PHYs. SKIP */
+
+	/* Step 6a & 6b: Power up PHYs. */
+	uctl_ctl.s.hs_power_en = 1;
+	uctl_ctl.s.ss_power_en = 1;
+	cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+	/* Step 7: Wait 10 controller-clock cycles to take effect. */
+	udelay(10);
+
+	/* Step 8a: Deassert UCTL reset signal. */
+	uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+	uctl_ctl.s.uctl_rst = 0;
+	cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+	/* Step 8b: Wait 10 controller-clock cycles. */
+	udelay(10);
+
+	/* Step 8c: Set up port power control. */
+	if (dwc3_octeon_config_power(dev, base)) {
+		dev_err(dev, "Error configuring power.\n");
+		return -EINVAL;
+	}
+
+	/* Step 8d: Deassert UAHC reset signal. */
+	uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+	uctl_ctl.s.uahc_rst = 0;
+	cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+	/* Step 8e: Wait 10 controller-clock cycles. */
+	udelay(10);
+
+	/* Step 9: Enable conditional coprocessor clock of UCTL. */
+	uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+	uctl_ctl.s.csclk_en = 1;
+	cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+	/* Step 10: Set for host mode only. */
+	uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+	uctl_ctl.s.drd_mode = 0;
+	cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+	return 0;
+}
+
+static void __init dwc3_octeon_set_endian_mode(u64 base)
+{
+#define UCTL_SHIM_CFG	0xe8
+	union cvm_usbdrd_uctl_shim_cfg shim_cfg;
+
+	shim_cfg.u64 = cvmx_read_csr(base + UCTL_SHIM_CFG);
+#ifdef __BIG_ENDIAN
+	shim_cfg.s.dma_endian_mode = 1;
+	shim_cfg.s.csr_endian_mode = 1;
+#else
+	shim_cfg.s.dma_endian_mode = 0;
+	shim_cfg.s.csr_endian_mode = 0;
+#endif
+	cvmx_write_csr(base + UCTL_SHIM_CFG, shim_cfg.u64);
+}
+
+#define CVMX_USBDRDX_UCTL_CTL(index)				\
+		(CVMX_ADD_IO_SEG(0x0001180068000000ull) +	\
+		((index & 1) * 0x1000000ull))
+static void __init dwc3_octeon_phy_reset(u64 base)
+{
+	union cvm_usbdrd_uctl_ctl uctl_ctl;
+	int index = (base >> 24) & 1;
+
+	uctl_ctl.u64 = cvmx_read_csr(CVMX_USBDRDX_UCTL_CTL(index));
+	uctl_ctl.s.uphy_rst = 0;
+	cvmx_write_csr(CVMX_USBDRDX_UCTL_CTL(index), uctl_ctl.u64);
+}
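+
+/*
+ * Index derivation used above and in dwc3_octeon_config_power(): per
+ * CVMX_USBDRDX_UCTL_CTL() the two controllers sit 0x1000000 apart
+ * (0x0001180068000000 and 0x0001180069000000), so bit 24 of the
+ * register base distinguishes them and (base >> 24) & 1 yields the
+ * controller index.
+ */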
+
+static int __init dwc3_octeon_device_init(void)
+{
+	const char compat_node_name[] = "cavium,octeon-7130-usb-uctl";
+	struct platform_device *pdev;
+	struct device_node *node;
+	struct resource *res;
+	void __iomem *base;
+
+	/*
+	 * There should be only three universal controller ("uctl")
+	 * nodes in the device tree: two for USB and one for SATA,
+	 * which we ignore.
+	 */
+	node = NULL;
+	do {
+		node = of_find_node_by_name(node, "uctl");
+		if (!node)
+			return -ENODEV;
+
+		if (of_device_is_compatible(node, compat_node_name)) {
+			pdev = of_find_device_by_node(node);
+			if (!pdev)
+				return -ENODEV;
+
+			res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+			if (res == NULL) {
+				put_device(&pdev->dev);
+				dev_err(&pdev->dev, "No memory resources\n");
+				return -ENXIO;
+			}
+
+			/*
+			 * The code below maps in the registers necessary for
+			 * setting up the clocks and resetting PHYs. We must
+			 * release the resources so the dwc3 subsystem doesn't
+			 * know the difference.
+			 */
+			base = devm_ioremap_resource(&pdev->dev, res);
+			if (IS_ERR(base)) {
+				put_device(&pdev->dev);
+				return PTR_ERR(base);
+			}
+
+			mutex_lock(&dwc3_octeon_clocks_mutex);
+			dwc3_octeon_clocks_start(&pdev->dev, (u64)base);
+			dwc3_octeon_set_endian_mode((u64)base);
+			dwc3_octeon_phy_reset((u64)base);
+			dev_info(&pdev->dev, "clocks initialized.\n");
+			mutex_unlock(&dwc3_octeon_clocks_mutex);
+			devm_iounmap(&pdev->dev, base);
+			devm_release_mem_region(&pdev->dev, res->start,
+						resource_size(res));
+		}
+	} while (node != NULL);
+
+	return 0;
+}
+device_initcall(dwc3_octeon_device_init);
+
+MODULE_AUTHOR("David Daney <david.daney@cavium.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB driver for OCTEON III SoC");
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/octeon_boot.h b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/octeon_boot.h
new file mode 100644
index 0000000..a6ce7c4
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/octeon_boot.h
@@ -0,0 +1,95 @@
+/*
+ * (C) Copyright 2004, 2005 Cavium Networks
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __OCTEON_BOOT_H__
+#define __OCTEON_BOOT_H__
+
+#include <linux/types.h>
+
+struct boot_init_vector {
+	/* First stage address - in ram instead of flash */
+	uint64_t code_addr;
+	/* Setup code for application, NOT application entry point */
+	uint32_t app_start_func_addr;
+	/* k0 is used for global data - needs to be passed to other cores */
+	uint32_t k0_val;
+	/* Address of boot info block structure */
+	uint64_t boot_info_addr;
+	uint32_t flags;		/* flags */
+	uint32_t pad;
+};
+
+/* similar to bootloader's linux_app_boot_info but without global data */
+struct linux_app_boot_info {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t labi_signature;
+	uint32_t start_core0_addr;
+	uint32_t avail_coremask;
+	uint32_t pci_console_active;
+	uint32_t icache_prefetch_disable;
+	uint32_t padding;
+	uint64_t InitTLBStart_addr;
+	uint32_t start_app_addr;
+	uint32_t cur_exception_base;
+	uint32_t no_mark_private_data;
+	uint32_t compact_flash_common_base_addr;
+	uint32_t compact_flash_attribute_base_addr;
+	uint32_t led_display_base_addr;
+#else
+	uint32_t start_core0_addr;
+	uint32_t labi_signature;
+
+	uint32_t pci_console_active;
+	uint32_t avail_coremask;
+
+	uint32_t padding;
+	uint32_t icache_prefetch_disable;
+
+	uint64_t InitTLBStart_addr;
+
+	uint32_t cur_exception_base;
+	uint32_t start_app_addr;
+
+	uint32_t compact_flash_common_base_addr;
+	uint32_t no_mark_private_data;
+
+	uint32_t led_display_base_addr;
+	uint32_t compact_flash_attribute_base_addr;
+#endif
+};
+
+/* Rather than copying a lot of the bootloader's structures,
+   only the offset of the requested member is kept here. */
+#define AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK	 0x765c
+
+/* hardcoded in bootloader */
+#define	 LABI_ADDR_IN_BOOTLOADER			 0x700
+
+#define LINUX_APP_BOOT_BLOCK_NAME "linux-app-boot"
+
+#define LABI_SIGNATURE 0xAABBCC01
+
+/*  from uboot-headers/octeon_mem_map.h */
+#define EXCEPTION_BASE_INCR	(4 * 1024)
+			       /* Increment size for exception base addresses (4k minimum) */
+#define EXCEPTION_BASE_BASE	0
+#define BOOTLOADER_PRIV_DATA_BASE	(EXCEPTION_BASE_BASE + 0x800)
+#define BOOTLOADER_BOOT_VECTOR		(BOOTLOADER_PRIV_DATA_BASE)
+
+#endif /* __OCTEON_BOOT_H__ */
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/setup.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/setup.c
new file mode 100644
index 0000000..a8034d0
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/setup.c
@@ -0,0 +1,1278 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2007 Cavium Networks
+ * Copyright (C) 2008, 2009 Wind River Systems
+ *   written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/compiler.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/serial.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/string.h>	/* for memset */
+#include <linux/tty.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/serial_8250.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
+#include <linux/kexec.h>
+
+#include <asm/processor.h>
+#include <asm/reboot.h>
+#include <asm/smp-ops.h>
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/bootinfo.h>
+#include <asm/sections.h>
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/pci-octeon.h>
+#include <asm/octeon/cvmx-rst-defs.h>
+
+/*
+ * TRUE for devices having registers with little-endian byte
+ * order, FALSE for registers with native-endian byte order.
+ * PCI mandates little-endian, USB and SATA are configurable,
+ * but we chose little-endian for these.
+ */
+const bool octeon_should_swizzle_table[256] = {
+	[0x00] = true,	/* bootbus/CF */
+	[0x1b] = true,	/* PCI mmio window */
+	[0x1c] = true,	/* PCI mmio window */
+	[0x1d] = true,	/* PCI mmio window */
+	[0x1e] = true,	/* PCI mmio window */
+	[0x68] = true,	/* OCTEON III USB */
+	[0x69] = true,	/* OCTEON III USB */
+	[0x6c] = true,	/* OCTEON III SATA */
+	[0x6f] = true,	/* OCTEON II USB */
+};
+EXPORT_SYMBOL(octeon_should_swizzle_table);
+
+#ifdef CONFIG_PCI
+extern void pci_console_init(const char *arg);
+#endif
+
+static unsigned long long max_memory = ULLONG_MAX;
+static unsigned long long reserve_low_mem;
+
+DEFINE_SEMAPHORE(octeon_bootbus_sem);
+EXPORT_SYMBOL(octeon_bootbus_sem);
+
+struct octeon_boot_descriptor *octeon_boot_desc_ptr;
+
+struct cvmx_bootinfo *octeon_bootinfo;
+EXPORT_SYMBOL(octeon_bootinfo);
+
+#ifdef CONFIG_KEXEC
+#ifdef CONFIG_SMP
+/*
+ * Wait until the relocation code is prepared, then send the
+ * secondary CPUs to spin until the kernel is relocated.
+ */
+static void octeon_kexec_smp_down(void *ignored)
+{
+	int cpu = smp_processor_id();
+
+	local_irq_disable();
+	set_cpu_online(cpu, false);
+	while (!atomic_read(&kexec_ready_to_reboot))
+		cpu_relax();
+
+	asm volatile (
+	"	sync						\n"
+	"	synci	($0)					\n");
+
+	relocated_kexec_smp_wait(NULL);
+}
+#endif
+
+#define OCTEON_DDR0_BASE    (0x0ULL)
+#define OCTEON_DDR0_SIZE    (0x010000000ULL)
+#define OCTEON_DDR1_BASE    (0x410000000ULL)
+#define OCTEON_DDR1_SIZE    (0x010000000ULL)
+#define OCTEON_DDR2_BASE    (0x020000000ULL)
+#define OCTEON_DDR2_SIZE    (0x3e0000000ULL)
+#define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL)
+
+static struct kimage *kimage_ptr;
+
+static void kexec_bootmem_init(uint64_t mem_size, uint32_t low_reserved_bytes)
+{
+	int64_t addr;
+	struct cvmx_bootmem_desc *bootmem_desc;
+
+	bootmem_desc = cvmx_bootmem_get_desc();
+
+	if (mem_size > OCTEON_MAX_PHY_MEM_SIZE) {
+		mem_size = OCTEON_MAX_PHY_MEM_SIZE;
+		pr_err("Error: requested memory too large, truncating to maximum size\n");
+	}
+
+	bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
+	bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;
+
+	addr = (OCTEON_DDR0_BASE + reserve_low_mem + low_reserved_bytes);
+	bootmem_desc->head_addr = 0;
+
+	if (mem_size <= OCTEON_DDR0_SIZE) {
+		__cvmx_bootmem_phy_free(addr,
+				mem_size - reserve_low_mem -
+				low_reserved_bytes, 0);
+		return;
+	}
+
+	__cvmx_bootmem_phy_free(addr,
+			OCTEON_DDR0_SIZE - reserve_low_mem -
+			low_reserved_bytes, 0);
+
+	mem_size -= OCTEON_DDR0_SIZE;
+
+	if (mem_size > OCTEON_DDR1_SIZE) {
+		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
+		__cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
+				mem_size - OCTEON_DDR1_SIZE, 0);
+	} else
+		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
+}
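+
+/*
+ * Example of the split above (an 8 GiB dram size is assumed purely for
+ * illustration): the first 256 MiB (OCTEON_DDR0_SIZE) is freed from
+ * OCTEON_DDR0_BASE minus the low reservations, the next 256 MiB from
+ * OCTEON_DDR1_BASE, and the remaining 7.5 GiB from OCTEON_DDR2_BASE.
+ */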
+
+static int octeon_kexec_prepare(struct kimage *image)
+{
+	int i;
+	char *bootloader = "kexec";
+
+	octeon_boot_desc_ptr->argc = 0;
+	for (i = 0; i < image->nr_segments; i++) {
+		if (!strncmp(bootloader, (char *)image->segment[i].buf,
+				strlen(bootloader))) {
+			/*
+			 * convert command line string to array
+			 * of parameters (as bootloader does).
+			 */
+			int argc = 0, offt;
+			char *str = (char *)image->segment[i].buf;
+			char *ptr = strchr(str, ' ');
+			while (ptr && (OCTEON_ARGV_MAX_ARGS > argc)) {
+				*ptr = '\0';
+				if (ptr[1] != ' ') {
+					offt = (int)(ptr - str + 1);
+					octeon_boot_desc_ptr->argv[argc] =
+						image->segment[i].mem + offt;
+					argc++;
+				}
+				ptr = strchr(ptr + 1, ' ');
+			}
+			octeon_boot_desc_ptr->argc = argc;
+			break;
+		}
+	}
+
+	/*
+	 * Information about segments will be needed during pre-boot memory
+	 * initialization.
+	 */
+	kimage_ptr = image;
+	return 0;
+}
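+
+/*
+ * Tokenization example for octeon_kexec_prepare() (command line invented
+ * for illustration): a segment holding "kexec console=ttyS0 mem=512M" is
+ * split at the spaces, argc becomes 2, and argv[0]/argv[1] point at the
+ * "console=..." and "mem=..." substrings inside the copied segment; the
+ * leading "kexec" marker itself is not passed on.
+ */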
+
+static void octeon_generic_shutdown(void)
+{
+	int i;
+#ifdef CONFIG_SMP
+	int cpu;
+#endif
+	struct cvmx_bootmem_desc *bootmem_desc;
+	void *named_block_array_ptr;
+
+	bootmem_desc = cvmx_bootmem_get_desc();
+	named_block_array_ptr =
+		cvmx_phys_to_ptr(bootmem_desc->named_block_array_addr);
+
+#ifdef CONFIG_SMP
+	/* disable watchdogs */
+	for_each_online_cpu(cpu)
+		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
+#else
+	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+#endif
+	if (kimage_ptr != kexec_crash_image) {
+		memset(named_block_array_ptr,
+			0x0,
+			CVMX_BOOTMEM_NUM_NAMED_BLOCKS *
+			sizeof(struct cvmx_bootmem_named_block_desc));
+		/*
+		 * Mark all memory (except low 0x100000 bytes) as free.
+		 * It is the same thing that bootloader does.
+		 */
+		kexec_bootmem_init(octeon_bootinfo->dram_size*1024ULL*1024ULL,
+				0x100000);
+		/*
+		 * Allocate all segments to avoid their corruption during boot.
+		 */
+		for (i = 0; i < kimage_ptr->nr_segments; i++)
+			cvmx_bootmem_alloc_address(
+				kimage_ptr->segment[i].memsz + 2*PAGE_SIZE,
+				kimage_ptr->segment[i].mem - PAGE_SIZE,
+				PAGE_SIZE);
+	} else {
+		/*
+		 * Do not mark all memory as free. Free only named sections
+		 * leaving the rest of memory unchanged.
+		 */
+		struct cvmx_bootmem_named_block_desc *ptr =
+			(struct cvmx_bootmem_named_block_desc *)
+			named_block_array_ptr;
+
+		for (i = 0; i < bootmem_desc->named_block_num_blocks; i++)
+			if (ptr[i].size)
+				cvmx_bootmem_free_named(ptr[i].name);
+	}
+	kexec_args[2] = 1UL; /* running on octeon_main_processor */
+	kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
+#ifdef CONFIG_SMP
+	secondary_kexec_args[2] = 0UL; /* running on secondary cpu */
+	secondary_kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
+#endif
+}
+
+static void octeon_shutdown(void)
+{
+	octeon_generic_shutdown();
+#ifdef CONFIG_SMP
+	smp_call_function(octeon_kexec_smp_down, NULL, 0);
+	smp_wmb();
+	while (num_online_cpus() > 1) {
+		cpu_relax();
+		mdelay(1);
+	}
+#endif
+}
+
+static void octeon_crash_shutdown(struct pt_regs *regs)
+{
+	octeon_generic_shutdown();
+	default_machine_crash_shutdown(regs);
+}
+
+#ifdef CONFIG_SMP
+void octeon_crash_smp_send_stop(void)
+{
+	int cpu;
+
+	/* disable watchdogs */
+	for_each_online_cpu(cpu)
+		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
+}
+#endif
+
+#endif /* CONFIG_KEXEC */
+
+#ifdef CONFIG_CAVIUM_RESERVE32
+uint64_t octeon_reserve32_memory;
+EXPORT_SYMBOL(octeon_reserve32_memory);
+#endif
+
+#ifdef CONFIG_KEXEC
+/* The crashkernel cmdline parameter is parsed _after_ memory setup,
+ * so we also parse it here (workaround for EHB5200) */
+static uint64_t crashk_size, crashk_base;
+#endif
+
+static int octeon_uart;
+
+extern asmlinkage void handle_int(void);
+
+/**
+ * Return non-zero if we are currently running in the Octeon simulator
+ *
+ * Returns: non-zero when running under the simulator, zero otherwise.
+ */
+int octeon_is_simulation(void)
+{
+	return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
+}
+EXPORT_SYMBOL(octeon_is_simulation);
+
+/**
+ * Return true if Octeon is in PCI Host mode. This means
+ * Linux can control the PCI bus.
+ *
+ * Returns: non-zero if Octeon is in host mode.
+ */
+int octeon_is_pci_host(void)
+{
+#ifdef CONFIG_PCI
+	return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
+#else
+	return 0;
+#endif
+}
+
+/**
+ * Get the clock rate of Octeon
+ *
+ * Returns: clock rate in Hz
+ */
+uint64_t octeon_get_clock_rate(void)
+{
+	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
+
+	return sysinfo->cpu_clock_hz;
+}
+EXPORT_SYMBOL(octeon_get_clock_rate);
+
+static u64 octeon_io_clock_rate;
+
+u64 octeon_get_io_clock_rate(void)
+{
+	return octeon_io_clock_rate;
+}
+EXPORT_SYMBOL(octeon_get_io_clock_rate);
+
+
+/**
+ * Write to the LCD display connected to the bootbus. This display
+ * exists on most Cavium evaluation boards. If it doesn't exist, then
+ * this function doesn't do anything.
+ *
+ * @s:	    String to write
+ */
+void octeon_write_lcd(const char *s)
+{
+	if (octeon_bootinfo->led_display_base_addr) {
+		void __iomem *lcd_address =
+			ioremap_nocache(octeon_bootinfo->led_display_base_addr,
+					8);
+		int i;
+		for (i = 0; i < 8; i++, s++) {
+			if (*s)
+				iowrite8(*s, lcd_address + i);
+			else
+				iowrite8(' ', lcd_address + i);
+		}
+		iounmap(lcd_address);
+	}
+}
+
+/**
+ * Return the console uart passed by the bootloader
+ *
+ * Returns: uart (0 or 1)
+ */
+int octeon_get_boot_uart(void)
+{
+	return (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
+		1 : 0;
+}
+
+/**
+ * Get the coremask Linux was booted on.
+ *
+ * Returns: core mask
+ */
+int octeon_get_boot_coremask(void)
+{
+	return octeon_boot_desc_ptr->core_mask;
+}
+
+/**
+ * Check the hardware BIST results for a CPU
+ */
+void octeon_check_cpu_bist(void)
+{
+	const int coreid = cvmx_get_core_num();
+	unsigned long long mask;
+	unsigned long long bist_val;
+
+	/* Check BIST results for COP0 registers */
+	mask = 0x1f00000000ull;
+	bist_val = read_octeon_c0_icacheerr();
+	if (bist_val & mask)
+		pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
+		       coreid, bist_val);
+
+	bist_val = read_octeon_c0_dcacheerr();
+	if (bist_val & 1)
+		pr_err("Core%d L1 Dcache parity error: "
+		       "CacheErr(dcache) = 0x%llx\n",
+		       coreid, bist_val);
+
+	mask = 0xfc00000000000000ull;
+	bist_val = read_c0_cvmmemctl();
+	if (bist_val & mask)
+		pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
+		       coreid, bist_val);
+
+	write_octeon_c0_dcacheerr(0);
+}
+
+/**
+ * Reboot Octeon
+ *
+ * @command: Command to pass to the bootloader. Currently ignored.
+ */
+static void octeon_restart(char *command)
+{
+	/* Disable all watchdogs before soft reset. They don't get cleared */
+#ifdef CONFIG_SMP
+	int cpu;
+	for_each_online_cpu(cpu)
+		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
+#else
+	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+#endif
+
+	mb();
+	while (1)
+		if (OCTEON_IS_OCTEON3())
+			cvmx_write_csr(CVMX_RST_SOFT_RST, 1);
+		else
+			cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
+}
+
+
+/**
+ * Permanently stop a core.
+ *
+ * @arg: Ignored.
+ */
+static void octeon_kill_core(void *arg)
+{
+	if (octeon_is_simulation())
+		/* A break instruction causes the simulator to stop a core */
+		asm volatile ("break" ::: "memory");
+
+	local_irq_disable();
+	/* Disable watchdog on this core. */
+	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+	/* Spin in a low power mode. */
+	while (true)
+		asm volatile ("wait" ::: "memory");
+}
+
+
+/**
+ * Halt the system
+ */
+static void octeon_halt(void)
+{
+	smp_call_function(octeon_kill_core, NULL, 0);
+
+	switch (octeon_bootinfo->board_type) {
+	case CVMX_BOARD_TYPE_NAO38:
+		/* Driving a 1 to GPIO 12 shuts off this board */
+		cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
+		cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
+		break;
+	default:
+		octeon_write_lcd("PowerOff");
+		break;
+	}
+
+	octeon_kill_core(NULL);
+}
+
+static char __read_mostly octeon_system_type[80];
+
+static void __init init_octeon_system_type(void)
+{
+	char const *board_type;
+
+	board_type = cvmx_board_type_to_string(octeon_bootinfo->board_type);
+	if (board_type == NULL) {
+		struct device_node *root;
+		int ret;
+
+		root = of_find_node_by_path("/");
+		ret = of_property_read_string(root, "model", &board_type);
+		of_node_put(root);
+		if (ret)
+			board_type = "Unsupported Board";
+	}
+
+	snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
+		 board_type, octeon_model_get_string(read_c0_prid()));
+}
+
+/**
+ * Return a string representing the system type
+ *
+ * Returns: the system type string.
+ */
+const char *octeon_board_type_string(void)
+{
+	return octeon_system_type;
+}
+
+const char *get_system_type(void)
+	__attribute__ ((alias("octeon_board_type_string")));
+
+void octeon_user_io_init(void)
+{
+	union octeon_cvmemctl cvmmemctl;
+
+	/* Get the current settings for CP0_CVMMEMCTL_REG */
+	cvmmemctl.u64 = read_c0_cvmmemctl();
+	/* R/W If set, marked write-buffer entries time out the same
+	 * as other entries; if clear, marked write-buffer entries
+	 * use the maximum timeout. */
+	cvmmemctl.s.dismarkwblongto = 1;
+	/* R/W If set, a merged store does not clear the write-buffer
+	 * entry timeout state. */
+	cvmmemctl.s.dismrgclrwbto = 0;
+	/* R/W Two bits that are the MSBs of the resultant CVMSEG LM
+	 * word location for an IOBDMA. The other 8 bits come from the
+	 * SCRADDR field of the IOBDMA. */
+	cvmmemctl.s.iobdmascrmsb = 0;
+	/* R/W If set, SYNCWS and SYNCS only order marked stores; if
+	 * clear, SYNCWS and SYNCS only order unmarked
+	 * stores. SYNCWSMARKED has no effect when DISSYNCWS is
+	 * set. */
+	cvmmemctl.s.syncwsmarked = 0;
+	/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
+	cvmmemctl.s.dissyncws = 0;
+	/* R/W If set, no stall happens on write buffer full. */
+	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
+		cvmmemctl.s.diswbfst = 1;
+	else
+		cvmmemctl.s.diswbfst = 0;
+	/* R/W If set (and SX set), supervisor-level loads/stores can
+	 * use XKPHYS addresses with VA<48>==0 */
+	cvmmemctl.s.xkmemenas = 0;
+
+	/* R/W If set (and UX set), user-level loads/stores can use
+	 * XKPHYS addresses with VA<48>==0 */
+	cvmmemctl.s.xkmemenau = 0;
+
+	/* R/W If set (and SX set), supervisor-level loads/stores can
+	 * use XKPHYS addresses with VA<48>==1 */
+	cvmmemctl.s.xkioenas = 0;
+
+	/* R/W If set (and UX set), user-level loads/stores can use
+	 * XKPHYS addresses with VA<48>==1 */
+	cvmmemctl.s.xkioenau = 0;
+
+	/* R/W If set, all stores act as SYNCW (NOMERGE must be set
+	 * when this is set) RW, reset to 0. */
+	cvmmemctl.s.allsyncw = 0;
+
+	/* R/W If set, no stores merge, and all stores reach the
+	 * coherent bus in order. */
+	cvmmemctl.s.nomerge = 0;
+	/* R/W Selects the bit in the counter used for DID time-outs:
+	 * 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is
+	 * between 1x and 2x this interval. For example, with
+	 * DIDTTO=3, expiration interval is between 16K and 32K. */
+	cvmmemctl.s.didtto = 0;
+	/* R/W If set, the (mem) CSR clock never turns off. */
+	cvmmemctl.s.csrckalwys = 0;
+	/* R/W If set, mclk never turns off. */
+	cvmmemctl.s.mclkalwys = 0;
+	/* R/W Selects the bit in the counter used for write buffer
+	 * flush time-outs; (WBFLT+11) is the bit position in an
+	 * internal counter used to determine expiration. The write
+	 * buffer expires between 1x and 2x this interval. For
+	 * example, with WBFLT = 0, a write buffer expires between 2K
+	 * and 4K cycles after the write buffer entry is allocated. */
+	cvmmemctl.s.wbfltime = 0;
+	/* R/W If set, do not put Istream in the L2 cache. */
+	cvmmemctl.s.istrnol2 = 0;
+
+	/*
+	 * R/W The write buffer threshold. As per erratum Core-14752
+	 * for CN63XX, a sc/scd might fail if the write buffer is
+	 * full.  Lowering WBTHRESH greatly lowers the chances of the
+	 * write buffer ever being full and triggering the erratum.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+		cvmmemctl.s.wbthresh = 4;
+	else
+		cvmmemctl.s.wbthresh = 10;
+
+	/* R/W If set, CVMSEG is available for loads/stores in
+	 * kernel/debug mode. */
+#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
+	cvmmemctl.s.cvmsegenak = 1;
+#else
+	cvmmemctl.s.cvmsegenak = 0;
+#endif
+	/* R/W If set, CVMSEG is available for loads/stores in
+	 * supervisor mode. */
+	cvmmemctl.s.cvmsegenas = 0;
+	/* R/W If set, CVMSEG is available for loads/stores in user
+	 * mode. */
+	cvmmemctl.s.cvmsegenau = 0;
+
+	write_c0_cvmmemctl(cvmmemctl.u64);
+
+	/* Setup of CVMSEG is done in kernel-entry-init.h */
+	if (smp_processor_id() == 0)
+		pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
+			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
+			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
+
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		union cvmx_iob_fau_timeout fau_timeout;
+
+		/* Set a default for the hardware timeouts */
+		fau_timeout.u64 = 0;
+		fau_timeout.s.tout_val = 0xfff;
+		/* Disable tagwait FAU timeout */
+		fau_timeout.s.tout_enb = 0;
+		cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
+	}
+
+	if ((!OCTEON_IS_MODEL(OCTEON_CN68XX) &&
+	     !OCTEON_IS_MODEL(OCTEON_CN7XXX)) ||
+	    OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+		union cvmx_pow_nw_tim nm_tim;
+
+		nm_tim.u64 = 0;
+		/* 4096 cycles */
+		nm_tim.s.nw_tim = 3;
+		cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
+	}
+
+	write_octeon_c0_icacheerr(0);
+	write_c0_derraddr1(0);
+}
+
+/**
+ * Early entry point for arch setup
+ */
+void __init prom_init(void)
+{
+	struct cvmx_sysinfo *sysinfo;
+	const char *arg;
+	char *p;
+	int i;
+	u64 t;
+	int argc;
+#ifdef CONFIG_CAVIUM_RESERVE32
+	int64_t addr = -1;
+#endif
+	/*
+	 * The bootloader passes a pointer to the boot descriptor in
+	 * $a3, this is available as fw_arg3.
+	 */
+	octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
+	octeon_bootinfo =
+		cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
+	cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
+
+	sysinfo = cvmx_sysinfo_get();
+	memset(sysinfo, 0, sizeof(*sysinfo));
+	sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
+	sysinfo->phy_mem_desc_addr = (u64)phys_to_virt(octeon_bootinfo->phy_mem_desc_addr);
+
+	if ((octeon_bootinfo->major_version > 1) ||
+	    (octeon_bootinfo->major_version == 1 &&
+	     octeon_bootinfo->minor_version >= 4))
+		cvmx_coremask_copy(&sysinfo->core_mask,
+				   &octeon_bootinfo->ext_core_mask);
+	else
+		cvmx_coremask_set64(&sysinfo->core_mask,
+				    octeon_bootinfo->core_mask);
+
+	/* Some broken U-Boot versions pass garbage in the upper bits; clear them out */
+	if (!OCTEON_IS_MODEL(OCTEON_CN78XX))
+		for (i = 512; i < 1024; i++)
+			cvmx_coremask_clear_core(&sysinfo->core_mask, i);
+
+	sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
+	sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
+	sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
+	sysinfo->board_type = octeon_bootinfo->board_type;
+	sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
+	sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
+	memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
+	       sizeof(sysinfo->mac_addr_base));
+	sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
+	memcpy(sysinfo->board_serial_number,
+	       octeon_bootinfo->board_serial_number,
+	       sizeof(sysinfo->board_serial_number));
+	sysinfo->compact_flash_common_base_addr =
+		octeon_bootinfo->compact_flash_common_base_addr;
+	sysinfo->compact_flash_attribute_base_addr =
+		octeon_bootinfo->compact_flash_attribute_base_addr;
+	sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
+	sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
+	sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
+
+	if (OCTEON_IS_OCTEON2()) {
+		/* I/O clock runs at a different rate than the CPU. */
+		union cvmx_mio_rst_boot rst_boot;
+		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
+		octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
+	} else if (OCTEON_IS_OCTEON3()) {
+		/* I/O clock runs at a different rate than the CPU. */
+		union cvmx_rst_boot rst_boot;
+		rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
+		octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
+	} else {
+		octeon_io_clock_rate = sysinfo->cpu_clock_hz;
+	}
+
+	t = read_c0_cvmctl();
+	if ((t & (1ull << 27)) == 0) {
+		/*
+		 * Setup the multiplier save/restore code if
+		 * CvmCtl[NOMUL] clear.
+		 */
+		void *save;
+		void *save_end;
+		void *restore;
+		void *restore_end;
+		int save_len;
+		int restore_len;
+		int save_max = (char *)octeon_mult_save_end -
+			(char *)octeon_mult_save;
+		int restore_max = (char *)octeon_mult_restore_end -
+			(char *)octeon_mult_restore;
+		if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) {
+			save = octeon_mult_save3;
+			save_end = octeon_mult_save3_end;
+			restore = octeon_mult_restore3;
+			restore_end = octeon_mult_restore3_end;
+		} else {
+			save = octeon_mult_save2;
+			save_end = octeon_mult_save2_end;
+			restore = octeon_mult_restore2;
+			restore_end = octeon_mult_restore2_end;
+		}
+		save_len = (char *)save_end - (char *)save;
+		restore_len = (char *)restore_end - (char *)restore;
+		if (!WARN_ON(save_len > save_max ||
+				restore_len > restore_max)) {
+			memcpy(octeon_mult_save, save, save_len);
+			memcpy(octeon_mult_restore, restore, restore_len);
+		}
+	}
+
+	/*
+	 * Only enable the LED controller if we're running on a CN38XX, CN58XX,
+	 * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
+	 */
+	if (!octeon_is_simulation() &&
+	    octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
+		cvmx_write_csr(CVMX_LED_EN, 0);
+		cvmx_write_csr(CVMX_LED_PRT, 0);
+		cvmx_write_csr(CVMX_LED_DBG, 0);
+		cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
+		cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
+		cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
+		cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
+		cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
+		cvmx_write_csr(CVMX_LED_EN, 1);
+	}
+#ifdef CONFIG_CAVIUM_RESERVE32
+	/*
+	 * We need to temporarily allocate all memory in the reserve32
+	 * region. This makes sure the kernel doesn't allocate this
+	 * memory when it is getting memory from the
+	 * bootloader. Later, after the memory allocations are
+	 * complete, the reserve32 region will be freed.
+	 *
+	 * Allocate memory for RESERVE32 aligned on a 2MB boundary, in
+	 * case we later use hugetlb entries with it.
+	 */
+	addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
+						0, 0, 2 << 20,
+						"CAVIUM_RESERVE32", 0);
+	if (addr < 0)
+		pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
+	else
+		octeon_reserve32_memory = addr;
+#endif
+
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
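+	/* L2D_FUS3 bits [35:34] are non-zero on parts fused down to a smaller L2. */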
+	if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
+		pr_info("Skipping L2 locking due to reduced L2 cache size\n");
+	} else {
+		uint32_t __maybe_unused ebase = read_c0_ebase() & 0x3ffff000;
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
+		/* TLB refill */
+		cvmx_l2c_lock_mem_region(ebase, 0x100);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
+		/* General exception */
+		cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
+		/* Interrupt handler */
+		cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
+		cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
+		cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
+		cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
+#endif
+	}
+#endif
+
+	octeon_check_cpu_bist();
+
+	octeon_uart = octeon_get_boot_uart();
+
+#ifdef CONFIG_SMP
+	octeon_write_lcd("LinuxSMP");
+#else
+	octeon_write_lcd("Linux");
+#endif
+
+	octeon_setup_delays();
+
+	/*
+	 * BIST should always be enabled when doing a soft reset. L2
+	 * cache locking, for instance, is not cleared unless BIST is
+	 * enabled.  Unfortunately, due to chip erratum G-200 on the
+	 * CN38XX and CN31XX, BIST must be disabled on these parts.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
+	    OCTEON_IS_MODEL(OCTEON_CN31XX))
+		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
+	else
+		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);
+
+	/* Default to 64MB in the simulator to speed things up */
+	if (octeon_is_simulation())
+		max_memory = 64ull << 20;
+
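+	/* Parse "mem=SIZE[@OFFSET]"; the optional OFFSET sets reserve_low_mem. */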
+	arg = strstr(arcs_cmdline, "mem=");
+	if (arg) {
+		max_memory = memparse(arg + 4, &p);
+		if (max_memory == 0)
+			max_memory = 32ull << 30;
+		if (*p == '@')
+			reserve_low_mem = memparse(p + 1, &p);
+	}
+
+	arcs_cmdline[0] = 0;
+	argc = octeon_boot_desc_ptr->argc;
+	for (i = 0; i < argc; i++) {
+		const char *arg =
+			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
+		if ((strncmp(arg, "MEM=", 4) == 0) ||
+		    (strncmp(arg, "mem=", 4) == 0)) {
+			max_memory = memparse(arg + 4, &p);
+			if (max_memory == 0)
+				max_memory = 32ull << 30;
+			if (*p == '@')
+				reserve_low_mem = memparse(p + 1, &p);
+#ifdef CONFIG_KEXEC
+		} else if (strncmp(arg, "crashkernel=", 12) == 0) {
+			crashk_size = memparse(arg+12, &p);
+			if (*p == '@')
+				crashk_base = memparse(p+1, &p);
+			strcat(arcs_cmdline, " ");
+			strcat(arcs_cmdline, arg);
+			/*
+			 * TODO: switch parsing to the new style, something like:
+			 * parse_crashkernel(arg, sysinfo->system_dram_size,
+			 *		  &crashk_size, &crashk_base);
+			 */
+#endif
+		} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
+			   sizeof(arcs_cmdline) - 1) {
+			strcat(arcs_cmdline, " ");
+			strcat(arcs_cmdline, arg);
+		}
+	}
+
+	if (strstr(arcs_cmdline, "console=") == NULL) {
+		if (octeon_uart == 1)
+			strcat(arcs_cmdline, " console=ttyS1,115200");
+		else
+			strcat(arcs_cmdline, " console=ttyS0,115200");
+	}
+
+	mips_hpt_frequency = octeon_get_clock_rate();
+
+	octeon_init_cvmcount();
+
+	_machine_restart = octeon_restart;
+	_machine_halt = octeon_halt;
+
+#ifdef CONFIG_KEXEC
+	_machine_kexec_shutdown = octeon_shutdown;
+	_machine_crash_shutdown = octeon_crash_shutdown;
+	_machine_kexec_prepare = octeon_kexec_prepare;
+#ifdef CONFIG_SMP
+	_crash_smp_send_stop = octeon_crash_smp_send_stop;
+#endif
+#endif
+
+	octeon_user_io_init();
+	octeon_setup_smp();
+}
+
+/* Exclude a single page from the regions obtained in plat_mem_setup. */
+#ifndef CONFIG_CRASH_DUMP
+static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size)
+{
+	if (addr > *mem && addr < *mem + *size) {
+		u64 inc = addr - *mem;
+		add_memory_region(*mem, inc, BOOT_MEM_RAM);
+		*mem += inc;
+		*size -= inc;
+	}
+
+	if (addr == *mem && *size > PAGE_SIZE) {
+		*mem += PAGE_SIZE;
+		*size -= PAGE_SIZE;
+	}
+}
+#endif /* CONFIG_CRASH_DUMP */
+
+void __init fw_init_cmdline(void)
+{
+	int i;
+
+	octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
+	for (i = 0; i < octeon_boot_desc_ptr->argc; i++) {
+		const char *arg =
+			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
+		if (strlen(arcs_cmdline) + strlen(arg) + 1 <
+			   sizeof(arcs_cmdline) - 1) {
+			strcat(arcs_cmdline, " ");
+			strcat(arcs_cmdline, arg);
+		}
+	}
+}
+
+void __init *plat_get_fdt(void)
+{
+	octeon_bootinfo =
+		cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
+	return phys_to_virt(octeon_bootinfo->fdt_addr);
+}
+
+void __init plat_mem_setup(void)
+{
+	uint64_t mem_alloc_size;
+	uint64_t total;
+	uint64_t crashk_end;
+#ifndef CONFIG_CRASH_DUMP
+	int64_t memory;
+	uint64_t kernel_start;
+	uint64_t kernel_size;
+#endif
+
+	total = 0;
+	crashk_end = 0;
+
+	/*
+	 * The MIPS memory init uses the first memory location for
+	 * some memory vectors. When SPARSEMEM is in use, it doesn't
+	 * verify that the size is big enough for the final
+	 * vectors. Making the smallest chunk 4MB seems to be enough
+	 * to work consistently.
+	 */
+	mem_alloc_size = 4 << 20;
+	if (mem_alloc_size > max_memory)
+		mem_alloc_size = max_memory;
+
+/* The crash kernel ignores the bootmem list; it relies on the mem=X@Y option. */
+#ifdef CONFIG_CRASH_DUMP
+	add_memory_region(reserve_low_mem, max_memory, BOOT_MEM_RAM);
+	total += max_memory;
+#else
+#ifdef CONFIG_KEXEC
+	if (crashk_size > 0) {
+		add_memory_region(crashk_base, crashk_size, BOOT_MEM_RAM);
+		crashk_end = crashk_base + crashk_size;
+	}
+#endif
+	/*
+	 * When allocating memory, we want incrementing addresses from
+	 * bootmem_alloc so the code in add_memory_region can merge
+	 * regions next to each other.
+	 */
+	cvmx_bootmem_lock();
+	while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
+		&& (total < max_memory)) {
+		memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
+						__pa_symbol(&_end), -1,
+						0x100000,
+						CVMX_BOOTMEM_FLAG_NO_LOCKING);
+		if (memory >= 0) {
+			u64 size = mem_alloc_size;
+#ifdef CONFIG_KEXEC
+			uint64_t end;
+#endif
+
+			/*
+			 * exclude a page at the beginning and end of
+			 * the 256MB PCIe 'hole' so the kernel will not
+			 * try to allocate multi-page buffers that
+			 * span the discontinuity.
+			 */
+			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE,
+					    &memory, &size);
+			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE +
+					    CVMX_PCIE_BAR1_PHYS_SIZE,
+					    &memory, &size);
+#ifdef CONFIG_KEXEC
+			end = memory + mem_alloc_size;
+
+			/*
+			 * add_memory_region() automatically merges regions
+			 * that are next to each other if they are added in
+			 * incrementing order.
+			 */
+			if (memory < crashk_base && end > crashk_end) {
+				/* The crashkernel region lies fully inside this allocation. */
+				add_memory_region(memory,
+						  crashk_base - memory,
+						  BOOT_MEM_RAM);
+				total += crashk_base - memory;
+				add_memory_region(crashk_end,
+						  end - crashk_end,
+						  BOOT_MEM_RAM);
+				total += end - crashk_end;
+				continue;
+			}
+
+			if (memory >= crashk_base && end <= crashk_end)
+				/*
+				 * The entire region is inside the crash
+				 * kernel's memory; ignore it.
+				 */
+				continue;
+
+			if (memory > crashk_base && memory < crashk_end &&
+			    end > crashk_end) {
+				/*
+				 * The start of the allocation overlaps the
+				 * crashkernel region; trim off the head.
+				 */
+				mem_alloc_size -= crashk_end - memory;
+				memory = crashk_end;
+			} else if (memory < crashk_base && end > crashk_base &&
+				   end < crashk_end)
+				/*
+				 * The end of the allocation overlaps the
+				 * crashkernel region; chop off the tail.
+				 */
+				mem_alloc_size -= end - crashk_base;
+#endif
+			add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
+			total += mem_alloc_size;
+			/* Restore mem_alloc_size for the next iteration. */
+			mem_alloc_size = 4 << 20;
+		} else {
+			break;
+		}
+	}
+	cvmx_bootmem_unlock();
+	/* Add the memory region for the kernel. */
+	kernel_start = (unsigned long) _text;
+	kernel_size = _end - _text;
+
+	/* Strip the CKSEG0 bits so only the physical offset remains. */
+	kernel_start &= ~0xffffffff80000000ULL;
+	add_memory_region(kernel_start, kernel_size, BOOT_MEM_RAM);
+#endif /* CONFIG_CRASH_DUMP */
+
+#ifdef CONFIG_CAVIUM_RESERVE32
+	/*
+	 * Now that we've allocated the kernel memory it is safe to
+	 * free the reserved region. We free it here so that builtin
+	 * drivers can use the memory.
+	 */
+	if (octeon_reserve32_memory)
+		cvmx_bootmem_free_named("CAVIUM_RESERVE32");
+#endif /* CONFIG_CAVIUM_RESERVE32 */
+
+	if (total == 0)
+		panic("Unable to allocate memory from "
+		      "cvmx_bootmem_phy_alloc");
+}
+
+/*
+ * Emit one character to the boot UART. Exported for use by the
+ * watchdog timer.
+ */
+int prom_putchar(char c)
+{
+	uint64_t lsrval;
+
+	/* Spin until there is room (LSR bit 5, THRE, becomes set). */
+	do {
+		lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
+	} while ((lsrval & 0x20) == 0);
+
+	/* Write the byte */
+	cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
+	return 1;
+}
+EXPORT_SYMBOL(prom_putchar);
+
+void __init prom_free_prom_memory(void)
+{
+	if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) {
+		/* Check for presence of Core-14449 fix.  */
+		u32 insn;
+		u32 *foo;
+
+		foo = &insn;
+
+		asm volatile("# before" : : : "memory");
+		prefetch(foo);
+		asm volatile(
+			".set push\n\t"
+			".set noreorder\n\t"
+			"bal 1f\n\t"
+			"nop\n"
+			"1:\tlw %0,-12($31)\n\t"
+			".set pop\n\t"
+			: "=r" (insn) : : "$31", "memory");
+
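+		/* PREF is major opcode 0x33; its hint field is in bits 20:16. */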
+		if ((insn >> 26) != 0x33)
+			panic("No PREF instruction at Core-14449 probe point.");
+
+		if (((insn >> 16) & 0x1f) != 28)
+			panic("OCTEON II DCache prefetch workaround not in place (%04x).\n"
+			      "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).",
+			      insn);
+	}
+}
+
+void __init octeon_fill_mac_addresses(void);
+int octeon_prune_device_tree(void);
+
+extern const char __appended_dtb;
+extern const char __dtb_octeon_3xxx_begin;
+extern const char __dtb_octeon_68xx_begin;
+void __init device_tree_init(void)
+{
+	const void *fdt;
+	bool do_prune;
+	bool fill_mac;
+
+#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
+	if (!fdt_check_header(&__appended_dtb)) {
+		fdt = &__appended_dtb;
+		do_prune = false;
+		fill_mac = true;
+		pr_info("Using appended Device Tree.\n");
+	} else
+#endif
+	if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
+		fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
+		if (fdt_check_header(fdt))
+			panic("Corrupt Device Tree passed to kernel.");
+		do_prune = false;
+		fill_mac = false;
+		pr_info("Using passed Device Tree.\n");
+	} else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		fdt = &__dtb_octeon_68xx_begin;
+		do_prune = true;
+		fill_mac = true;
+	} else {
+		fdt = &__dtb_octeon_3xxx_begin;
+		do_prune = true;
+		fill_mac = true;
+	}
+
+	initial_boot_params = (void *)fdt;
+
+	if (do_prune) {
+		octeon_prune_device_tree();
+		pr_info("Using internal Device Tree.\n");
+	}
+	if (fill_mac)
+		octeon_fill_mac_addresses();
+	unflatten_and_copy_device_tree();
+	init_octeon_system_type();
+}
+
+static int __initdata disable_octeon_edac_p;
+
+static int __init disable_octeon_edac(char *str)
+{
+	disable_octeon_edac_p = 1;
+	return 0;
+}
+early_param("disable_octeon_edac", disable_octeon_edac);
+
+static char *edac_device_names[] = {
+	"octeon_l2c_edac",
+	"octeon_pc_edac",
+};
+
+static int __init edac_devinit(void)
+{
+	struct platform_device *dev;
+	int i, err = 0;
+	int num_lmc;
+	char *name;
+
+	if (disable_octeon_edac_p)
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(edac_device_names); i++) {
+		name = edac_device_names[i];
+		dev = platform_device_register_simple(name, -1, NULL, 0);
+		if (IS_ERR(dev)) {
+			pr_err("Registration of %s failed!\n", name);
+			err = PTR_ERR(dev);
+		}
+	}
+
+	num_lmc = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 :
+		(OCTEON_IS_MODEL(OCTEON_CN56XX) ? 2 : 1);
+	for (i = 0; i < num_lmc; i++) {
+		dev = platform_device_register_simple("octeon_lmc_edac",
+						      i, NULL, 0);
+		if (IS_ERR(dev)) {
+			pr_err("Registration of octeon_lmc_edac %d failed!\n", i);
+			err = PTR_ERR(dev);
+		}
+	}
+
+	return err;
+}
+device_initcall(edac_devinit);
+
+static void __initdata *octeon_dummy_iospace;
+
+static int __init octeon_no_pci_init(void)
+{
+	/*
+	 * Initially assume there is no PCI. The PCI/PCIe platform code will
+	 * later re-initialize these to correct values if they are present.
+	 */
+	octeon_dummy_iospace = vzalloc(IO_SPACE_LIMIT);
+	set_io_port_base((unsigned long)octeon_dummy_iospace);
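+	/* An empty resource range (start > end) prevents any port claims. */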
+	ioport_resource.start = MAX_RESOURCE;
+	ioport_resource.end = 0;
+	return 0;
+}
+core_initcall(octeon_no_pci_init);
+
+static int __init octeon_no_pci_release(void)
+{
+	/*
+	 * Free the dummy I/O space if a real one has replaced it.
+	 */
+	if ((unsigned long)octeon_dummy_iospace != mips_io_port_base)
+		vfree(octeon_dummy_iospace);
+	return 0;
+}
+late_initcall(octeon_no_pci_release);
diff --git a/src/kernel/linux/v4.14/arch/mips/cavium-octeon/smp.c b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/smp.c
new file mode 100644
index 0000000..75e7c86
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/mips/cavium-octeon/smp.c
@@ -0,0 +1,516 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
+ */
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/sched.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/init.h>
+#include <linux/export.h>
+
+#include <asm/mmu_context.h>
+#include <asm/time.h>
+#include <asm/setup.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "octeon_boot.h"
+
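+/*
+ * Boot handshake: a secondary core spins until octeon_processor_boot
+ * matches its core id (0xff means "no core"), then picks up its stack
+ * and global pointers from the variables below.
+ */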
+volatile unsigned long octeon_processor_boot = 0xff;
+volatile unsigned long octeon_processor_sp;
+volatile unsigned long octeon_processor_gp;
+#ifdef CONFIG_RELOCATABLE
+volatile unsigned long octeon_processor_relocated_kernel_entry;
+#endif /* CONFIG_RELOCATABLE */
+
+#ifdef CONFIG_HOTPLUG_CPU
+uint64_t octeon_bootloader_entry_addr;
+EXPORT_SYMBOL(octeon_bootloader_entry_addr);
+#endif
+
+extern void kernel_entry(unsigned long arg1, ...);
+
+static void octeon_icache_flush(void)
+{
+	asm volatile ("synci 0($0)\n");
+}
+
+static void (*octeon_message_functions[8])(void) = {
+	scheduler_ipi,
+	generic_smp_call_function_interrupt,
+	octeon_icache_flush,
+};
+
+static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
+{
+	u64 mbox_clrx = CVMX_CIU_MBOX_CLRX(cvmx_get_core_num());
+	u64 action;
+	int i;
+
+	/*
+	 * Make sure the function array initialization remains
+	 * correct.
+	 */
+	BUILD_BUG_ON(SMP_RESCHEDULE_YOURSELF != (1 << 0));
+	BUILD_BUG_ON(SMP_CALL_FUNCTION       != (1 << 1));
+	BUILD_BUG_ON(SMP_ICACHE_FLUSH        != (1 << 2));
+
+	/*
+	 * Load the mailbox register to figure out what we're supposed
+	 * to do.
+	 */
+	action = cvmx_read_csr(mbox_clrx);
+
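+	/* Only the low mailbox bits are used: 8 on CN68XX, 16 on other models. */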
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		action &= 0xff;
+	else
+		action &= 0xffff;
+
+	/* Clear the mailbox to clear the interrupt */
+	cvmx_write_csr(mbox_clrx, action);
+
+	for (i = 0; i < ARRAY_SIZE(octeon_message_functions) && action;) {
+		if (action & 1) {
+			void (*fn)(void) = octeon_message_functions[i];
+
+			if (fn)
+				fn();
+		}
+		action >>= 1;
+		i++;
+	}
+	return IRQ_HANDLED;
+}
+
+/**
+ * Send an IPI to the given CPU by setting the action bits in its
+ * CIU mailbox set register.
+ */
+void octeon_send_ipi_single(int cpu, unsigned int action)
+{
+	int coreid = cpu_logical_map(cpu);
+	/*
+	pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
+	       coreid, action);
+	*/
+	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
+}
+
+static inline void octeon_send_ipi_mask(const struct cpumask *mask,
+					unsigned int action)
+{
+	unsigned int i;
+
+	for_each_cpu(i, mask)
+		octeon_send_ipi_single(i, action);
+}
+
+/**
+ * Record the bootloader entry address used to restart offlined cores,
+ * if the bootloader supports HOTPLUG_CPU.
+ */
+static void octeon_smp_hotplug_setup(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	struct linux_app_boot_info *labi;
+
+	if (!setup_max_cpus)
+		return;
+
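+	/* The bootloader publishes its boot info block at a fixed physical address. */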
+	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
+	if (labi->labi_signature != LABI_SIGNATURE) {
+		pr_info("The bootloader on this board does not support HOTPLUG_CPU.");
+		return;
+	}
+
+	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
+#endif
+}
+
+static void __init octeon_smp_setup(void)
+{
+	const int coreid = cvmx_get_core_num();
+	int cpus;
+	int id;
+	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
+
+#ifdef CONFIG_HOTPLUG_CPU
+	int core_mask = octeon_get_boot_coremask();
+	unsigned int num_cores = cvmx_octeon_num_cores();
+#endif
+
+	/* The present CPUs are initially just the boot cpu (CPU 0). */
+	for (id = 0; id < NR_CPUS; id++) {
+		set_cpu_possible(id, id == 0);
+		set_cpu_present(id, id == 0);
+	}
+
+	__cpu_number_map[coreid] = 0;
+	__cpu_logical_map[0] = coreid;
+
+	/* The present CPUs get the lowest CPU numbers. */
+	cpus = 1;
+	for (id = 0; id < NR_CPUS; id++) {
+		if ((id != coreid) && cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
+			set_cpu_possible(cpus, true);
+			set_cpu_present(cpus, true);
+			__cpu_number_map[id] = cpus;
+			__cpu_logical_map[cpus] = id;
+			cpus++;
+		}
+	}
+
+#ifdef CONFIG_HOTPLUG_CPU
+	/*
+	 * The possible CPUs are all those present on the chip. We
+	 * will assign CPU numbers for possible cores as well. Cores
+	 * are always numbered consecutively from 0.
+	 */
+	for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
+		     id < num_cores && id < NR_CPUS; id++) {
+		if (!(core_mask & (1 << id))) {
+			set_cpu_possible(cpus, true);
+			__cpu_number_map[id] = cpus;
+			__cpu_logical_map[cpus] = id;
+			cpus++;
+		}
+	}
+#endif
+
+	octeon_smp_hotplug_setup();
+}
+
+#ifdef CONFIG_RELOCATABLE
+int plat_post_relocation(long offset)
+{
+	unsigned long entry = (unsigned long)kernel_entry;
+
+	/* Send secondaries into relocated kernel */
+	octeon_processor_relocated_kernel_entry = entry + offset;
+
+	return 0;
+}
+#endif /* CONFIG_RELOCATABLE */
+
+/**
+ * Firmware CPU startup hook.
+ */
+static int octeon_boot_secondary(int cpu, struct task_struct *idle)
+{
+	int count;
+
+	pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
+		cpu_logical_map(cpu));
+
+	octeon_processor_sp = __KSTK_TOS(idle);
+	octeon_processor_gp = (unsigned long)(task_thread_info(idle));
+	octeon_processor_boot = cpu_logical_map(cpu);
+	mb();
+
+	count = 10000;
+	while (octeon_processor_sp && count) {
+		/* Waiting for processor to get the SP and GP */
+		udelay(1);
+		count--;
+	}
+	if (count == 0) {
+		pr_err("Secondary boot timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/**
+ * After we've done initial boot, this function is called to allow the
+ * board code to clean up state, if needed
+ */
+static void octeon_init_secondary(void)
+{
+	unsigned int sr;
+
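+	/* Run from the bootstrap vectors (BEV) while EBASE is rewritten. */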
+	sr = set_c0_status(ST0_BEV);
+	write_c0_ebase((u32)ebase);
+	write_c0_status(sr);
+
+	octeon_check_cpu_bist();
+	octeon_init_cvmcount();
+
+	octeon_irq_setup_secondary();
+}
+
+/**
+ * Callout to firmware before smp_init.
+ */
+static void __init octeon_prepare_cpus(unsigned int max_cpus)
+{
+	/*
+	 * Only the low order mailbox bits are used for IPIs, leave
+	 * the other bits alone.
+	 */
+	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
+	if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
+			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
+			mailbox_interrupt)) {
+		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
+	}
+}
+
+/**
+ * Last chance for the board code to finish SMP initialization before
+ * the CPU is "online".
+ */
+static void octeon_smp_finish(void)
+{
+	octeon_user_io_init();
+
+	/* to generate the first CPU timer interrupt */
+	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+	local_irq_enable();
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* State of each CPU. */
+DEFINE_PER_CPU(int, cpu_state);
+
+static int octeon_cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	if (cpu == 0)
+		return -EBUSY;
+
+	if (!octeon_bootloader_entry_addr)
+		return -ENOTSUPP;
+
+	set_cpu_online(cpu, false);
+	calculate_cpu_foreign_map();
+	octeon_fixup_irqs();
+
+	__flush_cache_all();
+	local_flush_tlb_all();
+
+	return 0;
+}
+
+static void octeon_cpu_die(unsigned int cpu)
+{
+	int coreid = cpu_logical_map(cpu);
+	uint32_t mask, new_mask;
+	const struct cvmx_bootmem_named_block_desc *block_desc;
+
+	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
+		cpu_relax();
+
+	/*
+	 * This is a somewhat complicated strategy for getting and setting
+	 * the available core mask, copied from the bootloader.
+	 */
+
+	mask = 1 << coreid;
+	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
+	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
+
+	if (!block_desc) {
+		struct linux_app_boot_info *labi;
+
+		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
+
+		labi->avail_coremask |= mask;
+		new_mask = labi->avail_coremask;
+	} else {		       /* alternative, already initialized */
+		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
+							       AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
+		*p |= mask;
+		new_mask = *p;
+	}
+
+	pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
+	mb();
+	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
+	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
+}
+
+void play_dead(void)
+{
+	int cpu = cpu_number_map(cvmx_get_core_num());
+
+	idle_task_exit();
+	octeon_processor_boot = 0xff;
+	per_cpu(cpu_state, cpu) = CPU_DEAD;
+
+	mb();
+
+	while (1)	/* core will be reset here */
+		;
+}
+
+static void start_after_reset(void)
+{
+	kernel_entry(0, 0, 0);	/* set a2 = 0 for secondary core */
+}
+
+static int octeon_update_boot_vector(unsigned int cpu)
+{
+	int coreid = cpu_logical_map(cpu);
+	uint32_t avail_coremask;
+	const struct cvmx_bootmem_named_block_desc *block_desc;
+	struct boot_init_vector *boot_vect =
+		(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);
+
+	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
+
+	if (!block_desc) {
+		struct linux_app_boot_info *labi;
+
+		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
+
+		avail_coremask = labi->avail_coremask;
+		labi->avail_coremask &= ~(1 << coreid);
+	} else {		       /* alternative, already initialized */
+		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
+			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
+	}
+
+	if (!(avail_coremask & (1 << coreid))) {
+		/* Core not available; assume the simple executive claimed it. */
+		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
+		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
+	}
+
+	boot_vect[coreid].app_start_func_addr =
+		(uint32_t) (unsigned long) start_after_reset;
+	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;
+
+	mb();
+
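+	/* NMI the core so it re-enters the bootloader and uses the new vector. */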
+	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);
+
+	return 0;
+}
+
+static int register_cavium_notifier(void)
+{
+	return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
+					 "mips/cavium:prepare",
+					 octeon_update_boot_vector, NULL);
+}
+late_initcall(register_cavium_notifier);
+
+#endif	/* CONFIG_HOTPLUG_CPU */
+
+const struct plat_smp_ops octeon_smp_ops = {
+	.send_ipi_single	= octeon_send_ipi_single,
+	.send_ipi_mask		= octeon_send_ipi_mask,
+	.init_secondary		= octeon_init_secondary,
+	.smp_finish		= octeon_smp_finish,
+	.boot_secondary		= octeon_boot_secondary,
+	.smp_setup		= octeon_smp_setup,
+	.prepare_cpus		= octeon_prepare_cpus,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_disable		= octeon_cpu_disable,
+	.cpu_die		= octeon_cpu_die,
+#endif
+};
+
+static irqreturn_t octeon_78xx_resched_interrupt(int irq, void *dev_id)
+{
+	scheduler_ipi();
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t octeon_78xx_call_function_interrupt(int irq, void *dev_id)
+{
+	generic_smp_call_function_interrupt();
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t octeon_78xx_icache_flush_interrupt(int irq, void *dev_id)
+{
+	octeon_icache_flush();
+	return IRQ_HANDLED;
+}
+
+/*
+ * Callout to firmware before smp_init
+ */
+static void octeon_78xx_prepare_cpus(unsigned int max_cpus)
+{
+	if (request_irq(OCTEON_IRQ_MBOX0 + 0,
+			octeon_78xx_resched_interrupt,
+			IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
+			octeon_78xx_resched_interrupt)) {
+		panic("Cannot request_irq for SchedulerIPI");
+	}
+	if (request_irq(OCTEON_IRQ_MBOX0 + 1,
+			octeon_78xx_call_function_interrupt,
+			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
+			octeon_78xx_call_function_interrupt)) {
+		panic("Cannot request_irq for SMP-Call");
+	}
+	if (request_irq(OCTEON_IRQ_MBOX0 + 2,
+			octeon_78xx_icache_flush_interrupt,
+			IRQF_PERCPU | IRQF_NO_THREAD, "ICache-Flush",
+			octeon_78xx_icache_flush_interrupt)) {
+		panic("Cannot request_irq for ICache-Flush");
+	}
+}
+
+static void octeon_78xx_send_ipi_single(int cpu, unsigned int action)
+{
+	int i;
+
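+	/* Each set action bit is delivered as its own CIU3 mailbox interrupt. */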
+	for (i = 0; i < 8; i++) {
+		if (action & 1)
+			octeon_ciu3_mbox_send(cpu, i);
+		action >>= 1;
+	}
+}
+
+static void octeon_78xx_send_ipi_mask(const struct cpumask *mask,
+				      unsigned int action)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, mask)
+		octeon_78xx_send_ipi_single(cpu, action);
+}
+
+static const struct plat_smp_ops octeon_78xx_smp_ops = {
+	.send_ipi_single	= octeon_78xx_send_ipi_single,
+	.send_ipi_mask		= octeon_78xx_send_ipi_mask,
+	.init_secondary		= octeon_init_secondary,
+	.smp_finish		= octeon_smp_finish,
+	.boot_secondary		= octeon_boot_secondary,
+	.smp_setup		= octeon_smp_setup,
+	.prepare_cpus		= octeon_78xx_prepare_cpus,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_disable		= octeon_cpu_disable,
+	.cpu_die		= octeon_cpu_die,
+#endif
+};
+
+void __init octeon_setup_smp(void)
+{
+	const struct plat_smp_ops *ops;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CIU3))
+		ops = &octeon_78xx_smp_ops;
+	else
+		ops = &octeon_smp_ops;
+
+	register_smp_ops(ops);
+}