ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/arch/riscv/kernel/.gitignore b/marvell/linux/arch/riscv/kernel/.gitignore
new file mode 100644
index 0000000..b51634f
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/.gitignore
@@ -0,0 +1 @@
+/vmlinux.lds
diff --git a/marvell/linux/arch/riscv/kernel/Makefile b/marvell/linux/arch/riscv/kernel/Makefile
new file mode 100644
index 0000000..696020f
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/Makefile
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the RISC-V Linux kernel
+#
+
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_ftrace.o = -pg
+endif
+
+extra-y += head.o
+extra-y += vmlinux.lds
+
+obj-y	+= cpu.o
+obj-y	+= cpufeature.o
+obj-y	+= entry.o
+obj-y	+= irq.o
+obj-y	+= process.o
+obj-y	+= ptrace.o
+obj-y	+= reset.o
+obj-y	+= setup.o
+obj-y	+= signal.o
+obj-y	+= syscall_table.o
+obj-y	+= sys_riscv.o
+obj-y	+= time.o
+obj-y	+= traps.o
+obj-y	+= riscv_ksyms.o
+obj-y	+= stacktrace.o
+obj-y	+= vdso.o
+obj-y	+= cacheinfo.o
+obj-y	+= vdso/
+
+obj-$(CONFIG_FPU)		+= fpu.o
+obj-$(CONFIG_SMP)		+= smpboot.o
+obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_MODULES)		+= module.o
+obj-$(CONFIG_MODULE_SECTIONS)	+= module-sections.o
+
+obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o ftrace.o
+obj-$(CONFIG_DYNAMIC_FTRACE)	+= mcount-dyn.o
+
+obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o
+obj-$(CONFIG_PERF_EVENTS)	+= perf_callchain.o
+obj-$(CONFIG_HAVE_PERF_REGS)	+= perf_regs.o
diff --git a/marvell/linux/arch/riscv/kernel/asm-offsets.c b/marvell/linux/arch/riscv/kernel/asm-offsets.c
new file mode 100644
index 0000000..42c69d5
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/asm-offsets.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/kbuild.h>
+#include <linux/sched.h>
+#include <asm/thread_info.h>
+#include <asm/ptrace.h>
+
+void asm_offsets(void)
+{
+	OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
+	OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
+	OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]);
+	OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]);
+	OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]);
+	OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]);
+	OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]);
+	OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]);
+	OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]);
+	OFFSET(TASK_THREAD_S7, task_struct, thread.s[7]);
+	OFFSET(TASK_THREAD_S8, task_struct, thread.s[8]);
+	OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
+	OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
+	OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
+	OFFSET(TASK_STACK, task_struct, stack);
+	OFFSET(TASK_TI, task_struct, thread_info);
+	OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
+	OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
+	OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
+	OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
+	OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
+
+	OFFSET(TASK_THREAD_F0,  task_struct, thread.fstate.f[0]);
+	OFFSET(TASK_THREAD_F1,  task_struct, thread.fstate.f[1]);
+	OFFSET(TASK_THREAD_F2,  task_struct, thread.fstate.f[2]);
+	OFFSET(TASK_THREAD_F3,  task_struct, thread.fstate.f[3]);
+	OFFSET(TASK_THREAD_F4,  task_struct, thread.fstate.f[4]);
+	OFFSET(TASK_THREAD_F5,  task_struct, thread.fstate.f[5]);
+	OFFSET(TASK_THREAD_F6,  task_struct, thread.fstate.f[6]);
+	OFFSET(TASK_THREAD_F7,  task_struct, thread.fstate.f[7]);
+	OFFSET(TASK_THREAD_F8,  task_struct, thread.fstate.f[8]);
+	OFFSET(TASK_THREAD_F9,  task_struct, thread.fstate.f[9]);
+	OFFSET(TASK_THREAD_F10, task_struct, thread.fstate.f[10]);
+	OFFSET(TASK_THREAD_F11, task_struct, thread.fstate.f[11]);
+	OFFSET(TASK_THREAD_F12, task_struct, thread.fstate.f[12]);
+	OFFSET(TASK_THREAD_F13, task_struct, thread.fstate.f[13]);
+	OFFSET(TASK_THREAD_F14, task_struct, thread.fstate.f[14]);
+	OFFSET(TASK_THREAD_F15, task_struct, thread.fstate.f[15]);
+	OFFSET(TASK_THREAD_F16, task_struct, thread.fstate.f[16]);
+	OFFSET(TASK_THREAD_F17, task_struct, thread.fstate.f[17]);
+	OFFSET(TASK_THREAD_F18, task_struct, thread.fstate.f[18]);
+	OFFSET(TASK_THREAD_F19, task_struct, thread.fstate.f[19]);
+	OFFSET(TASK_THREAD_F20, task_struct, thread.fstate.f[20]);
+	OFFSET(TASK_THREAD_F21, task_struct, thread.fstate.f[21]);
+	OFFSET(TASK_THREAD_F22, task_struct, thread.fstate.f[22]);
+	OFFSET(TASK_THREAD_F23, task_struct, thread.fstate.f[23]);
+	OFFSET(TASK_THREAD_F24, task_struct, thread.fstate.f[24]);
+	OFFSET(TASK_THREAD_F25, task_struct, thread.fstate.f[25]);
+	OFFSET(TASK_THREAD_F26, task_struct, thread.fstate.f[26]);
+	OFFSET(TASK_THREAD_F27, task_struct, thread.fstate.f[27]);
+	OFFSET(TASK_THREAD_F28, task_struct, thread.fstate.f[28]);
+	OFFSET(TASK_THREAD_F29, task_struct, thread.fstate.f[29]);
+	OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]);
+	OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]);
+	OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
+
+	DEFINE(PT_SIZE, sizeof(struct pt_regs));
+	OFFSET(PT_SEPC, pt_regs, sepc);
+	OFFSET(PT_RA, pt_regs, ra);
+	OFFSET(PT_FP, pt_regs, s0);
+	OFFSET(PT_S0, pt_regs, s0);
+	OFFSET(PT_S1, pt_regs, s1);
+	OFFSET(PT_S2, pt_regs, s2);
+	OFFSET(PT_S3, pt_regs, s3);
+	OFFSET(PT_S4, pt_regs, s4);
+	OFFSET(PT_S5, pt_regs, s5);
+	OFFSET(PT_S6, pt_regs, s6);
+	OFFSET(PT_S7, pt_regs, s7);
+	OFFSET(PT_S8, pt_regs, s8);
+	OFFSET(PT_S9, pt_regs, s9);
+	OFFSET(PT_S10, pt_regs, s10);
+	OFFSET(PT_S11, pt_regs, s11);
+	OFFSET(PT_SP, pt_regs, sp);
+	OFFSET(PT_TP, pt_regs, tp);
+	OFFSET(PT_A0, pt_regs, a0);
+	OFFSET(PT_A1, pt_regs, a1);
+	OFFSET(PT_A2, pt_regs, a2);
+	OFFSET(PT_A3, pt_regs, a3);
+	OFFSET(PT_A4, pt_regs, a4);
+	OFFSET(PT_A5, pt_regs, a5);
+	OFFSET(PT_A6, pt_regs, a6);
+	OFFSET(PT_A7, pt_regs, a7);
+	OFFSET(PT_T0, pt_regs, t0);
+	OFFSET(PT_T1, pt_regs, t1);
+	OFFSET(PT_T2, pt_regs, t2);
+	OFFSET(PT_T3, pt_regs, t3);
+	OFFSET(PT_T4, pt_regs, t4);
+	OFFSET(PT_T5, pt_regs, t5);
+	OFFSET(PT_T6, pt_regs, t6);
+	OFFSET(PT_GP, pt_regs, gp);
+	OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
+	OFFSET(PT_SSTATUS, pt_regs, sstatus);
+	OFFSET(PT_SBADADDR, pt_regs, sbadaddr);
+	OFFSET(PT_SCAUSE, pt_regs, scause);
+
+	/*
+	 * THREAD_{F,X}* might be larger than an S-type offset can handle, but
+	 * these are used in performance-sensitive assembly so we can't resort
+	 * to loading the long immediate every time.
+	 */
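+	/*
+	 * Illustrative use: __switch_to in entry.S loads the one large base
+	 * offset with li/add, then addresses everything with the small
+	 * relative offsets, which do fit in an S-type immediate:
+	 *
+	 *	li    a4, TASK_THREAD_RA
+	 *	add   a3, a0, a4
+	 *	REG_S ra, TASK_THREAD_RA_RA(a3)
+	 *	REG_S sp, TASK_THREAD_SP_RA(a3)
+	 */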
+	DEFINE(TASK_THREAD_RA_RA,
+		  offsetof(struct task_struct, thread.ra)
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_SP_RA,
+		  offsetof(struct task_struct, thread.sp)
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S0_RA,
+		  offsetof(struct task_struct, thread.s[0])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S1_RA,
+		  offsetof(struct task_struct, thread.s[1])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S2_RA,
+		  offsetof(struct task_struct, thread.s[2])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S3_RA,
+		  offsetof(struct task_struct, thread.s[3])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S4_RA,
+		  offsetof(struct task_struct, thread.s[4])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S5_RA,
+		  offsetof(struct task_struct, thread.s[5])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S6_RA,
+		  offsetof(struct task_struct, thread.s[6])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S7_RA,
+		  offsetof(struct task_struct, thread.s[7])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S8_RA,
+		  offsetof(struct task_struct, thread.s[8])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S9_RA,
+		  offsetof(struct task_struct, thread.s[9])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S10_RA,
+		  offsetof(struct task_struct, thread.s[10])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S11_RA,
+		  offsetof(struct task_struct, thread.s[11])
+		- offsetof(struct task_struct, thread.ra)
+	);
+
+	DEFINE(TASK_THREAD_F0_F0,
+		  offsetof(struct task_struct, thread.fstate.f[0])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F1_F0,
+		  offsetof(struct task_struct, thread.fstate.f[1])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F2_F0,
+		  offsetof(struct task_struct, thread.fstate.f[2])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F3_F0,
+		  offsetof(struct task_struct, thread.fstate.f[3])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F4_F0,
+		  offsetof(struct task_struct, thread.fstate.f[4])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F5_F0,
+		  offsetof(struct task_struct, thread.fstate.f[5])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F6_F0,
+		  offsetof(struct task_struct, thread.fstate.f[6])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F7_F0,
+		  offsetof(struct task_struct, thread.fstate.f[7])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F8_F0,
+		  offsetof(struct task_struct, thread.fstate.f[8])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F9_F0,
+		  offsetof(struct task_struct, thread.fstate.f[9])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F10_F0,
+		  offsetof(struct task_struct, thread.fstate.f[10])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F11_F0,
+		  offsetof(struct task_struct, thread.fstate.f[11])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F12_F0,
+		  offsetof(struct task_struct, thread.fstate.f[12])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F13_F0,
+		  offsetof(struct task_struct, thread.fstate.f[13])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F14_F0,
+		  offsetof(struct task_struct, thread.fstate.f[14])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F15_F0,
+		  offsetof(struct task_struct, thread.fstate.f[15])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F16_F0,
+		  offsetof(struct task_struct, thread.fstate.f[16])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F17_F0,
+		  offsetof(struct task_struct, thread.fstate.f[17])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F18_F0,
+		  offsetof(struct task_struct, thread.fstate.f[18])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F19_F0,
+		  offsetof(struct task_struct, thread.fstate.f[19])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F20_F0,
+		  offsetof(struct task_struct, thread.fstate.f[20])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F21_F0,
+		  offsetof(struct task_struct, thread.fstate.f[21])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F22_F0,
+		  offsetof(struct task_struct, thread.fstate.f[22])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F23_F0,
+		  offsetof(struct task_struct, thread.fstate.f[23])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F24_F0,
+		  offsetof(struct task_struct, thread.fstate.f[24])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F25_F0,
+		  offsetof(struct task_struct, thread.fstate.f[25])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F26_F0,
+		  offsetof(struct task_struct, thread.fstate.f[26])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F27_F0,
+		  offsetof(struct task_struct, thread.fstate.f[27])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F28_F0,
+		  offsetof(struct task_struct, thread.fstate.f[28])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F29_F0,
+		  offsetof(struct task_struct, thread.fstate.f[29])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F30_F0,
+		  offsetof(struct task_struct, thread.fstate.f[30])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F31_F0,
+		  offsetof(struct task_struct, thread.fstate.f[31])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_FCSR_F0,
+		  offsetof(struct task_struct, thread.fstate.fcsr)
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+
+	/*
+	 * We allocate a pt_regs on the stack when entering the kernel.  This
+	 * ensures the alignment is sane.
+	 */
+	DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN));
+}
diff --git a/marvell/linux/arch/riscv/kernel/cacheinfo.c b/marvell/linux/arch/riscv/kernel/cacheinfo.c
new file mode 100644
index 0000000..d930bd0
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/cacheinfo.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/cacheinfo.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+static void ci_leaf_init(struct cacheinfo *this_leaf,
+			 struct device_node *node,
+			 enum cache_type type, unsigned int level)
+{
+	this_leaf->level = level;
+	this_leaf->type = type;
+}
+
+int init_cache_level(unsigned int cpu)
+{
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	struct device_node *np = of_cpu_device_node_get(cpu);
+	struct device_node *prev = NULL;
+	int levels = 0, leaves = 0, level;
+
+	if (of_property_read_bool(np, "cache-size"))
+		++leaves;
+	if (of_property_read_bool(np, "i-cache-size"))
+		++leaves;
+	if (of_property_read_bool(np, "d-cache-size"))
+		++leaves;
+	if (leaves > 0)
+		levels = 1;
+
+	prev = np;
+	while ((np = of_find_next_cache_node(np))) {
+		of_node_put(prev);
+		prev = np;
+		if (!of_device_is_compatible(np, "cache"))
+			break;
+		if (of_property_read_u32(np, "cache-level", &level))
+			break;
+		if (level <= levels)
+			break;
+		if (of_property_read_bool(np, "cache-size"))
+			++leaves;
+		if (of_property_read_bool(np, "i-cache-size"))
+			++leaves;
+		if (of_property_read_bool(np, "d-cache-size"))
+			++leaves;
+		levels = level;
+	}
+
+	of_node_put(np);
+	this_cpu_ci->num_levels = levels;
+	this_cpu_ci->num_leaves = leaves;
+
+	return 0;
+}
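+
+/*
+ * Example devicetree shape this walk expects (a sketch; property values
+ * are illustrative):
+ *
+ *	cpu@0 {
+ *		i-cache-size = <0x8000>;
+ *		d-cache-size = <0x8000>;
+ *		next-level-cache = <&l2>;
+ *	};
+ *	l2: cache-controller@2010000 {
+ *		compatible = "cache";
+ *		cache-level = <2>;
+ *		cache-size = <0x200000>;
+ *	};
+ *
+ * For this layout init_cache_level() reports num_levels = 2 and
+ * num_leaves = 3 (split L1 caches plus a unified L2).
+ */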
+
+int populate_cache_leaves(unsigned int cpu)
+{
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+	struct device_node *np = of_cpu_device_node_get(cpu);
+	struct device_node *prev = NULL;
+	int levels = 1, level = 1;
+
+	if (of_property_read_bool(np, "cache-size"))
+		ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level);
+	if (of_property_read_bool(np, "i-cache-size"))
+		ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level);
+	if (of_property_read_bool(np, "d-cache-size"))
+		ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
+
+	prev = np;
+	while ((np = of_find_next_cache_node(np))) {
+		of_node_put(prev);
+		prev = np;
+		if (!of_device_is_compatible(np, "cache"))
+			break;
+		if (of_property_read_u32(np, "cache-level", &level))
+			break;
+		if (level <= levels)
+			break;
+		if (of_property_read_bool(np, "cache-size"))
+			ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level);
+		if (of_property_read_bool(np, "i-cache-size"))
+			ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level);
+		if (of_property_read_bool(np, "d-cache-size"))
+			ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
+		levels = level;
+	}
+	of_node_put(np);
+
+	return 0;
+}
diff --git a/marvell/linux/arch/riscv/kernel/cpu.c b/marvell/linux/arch/riscv/kernel/cpu.c
new file mode 100644
index 0000000..7da3c6a
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/cpu.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/of.h>
+#include <asm/smp.h>
+
+/*
+ * Returns the hart ID of the given device tree node, or -ENODEV if the node
+ * isn't an enabled and valid RISC-V hart node.
+ */
+int riscv_of_processor_hartid(struct device_node *node)
+{
+	const char *isa;
+	u32 hart;
+
+	if (!of_device_is_compatible(node, "riscv")) {
+		pr_warn("Found incompatible CPU\n");
+		return -ENODEV;
+	}
+
+	if (of_property_read_u32(node, "reg", &hart)) {
+		pr_warn("Found CPU without hart ID\n");
+		return -ENODEV;
+	}
+
+	if (!of_device_is_available(node)) {
+		pr_info("CPU with hartid=%d is not available\n", hart);
+		return -ENODEV;
+	}
+
+	if (of_property_read_string(node, "riscv,isa", &isa)) {
+		pr_warn("CPU with hartid=%d has no \"riscv,isa\" property\n", hart);
+		return -ENODEV;
+	}
+	if (isa[0] != 'r' || isa[1] != 'v') {
+		pr_warn("CPU with hartid=%d has an invalid ISA of \"%s\"\n", hart, isa);
+		return -ENODEV;
+	}
+
+	return hart;
+}
+
+#ifdef CONFIG_PROC_FS
+
+static void print_isa(struct seq_file *f, const char *orig_isa)
+{
+	static const char *ext = "mafdcsu";
+	const char *isa = orig_isa;
+	const char *e;
+
+	/*
+	 * Linux doesn't support rv32e or rv128i, and we only support booting
+	 * kernels on harts with the same ISA that the kernel is compiled for.
+	 */
+#if defined(CONFIG_32BIT)
+	if (strncmp(isa, "rv32i", 5) != 0)
+		return;
+#elif defined(CONFIG_64BIT)
+	if (strncmp(isa, "rv64i", 5) != 0)
+		return;
+#endif
+
+	/* Print the base ISA, as we already know it's legal. */
+	seq_puts(f, "isa\t\t: ");
+	seq_write(f, isa, 5);
+	isa += 5;
+
+	/*
+	 * Check the rest of the ISA string for valid extensions, printing those
+	 * we find.  RISC-V ISA strings define an order, so we only print the
+	 * extension bits when they're in order. Hide the supervisor (S)
+	 * extension from userspace as it's not accessible from there.
+	 */
+	for (e = ext; *e != '\0'; ++e) {
+		if (isa[0] == e[0]) {
+			if (isa[0] != 's')
+				seq_write(f, isa, 1);
+
+			isa++;
+		}
+	}
+	seq_puts(f, "\n");
+
+	/*
+	 * If we were given an unsupported ISA in the device tree then print
+	 * a bit of info describing what went wrong.
+	 */
+	if (isa[0] != '\0')
+		pr_info("unsupported ISA \"%s\" in device tree\n", orig_isa);
+}
+
+static void print_mmu(struct seq_file *f, const char *mmu_type)
+{
+#if defined(CONFIG_32BIT)
+	if (strcmp(mmu_type, "riscv,sv32") != 0)
+		return;
+#elif defined(CONFIG_64BIT)
+	if (strcmp(mmu_type, "riscv,sv39") != 0 &&
+	    strcmp(mmu_type, "riscv,sv48") != 0)
+		return;
+#endif
+
+	seq_printf(f, "mmu\t\t: %s\n", mmu_type+6);
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	*pos = cpumask_next(*pos - 1, cpu_online_mask);
+	if ((*pos) < nr_cpu_ids)
+		return (void *)(uintptr_t)(1 + *pos);
+	return NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+static int c_show(struct seq_file *m, void *v)
+{
+	unsigned long cpu_id = (unsigned long)v - 1;
+	struct device_node *node = of_get_cpu_node(cpu_id, NULL);
+	const char *compat, *isa, *mmu;
+
+	seq_printf(m, "processor\t: %lu\n", cpu_id);
+	seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id));
+	if (!of_property_read_string(node, "riscv,isa", &isa))
+		print_isa(m, isa);
+	if (!of_property_read_string(node, "mmu-type", &mmu))
+		print_mmu(m, mmu);
+	if (!of_property_read_string(node, "compatible", &compat)
+	    && strcmp(compat, "riscv"))
+		seq_printf(m, "uarch\t\t: %s\n", compat);
+	seq_puts(m, "\n");
+	of_node_put(node);
+
+	return 0;
+}
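+
+/*
+ * Example /proc/cpuinfo output produced by c_show() (values are
+ * illustrative):
+ *
+ *	processor	: 0
+ *	hart		: 1
+ *	isa		: rv64imafdc
+ *	mmu		: sv39
+ *	uarch		: sifive,u54-mc
+ */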
+
+const struct seq_operations cpuinfo_op = {
+	.start	= c_start,
+	.next	= c_next,
+	.stop	= c_stop,
+	.show	= c_show
+};
+
+#endif /* CONFIG_PROC_FS */
diff --git a/marvell/linux/arch/riscv/kernel/cpufeature.c b/marvell/linux/arch/riscv/kernel/cpufeature.c
new file mode 100644
index 0000000..a5ad000
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/cpufeature.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copied from arch/arm64/kernel/cpufeature.c
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/of.h>
+#include <asm/processor.h>
+#include <asm/hwcap.h>
+#include <asm/smp.h>
+#include <asm/switch_to.h>
+
+unsigned long elf_hwcap __read_mostly;
+#ifdef CONFIG_FPU
+bool has_fpu __read_mostly;
+#endif
+
+void riscv_fill_hwcap(void)
+{
+	struct device_node *node;
+	const char *isa;
+	size_t i;
+	static unsigned long isa2hwcap[256] = {0};
+
+	isa2hwcap['i'] = isa2hwcap['I'] = COMPAT_HWCAP_ISA_I;
+	isa2hwcap['m'] = isa2hwcap['M'] = COMPAT_HWCAP_ISA_M;
+	isa2hwcap['a'] = isa2hwcap['A'] = COMPAT_HWCAP_ISA_A;
+	isa2hwcap['f'] = isa2hwcap['F'] = COMPAT_HWCAP_ISA_F;
+	isa2hwcap['d'] = isa2hwcap['D'] = COMPAT_HWCAP_ISA_D;
+	isa2hwcap['c'] = isa2hwcap['C'] = COMPAT_HWCAP_ISA_C;
+
+	elf_hwcap = 0;
+
+	for_each_of_cpu_node(node) {
+		unsigned long this_hwcap = 0;
+
+		if (riscv_of_processor_hartid(node) < 0)
+			continue;
+
+		if (of_property_read_string(node, "riscv,isa", &isa)) {
+			pr_warn("Unable to find \"riscv,isa\" devicetree entry\n");
+			continue;
+		}
+
+		for (i = 0; i < strlen(isa); ++i)
+			this_hwcap |= isa2hwcap[(unsigned char)(isa[i])];
+
+		/*
+		 * All "okay" harts should have the same ISA.  Set HWCAP based
+		 * on the common capabilities of every "okay" hart, in case
+		 * they differ.
+		 */
+		if (elf_hwcap)
+			elf_hwcap &= this_hwcap;
+		else
+			elf_hwcap = this_hwcap;
+	}
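+
+	/*
+	 * Worked example (hypothetical ISA strings): if hart 0 advertises
+	 * "rv64imafdc" and hart 1 only "rv64imac", the intersection above
+	 * leaves IMAC, so the F and D capabilities are hidden from userspace
+	 * on every CPU.
+	 */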
+
+	/*
+	 * We don't support systems with F but without D, so mask those
+	 * out here.
+	 */
+	if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) {
+		pr_info("This kernel does not support systems with F but not D\n");
+		elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
+	}
+
+	pr_info("elf_hwcap is 0x%lx\n", elf_hwcap);
+
+#ifdef CONFIG_FPU
+	if (elf_hwcap & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D))
+		has_fpu = true;
+#endif
+}
diff --git a/marvell/linux/arch/riscv/kernel/entry.S b/marvell/linux/arch/riscv/kernel/entry.S
new file mode 100644
index 0000000..9c87ae7
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/entry.S
@@ -0,0 +1,410 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+	.text
+	.altmacro
+
+/*
+ * Prepares to enter a system call or exception by saving all registers to the
+ * stack.
+ */
+	.macro SAVE_ALL
+	LOCAL _restore_kernel_tpsp
+	LOCAL _save_context
+
+	/*
+	 * If coming from userspace, preserve the user thread pointer and load
+	 * the kernel thread pointer.  If we came from the kernel, sscratch
+	 * will contain 0, and we should continue on the current TP.
+	 */
+	csrrw tp, CSR_SSCRATCH, tp
+	bnez tp, _save_context
+
+_restore_kernel_tpsp:
+	csrr tp, CSR_SSCRATCH
+	REG_S sp, TASK_TI_KERNEL_SP(tp)
+_save_context:
+	REG_S sp, TASK_TI_USER_SP(tp)
+	REG_L sp, TASK_TI_KERNEL_SP(tp)
+	addi sp, sp, -(PT_SIZE_ON_STACK)
+	REG_S x1,  PT_RA(sp)
+	REG_S x3,  PT_GP(sp)
+	REG_S x5,  PT_T0(sp)
+	REG_S x6,  PT_T1(sp)
+	REG_S x7,  PT_T2(sp)
+	REG_S x8,  PT_S0(sp)
+	REG_S x9,  PT_S1(sp)
+	REG_S x10, PT_A0(sp)
+	REG_S x11, PT_A1(sp)
+	REG_S x12, PT_A2(sp)
+	REG_S x13, PT_A3(sp)
+	REG_S x14, PT_A4(sp)
+	REG_S x15, PT_A5(sp)
+	REG_S x16, PT_A6(sp)
+	REG_S x17, PT_A7(sp)
+	REG_S x18, PT_S2(sp)
+	REG_S x19, PT_S3(sp)
+	REG_S x20, PT_S4(sp)
+	REG_S x21, PT_S5(sp)
+	REG_S x22, PT_S6(sp)
+	REG_S x23, PT_S7(sp)
+	REG_S x24, PT_S8(sp)
+	REG_S x25, PT_S9(sp)
+	REG_S x26, PT_S10(sp)
+	REG_S x27, PT_S11(sp)
+	REG_S x28, PT_T3(sp)
+	REG_S x29, PT_T4(sp)
+	REG_S x30, PT_T5(sp)
+	REG_S x31, PT_T6(sp)
+
+	/*
+	 * Disable user-mode memory access as it should only be set in the
+	 * actual user copy routines.
+	 *
+	 * Disable the FPU to detect illegal usage of floating point in kernel
+	 * space.
+	 */
+	li t0, SR_SUM | SR_FS
+
+	REG_L s0, TASK_TI_USER_SP(tp)
+	csrrc s1, CSR_SSTATUS, t0
+	csrr s2, CSR_SEPC
+	csrr s3, CSR_STVAL
+	csrr s4, CSR_SCAUSE
+	csrr s5, CSR_SSCRATCH
+	REG_S s0, PT_SP(sp)
+	REG_S s1, PT_SSTATUS(sp)
+	REG_S s2, PT_SEPC(sp)
+	REG_S s3, PT_SBADADDR(sp)
+	REG_S s4, PT_SCAUSE(sp)
+	REG_S s5, PT_TP(sp)
+	.endm
+
+/*
+ * Prepares to return from a system call or exception by restoring all
+ * registers from the stack.
+ */
+	.macro RESTORE_ALL
+	REG_L a0, PT_SSTATUS(sp)
+	/*
+	 * The current load reservation is effectively part of the processor's
+	 * state, in the sense that load reservations cannot be shared between
+	 * different hart contexts.  We can't actually save and restore a load
+	 * reservation, so instead here we clear any existing reservation --
+	 * it's always legal for implementations to clear load reservations at
+	 * any point (as long as the forward progress guarantee is kept, but
+	 * we'll ignore that here).
+	 *
+	 * Dangling load reservations can be the result of taking a trap in the
+	 * middle of an LR/SC sequence, but can also be the result of a taken
+	 * forward branch around an SC -- which is how we implement CAS.  As a
+	 * result we need to clear reservations between the last CAS and the
+	 * jump back to the new context.  While it is unlikely the store
+	 * completes, implementations are allowed to expand reservations to be
+	 * arbitrarily large.
+	 */
+	REG_L  a2, PT_SEPC(sp)
+	REG_SC x0, a2, PT_SEPC(sp)
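+	/*
+	 * Illustrative RV64 expansion of the pair above (REG_L/REG_SC come
+	 * from asm/asm.h):
+	 *
+	 *	ld   a2, PT_SEPC(sp)
+	 *	sc.d x0, a2, PT_SEPC(sp)
+	 *
+	 * The store-conditional either fails (no store) or rewrites the
+	 * value just loaded; either way any dangling reservation on this
+	 * hart is cleared.
+	 */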
+
+	csrw CSR_SSTATUS, a0
+	csrw CSR_SEPC, a2
+
+	REG_L x1,  PT_RA(sp)
+	REG_L x3,  PT_GP(sp)
+	REG_L x4,  PT_TP(sp)
+	REG_L x5,  PT_T0(sp)
+	REG_L x6,  PT_T1(sp)
+	REG_L x7,  PT_T2(sp)
+	REG_L x8,  PT_S0(sp)
+	REG_L x9,  PT_S1(sp)
+	REG_L x10, PT_A0(sp)
+	REG_L x11, PT_A1(sp)
+	REG_L x12, PT_A2(sp)
+	REG_L x13, PT_A3(sp)
+	REG_L x14, PT_A4(sp)
+	REG_L x15, PT_A5(sp)
+	REG_L x16, PT_A6(sp)
+	REG_L x17, PT_A7(sp)
+	REG_L x18, PT_S2(sp)
+	REG_L x19, PT_S3(sp)
+	REG_L x20, PT_S4(sp)
+	REG_L x21, PT_S5(sp)
+	REG_L x22, PT_S6(sp)
+	REG_L x23, PT_S7(sp)
+	REG_L x24, PT_S8(sp)
+	REG_L x25, PT_S9(sp)
+	REG_L x26, PT_S10(sp)
+	REG_L x27, PT_S11(sp)
+	REG_L x28, PT_T3(sp)
+	REG_L x29, PT_T4(sp)
+	REG_L x30, PT_T5(sp)
+	REG_L x31, PT_T6(sp)
+
+	REG_L x2,  PT_SP(sp)
+	.endm
+
+#if !IS_ENABLED(CONFIG_PREEMPT)
+.set resume_kernel, restore_all
+#endif
+
+ENTRY(handle_exception)
+	SAVE_ALL
+
+	/*
+	 * Set sscratch register to 0, so that if a recursive exception
+	 * occurs, the exception vector knows it came from the kernel
+	 */
+	csrw CSR_SSCRATCH, x0
+
+	/* Load the global pointer */
+.option push
+.option norelax
+	la gp, __global_pointer$
+.option pop
+
+	la ra, ret_from_exception
+	/*
+	 * MSB of cause differentiates between
+	 * interrupts and exceptions
+	 */
+	bge s4, zero, 1f
+
+	/* Handle interrupts */
+	move a0, sp /* pt_regs */
+	tail do_IRQ
+1:
+	/*
+	 * Exceptions run with interrupts enabled or disabled, depending on
+	 * the state of SR_SPIE in sstatus.
+	 */
+	andi t0, s1, SR_SPIE
+	beqz t0, 1f
+	csrs CSR_SSTATUS, SR_SIE
+
+1:
+	/* Handle syscalls */
+	li t0, EXC_SYSCALL
+	beq s4, t0, handle_syscall
+
+	/* Handle other exceptions */
+	slli t0, s4, RISCV_LGPTR
+	la t1, excp_vect_table
+	la t2, excp_vect_table_end
+	move a0, sp /* pt_regs */
+	add t0, t1, t0
+	/* Check if exception code lies within bounds */
+	bgeu t0, t2, 1f
+	REG_L t0, 0(t0)
+	jr t0
+1:
+	tail do_trap_unknown
+
+handle_syscall:
+	/* Save the initial a0 value (needed in signal handlers) */
+	REG_S a0, PT_ORIG_A0(sp)
+	/*
+	 * Advance SEPC to avoid executing the original
+	 * scall instruction on sret
+	 */
+	addi s2, s2, 0x4
+	REG_S s2, PT_SEPC(sp)
+	/* Trace syscalls, but only if requested by the user. */
+	REG_L t0, TASK_TI_FLAGS(tp)
+	andi t0, t0, _TIF_SYSCALL_WORK
+	bnez t0, handle_syscall_trace_enter
+check_syscall_nr:
+	/* Check to make sure we don't jump to a bogus syscall number. */
+	li t0, __NR_syscalls
+	la s0, sys_ni_syscall
+	/* Syscall number held in a7 */
+	bgeu a7, t0, 1f
+	la s0, sys_call_table
+	slli t0, a7, RISCV_LGPTR
+	add s0, s0, t0
+	REG_L s0, 0(s0)
+1:
+	jalr s0
+
+ret_from_syscall:
+	/* Set user a0 to kernel a0 */
+	REG_S a0, PT_A0(sp)
+	/* Trace syscalls, but only if requested by the user. */
+	REG_L t0, TASK_TI_FLAGS(tp)
+	andi t0, t0, _TIF_SYSCALL_WORK
+	bnez t0, handle_syscall_trace_exit
+
+ret_from_exception:
+	REG_L s0, PT_SSTATUS(sp)
+	csrc CSR_SSTATUS, SR_SIE
+	andi s0, s0, SR_SPP
+	bnez s0, resume_kernel
+
+resume_userspace:
+	/* Interrupts must be disabled here so flags are checked atomically */
+	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
+	andi s1, s0, _TIF_WORK_MASK
+	bnez s1, work_pending
+
+	/* Save unwound kernel stack pointer in thread_info */
+	addi s0, sp, PT_SIZE_ON_STACK
+	REG_S s0, TASK_TI_KERNEL_SP(tp)
+
+	/*
+	 * Save TP into sscratch, so we can find the kernel data structures
+	 * again.
+	 */
+	csrw CSR_SSCRATCH, tp
+
+restore_all:
+	RESTORE_ALL
+	sret
+
+#if IS_ENABLED(CONFIG_PREEMPT)
+resume_kernel:
+	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
+	bnez s0, restore_all
+	REG_L s0, TASK_TI_FLAGS(tp)
+	andi s0, s0, _TIF_NEED_RESCHED
+	beqz s0, restore_all
+	call preempt_schedule_irq
+	j restore_all
+#endif
+
+work_pending:
+	/* Enter slow path for supplementary processing */
+	la ra, ret_from_exception
+	andi s1, s0, _TIF_NEED_RESCHED
+	bnez s1, work_resched
+work_notifysig:
+	/* Handle pending signals and notify-resume requests */
+	csrs CSR_SSTATUS, SR_SIE /* Enable interrupts for do_notify_resume() */
+	move a0, sp /* pt_regs */
+	move a1, s0 /* current_thread_info->flags */
+	tail do_notify_resume
+work_resched:
+	tail schedule
+
+/* Slow paths for ptrace. */
+handle_syscall_trace_enter:
+	move a0, sp
+	call do_syscall_trace_enter
+	REG_L a0, PT_A0(sp)
+	REG_L a1, PT_A1(sp)
+	REG_L a2, PT_A2(sp)
+	REG_L a3, PT_A3(sp)
+	REG_L a4, PT_A4(sp)
+	REG_L a5, PT_A5(sp)
+	REG_L a6, PT_A6(sp)
+	REG_L a7, PT_A7(sp)
+	j check_syscall_nr
+handle_syscall_trace_exit:
+	move a0, sp
+	call do_syscall_trace_exit
+	j ret_from_exception
+
+END(handle_exception)
+
+ENTRY(ret_from_fork)
+	la ra, ret_from_exception
+	tail schedule_tail
+ENDPROC(ret_from_fork)
+
+ENTRY(ret_from_kernel_thread)
+	call schedule_tail
+	/* Call fn(arg) */
+	la ra, ret_from_exception
+	move a0, s1
+	jr s0
+ENDPROC(ret_from_kernel_thread)
+
+
+/*
+ * Integer register context switch
+ * The callee-saved registers must be saved and restored.
+ *
+ *   a0: previous task_struct (must be preserved across the switch)
+ *   a1: next task_struct
+ *
+ * The value of a0 and a1 must be preserved by this function, as that's how
+ * arguments are passed to schedule_tail.
+ */
+ENTRY(__switch_to)
+	/* Save context into prev->thread */
+	li    a4,  TASK_THREAD_RA
+	add   a3, a0, a4
+	add   a4, a1, a4
+	REG_S ra,  TASK_THREAD_RA_RA(a3)
+	REG_S sp,  TASK_THREAD_SP_RA(a3)
+	REG_S s0,  TASK_THREAD_S0_RA(a3)
+	REG_S s1,  TASK_THREAD_S1_RA(a3)
+	REG_S s2,  TASK_THREAD_S2_RA(a3)
+	REG_S s3,  TASK_THREAD_S3_RA(a3)
+	REG_S s4,  TASK_THREAD_S4_RA(a3)
+	REG_S s5,  TASK_THREAD_S5_RA(a3)
+	REG_S s6,  TASK_THREAD_S6_RA(a3)
+	REG_S s7,  TASK_THREAD_S7_RA(a3)
+	REG_S s8,  TASK_THREAD_S8_RA(a3)
+	REG_S s9,  TASK_THREAD_S9_RA(a3)
+	REG_S s10, TASK_THREAD_S10_RA(a3)
+	REG_S s11, TASK_THREAD_S11_RA(a3)
+	/* Restore context from next->thread */
+	REG_L ra,  TASK_THREAD_RA_RA(a4)
+	REG_L sp,  TASK_THREAD_SP_RA(a4)
+	REG_L s0,  TASK_THREAD_S0_RA(a4)
+	REG_L s1,  TASK_THREAD_S1_RA(a4)
+	REG_L s2,  TASK_THREAD_S2_RA(a4)
+	REG_L s3,  TASK_THREAD_S3_RA(a4)
+	REG_L s4,  TASK_THREAD_S4_RA(a4)
+	REG_L s5,  TASK_THREAD_S5_RA(a4)
+	REG_L s6,  TASK_THREAD_S6_RA(a4)
+	REG_L s7,  TASK_THREAD_S7_RA(a4)
+	REG_L s8,  TASK_THREAD_S8_RA(a4)
+	REG_L s9,  TASK_THREAD_S9_RA(a4)
+	REG_L s10, TASK_THREAD_S10_RA(a4)
+	REG_L s11, TASK_THREAD_S11_RA(a4)
+	/* Swap the CPU entry around. */
+	lw a3, TASK_TI_CPU(a0)
+	lw a4, TASK_TI_CPU(a1)
+	sw a3, TASK_TI_CPU(a1)
+	sw a4, TASK_TI_CPU(a0)
+#if TASK_TI != 0
+#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
+	addi tp, a1, TASK_TI
+#else
+	move tp, a1
+#endif
+	ret
+ENDPROC(__switch_to)
+
+	.section ".rodata"
+	.align LGREG
+	/* Exception vector table */
+ENTRY(excp_vect_table)
+	RISCV_PTR do_trap_insn_misaligned
+	RISCV_PTR do_trap_insn_fault
+	RISCV_PTR do_trap_insn_illegal
+	RISCV_PTR do_trap_break
+	RISCV_PTR do_trap_load_misaligned
+	RISCV_PTR do_trap_load_fault
+	RISCV_PTR do_trap_store_misaligned
+	RISCV_PTR do_trap_store_fault
+	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
+	RISCV_PTR do_trap_ecall_s
+	RISCV_PTR do_trap_unknown
+	RISCV_PTR do_trap_ecall_m
+	RISCV_PTR do_page_fault   /* instruction page fault */
+	RISCV_PTR do_page_fault   /* load page fault */
+	RISCV_PTR do_trap_unknown
+	RISCV_PTR do_page_fault   /* store page fault */
+excp_vect_table_end:
+END(excp_vect_table)
diff --git a/marvell/linux/arch/riscv/kernel/fpu.S b/marvell/linux/arch/riscv/kernel/fpu.S
new file mode 100644
index 0000000..631d315
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/fpu.S
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/asm-offsets.h>
+
+ENTRY(__fstate_save)
+	li  a2,  TASK_THREAD_F0
+	add a0, a0, a2
+	li t1, SR_FS
+	csrs CSR_SSTATUS, t1
+	frcsr t0
+	fsd f0,  TASK_THREAD_F0_F0(a0)
+	fsd f1,  TASK_THREAD_F1_F0(a0)
+	fsd f2,  TASK_THREAD_F2_F0(a0)
+	fsd f3,  TASK_THREAD_F3_F0(a0)
+	fsd f4,  TASK_THREAD_F4_F0(a0)
+	fsd f5,  TASK_THREAD_F5_F0(a0)
+	fsd f6,  TASK_THREAD_F6_F0(a0)
+	fsd f7,  TASK_THREAD_F7_F0(a0)
+	fsd f8,  TASK_THREAD_F8_F0(a0)
+	fsd f9,  TASK_THREAD_F9_F0(a0)
+	fsd f10, TASK_THREAD_F10_F0(a0)
+	fsd f11, TASK_THREAD_F11_F0(a0)
+	fsd f12, TASK_THREAD_F12_F0(a0)
+	fsd f13, TASK_THREAD_F13_F0(a0)
+	fsd f14, TASK_THREAD_F14_F0(a0)
+	fsd f15, TASK_THREAD_F15_F0(a0)
+	fsd f16, TASK_THREAD_F16_F0(a0)
+	fsd f17, TASK_THREAD_F17_F0(a0)
+	fsd f18, TASK_THREAD_F18_F0(a0)
+	fsd f19, TASK_THREAD_F19_F0(a0)
+	fsd f20, TASK_THREAD_F20_F0(a0)
+	fsd f21, TASK_THREAD_F21_F0(a0)
+	fsd f22, TASK_THREAD_F22_F0(a0)
+	fsd f23, TASK_THREAD_F23_F0(a0)
+	fsd f24, TASK_THREAD_F24_F0(a0)
+	fsd f25, TASK_THREAD_F25_F0(a0)
+	fsd f26, TASK_THREAD_F26_F0(a0)
+	fsd f27, TASK_THREAD_F27_F0(a0)
+	fsd f28, TASK_THREAD_F28_F0(a0)
+	fsd f29, TASK_THREAD_F29_F0(a0)
+	fsd f30, TASK_THREAD_F30_F0(a0)
+	fsd f31, TASK_THREAD_F31_F0(a0)
+	sw t0, TASK_THREAD_FCSR_F0(a0)
+	csrc CSR_SSTATUS, t1
+	ret
+ENDPROC(__fstate_save)
+
+ENTRY(__fstate_restore)
+	li  a2,  TASK_THREAD_F0
+	add a0, a0, a2
+	li t1, SR_FS
+	lw t0, TASK_THREAD_FCSR_F0(a0)
+	csrs CSR_SSTATUS, t1
+	fld f0,  TASK_THREAD_F0_F0(a0)
+	fld f1,  TASK_THREAD_F1_F0(a0)
+	fld f2,  TASK_THREAD_F2_F0(a0)
+	fld f3,  TASK_THREAD_F3_F0(a0)
+	fld f4,  TASK_THREAD_F4_F0(a0)
+	fld f5,  TASK_THREAD_F5_F0(a0)
+	fld f6,  TASK_THREAD_F6_F0(a0)
+	fld f7,  TASK_THREAD_F7_F0(a0)
+	fld f8,  TASK_THREAD_F8_F0(a0)
+	fld f9,  TASK_THREAD_F9_F0(a0)
+	fld f10, TASK_THREAD_F10_F0(a0)
+	fld f11, TASK_THREAD_F11_F0(a0)
+	fld f12, TASK_THREAD_F12_F0(a0)
+	fld f13, TASK_THREAD_F13_F0(a0)
+	fld f14, TASK_THREAD_F14_F0(a0)
+	fld f15, TASK_THREAD_F15_F0(a0)
+	fld f16, TASK_THREAD_F16_F0(a0)
+	fld f17, TASK_THREAD_F17_F0(a0)
+	fld f18, TASK_THREAD_F18_F0(a0)
+	fld f19, TASK_THREAD_F19_F0(a0)
+	fld f20, TASK_THREAD_F20_F0(a0)
+	fld f21, TASK_THREAD_F21_F0(a0)
+	fld f22, TASK_THREAD_F22_F0(a0)
+	fld f23, TASK_THREAD_F23_F0(a0)
+	fld f24, TASK_THREAD_F24_F0(a0)
+	fld f25, TASK_THREAD_F25_F0(a0)
+	fld f26, TASK_THREAD_F26_F0(a0)
+	fld f27, TASK_THREAD_F27_F0(a0)
+	fld f28, TASK_THREAD_F28_F0(a0)
+	fld f29, TASK_THREAD_F29_F0(a0)
+	fld f30, TASK_THREAD_F30_F0(a0)
+	fld f31, TASK_THREAD_F31_F0(a0)
+	fscsr t0
+	csrc CSR_SSTATUS, t1
+	ret
+ENDPROC(__fstate_restore)
diff --git a/marvell/linux/arch/riscv/kernel/ftrace.c b/marvell/linux/arch/riscv/kernel/ftrace.c
new file mode 100644
index 0000000..291c579
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/ftrace.c
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ * Copyright (C) 2017 Andes Technology Corporation
+ */
+
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static int ftrace_check_current_call(unsigned long hook_pos,
+				     unsigned int *expected)
+{
+	unsigned int replaced[2];
+	unsigned int nops[2] = {NOP4, NOP4};
+
+	/* we expect nops at the hook position */
+	if (!expected)
+		expected = nops;
+
+	/*
+	 * Read the text we want to modify;
+	 * return must be -EFAULT on read error
+	 */
+	if (probe_kernel_read(replaced, (void *)hook_pos, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/*
+	 * Make sure it is what we expect it to be;
+	 * return must be -EINVAL on failed comparison
+	 */
+	if (memcmp(expected, replaced, sizeof(replaced))) {
+		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
+		       (void *)hook_pos, expected[0], expected[1], replaced[0],
+		       replaced[1]);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
+				bool enable)
+{
+	unsigned int call[2];
+	unsigned int nops[2] = {NOP4, NOP4};
+	int ret = 0;
+
+	make_call(hook_pos, target, call);
+
+	/* replace the auipc-jalr pair at once */
+	ret = probe_kernel_write((void *)hook_pos, enable ? call : nops,
+				 MCOUNT_INSN_SIZE);
+	/* return must be -EPERM on write error */
+	if (ret)
+		return -EPERM;
+
+	smp_mb();
+	flush_icache_range(hook_pos, hook_pos + MCOUNT_INSN_SIZE);
+
+	return 0;
+}
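+
+/*
+ * For reference, make_call() (asm/ftrace.h) materializes the auipc-jalr
+ * pair patched above; roughly (a sketch):
+ *
+ *	auipc	ra, %hi20(target - hook_pos)
+ *	jalr	ra, %lo12(target - hook_pos)(ra)
+ *
+ * which together form an 8-byte, +-2GB PC-relative call.
+ */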
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	int ret = ftrace_check_current_call(rec->ip, NULL);
+
+	if (ret)
+		return ret;
+
+	return __ftrace_modify_call(rec->ip, addr, true);
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+		    unsigned long addr)
+{
+	unsigned int call[2];
+	int ret;
+
+	make_call(rec->ip, addr, call);
+	ret = ftrace_check_current_call(rec->ip, call);
+
+	if (ret)
+		return ret;
+
+	return __ftrace_modify_call(rec->ip, addr, false);
+}
+
+
+/*
+ * This is called early on, and isn't wrapped by
+ * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
+ * text_mutex, which triggers a lockdep failure.  SMP isn't running so we could
+ * just directly poke the text, but it's simpler to just take the lock
+ * ourselves.
+ */
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+	int out;
+
+	ftrace_arch_code_modify_prepare();
+	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+	ftrace_arch_code_modify_post_process();
+
+	return out;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
+				       (unsigned long)func, true);
+	if (!ret) {
+		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
+					   (unsigned long)func, true);
+	}
+
+	return ret;
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+		       unsigned long addr)
+{
+	unsigned int call[2];
+	int ret;
+
+	make_call(rec->ip, old_addr, call);
+	ret = ftrace_check_current_call(rec->ip, call);
+
+	if (ret)
+		return ret;
+
+	return __ftrace_modify_call(rec->ip, addr, true);
+}
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * Most of this function is copied from arm64.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+			   unsigned long frame_pointer)
+{
+	unsigned long return_hooker = (unsigned long)&return_to_handler;
+	unsigned long old;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	/*
+	 * We don't suffer access faults, so no extra fault-recovery assembly
+	 * is needed here.
+	 */
+	old = *parent;
+
+	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
+		*parent = return_hooker;
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned int call[2];
+	static int init_graph = 1;
+	int ret;
+
+	make_call(&ftrace_graph_call, &ftrace_stub, call);
+
+	/*
+	 * When enabling graph tracer for the first time, ftrace_graph_call
+	 * should contain a call to ftrace_stub.  Once it has been disabled,
+	 * the 8 bytes at that position become NOPs.
+	 */
+	if (init_graph) {
+		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
+						call);
+		init_graph = 0;
+	} else {
+		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
+						NULL);
+	}
+
+	if (ret)
+		return ret;
+
+	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
+				    (unsigned long)&prepare_ftrace_return, true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned int call[2];
+	int ret;
+
+	make_call(&ftrace_graph_call, &prepare_ftrace_return, call);
+
+	/*
+	 * This is to make sure that ftrace_enable_ftrace_graph_caller
+	 * did the right thing.
+	 */
+	ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
+					call);
+
+	if (ret)
+		return ret;
+
+	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
+				    (unsigned long)&prepare_ftrace_return, false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/marvell/linux/arch/riscv/kernel/head.S b/marvell/linux/arch/riscv/kernel/head.S
new file mode 100644
index 0000000..3447931
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/head.S
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/asm.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/csr.h>
+#include <asm/image.h>
+
+__INIT
+ENTRY(_start)
+	/*
+	 * Image header expected by Linux boot-loaders. The image header data
+	 * structure is described in asm/image.h.
+	 * Do not modify it without modifying the structure and all bootloaders
+	 * that expect this header format!
+	 */
+	/* jump to start kernel */
+	j _start_kernel
+	/* reserved */
+	.word 0
+	.balign 8
+#ifdef CONFIG_RISCV_M_MODE
+	/* Image load offset (0MB) from start of RAM for M-mode */
+	.dword 0
+#else
+#if __riscv_xlen == 64
+	/* Image load offset (2MB) from start of RAM */
+	.dword 0x200000
+#else
+	/* Image load offset (4MB) from start of RAM */
+	.dword 0x400000
+#endif
+#endif
+	/* Effective size of kernel image */
+	.dword _end - _start
+	.dword __HEAD_FLAGS
+	.word RISCV_HEADER_VERSION
+	.word 0
+	.dword 0
+	.ascii RISCV_IMAGE_MAGIC
+	.balign 4
+	.ascii RISCV_IMAGE_MAGIC2
+	.word 0
+
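+/*
+ * For reference, the header emitted above corresponds to this C layout
+ * (a sketch of struct riscv_image_header; see asm/image.h for the
+ * authoritative definition):
+ *
+ *	struct riscv_image_header {
+ *		u32 code0;		// j _start_kernel
+ *		u32 code1;		// reserved
+ *		u64 text_offset;	// image load offset from start of RAM
+ *		u64 image_size;		// effective kernel image size
+ *		u64 flags;
+ *		u32 version;
+ *		u32 res1;
+ *		u64 res2;
+ *		u64 magic;		// RISCV_IMAGE_MAGIC
+ *		u32 magic2;		// RISCV_IMAGE_MAGIC2
+ *		u32 res3;
+ *	};
+ */
+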
+.global _start_kernel
+_start_kernel:
+	/* Mask all interrupts */
+	csrw CSR_SIE, zero
+	csrw CSR_SIP, zero
+
+	/* Load the global pointer */
+.option push
+.option norelax
+	la gp, __global_pointer$
+.option pop
+
+	/*
+	 * Disable FPU to detect illegal usage of
+	 * floating point in kernel space
+	 */
+	li t0, SR_FS
+	csrc CSR_SSTATUS, t0
+
+#ifdef CONFIG_SMP
+	li t0, CONFIG_NR_CPUS
+	bgeu a0, t0, .Lsecondary_park
+#endif
+
+	/* Pick one hart to run the main boot sequence */
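+	/* (amoadd.w returns the old value, so exactly one hart reads 0) */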
+	la a3, hart_lottery
+	li a2, 1
+	amoadd.w a3, a2, (a3)
+	bnez a3, .Lsecondary_start
+
+	/* Clear BSS for flat non-ELF images */
+	la a3, __bss_start
+	la a4, __bss_stop
+	ble a4, a3, clear_bss_done
+clear_bss:
+	REG_S zero, (a3)
+	add a3, a3, RISCV_SZPTR
+	blt a3, a4, clear_bss
+clear_bss_done:
+
+	/* Save hart ID and DTB physical address */
+	mv s0, a0
+	mv s1, a1
+	la a2, boot_cpu_hartid
+	REG_S a0, (a2)
+
+	/* Initialize page tables and relocate to virtual addresses */
+	la sp, init_thread_union + THREAD_SIZE
+	mv a0, s1
+	call setup_vm
+	la a0, early_pg_dir
+	call relocate
+
+	/* Restore C environment */
+	la tp, init_task
+	sw zero, TASK_TI_CPU(tp)
+	la sp, init_thread_union + THREAD_SIZE
+
+	/* Start the kernel */
+	call parse_dtb
+	tail start_kernel
+
+relocate:
+	/* Relocate return address */
+	li a1, PAGE_OFFSET
+	la a2, _start
+	sub a1, a1, a2
+	add ra, ra, a1
+
+	/* Point stvec to virtual address of instruction after satp write */
+	la a2, 1f
+	add a2, a2, a1
+	csrw CSR_STVEC, a2
+
+	/* Compute satp for kernel page tables, but don't load it yet */
+	srl a2, a0, PAGE_SHIFT
+	li a1, SATP_MODE
+	or a2, a2, a1
+
+	/*
+	 * Load trampoline page directory, which will cause us to trap to
+	 * stvec if VA != PA, or simply fall through if VA == PA.  We need a
+	 * full fence here because setup_vm() just wrote these PTEs and we need
+	 * to ensure the new translations are in use.
+	 */
+	la a0, trampoline_pg_dir
+	srl a0, a0, PAGE_SHIFT
+	or a0, a0, a1
+	sfence.vma
+	csrw CSR_SATP, a0
+.align 2
+1:
+	/* Set trap vector to spin forever to help debug */
+	la a0, .Lsecondary_park
+	csrw CSR_STVEC, a0
+
+	/* Reload the global pointer */
+.option push
+.option norelax
+	la gp, __global_pointer$
+.option pop
+
+	/*
+	 * Switch to kernel page tables.  A full fence is necessary in order to
+	 * avoid using the trampoline translations, which are only correct for
+	 * the first superpage.  Fetching the fence is guaranteed to work
+	 * because that first superpage is translated the same way.
+	 */
+	csrw CSR_SATP, a2
+	sfence.vma
+
+	ret
+
+.Lsecondary_start:
+#ifdef CONFIG_SMP
+	/* Set trap vector to spin forever to help debug */
+	la a3, .Lsecondary_park
+	csrw CSR_STVEC, a3
+
+	slli a3, a0, LGREG
+	la a1, __cpu_up_stack_pointer
+	la a2, __cpu_up_task_pointer
+	add a1, a3, a1
+	add a2, a3, a2
+
+	/*
+	 * This hart didn't win the lottery, so we wait for the winning hart to
+	 * get far enough along the boot process that it should continue.
+	 */
+.Lwait_for_cpu_up:
+	/* FIXME: We should WFI to save some energy here. */
+	REG_L sp, (a1)
+	REG_L tp, (a2)
+	beqz sp, .Lwait_for_cpu_up
+	beqz tp, .Lwait_for_cpu_up
+	fence
+
+	/* Enable virtual memory and relocate to virtual address */
+	la a0, swapper_pg_dir
+	call relocate
+
+	tail smp_callin
+#endif
+
+.align 2
+.Lsecondary_park:
+	/* We lack SMP support or have too many harts, so park this hart */
+	wfi
+	j .Lsecondary_park
+END(_start)
+
+__PAGE_ALIGNED_BSS
+	/* Empty zero page */
+	.balign PAGE_SIZE
diff --git a/marvell/linux/arch/riscv/kernel/head.h b/marvell/linux/arch/riscv/kernel/head.h
new file mode 100644
index 0000000..105fb04
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/head.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 SiFive, Inc.
+ */
+#ifndef __ASM_HEAD_H
+#define __ASM_HEAD_H
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+extern atomic_t hart_lottery;
+
+asmlinkage void do_page_fault(struct pt_regs *regs);
+asmlinkage void __init setup_vm(uintptr_t dtb_pa);
+
+extern void *__cpu_up_stack_pointer[];
+extern void *__cpu_up_task_pointer[];
+
+void __init parse_dtb(void);
+
+#endif /* __ASM_HEAD_H */
diff --git a/marvell/linux/arch/riscv/kernel/irq.c b/marvell/linux/arch/riscv/kernel/irq.c
new file mode 100644
index 0000000..fffac6d
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/irq.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ * Copyright (C) 2018 Christoph Hellwig
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/seq_file.h>
+#include <asm/smp.h>
+
+/*
+ * Possible interrupt causes:
+ */
+#define INTERRUPT_CAUSE_SOFTWARE	IRQ_S_SOFT
+#define INTERRUPT_CAUSE_TIMER		IRQ_S_TIMER
+#define INTERRUPT_CAUSE_EXTERNAL	IRQ_S_EXT
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+	show_ipi_stats(p, prec);
+	return 0;
+}
+
+asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+	switch (regs->scause & ~SCAUSE_IRQ_FLAG) {
+	case INTERRUPT_CAUSE_TIMER:
+		riscv_timer_interrupt();
+		break;
+#ifdef CONFIG_SMP
+	case INTERRUPT_CAUSE_SOFTWARE:
+		/*
+		 * We only use software interrupts to pass IPIs, so if a non-SMP
+		 * system gets one, then we don't know what to do.
+		 */
+		riscv_software_interrupt();
+		break;
+#endif
+	case INTERRUPT_CAUSE_EXTERNAL:
+		handle_arch_irq(regs);
+		break;
+	default:
+		pr_alert("unexpected interrupt cause 0x%lx\n", regs->scause);
+		BUG();
+	}
+	irq_exit();
+
+	set_irq_regs(old_regs);
+}
+
+void __init init_IRQ(void)
+{
+	irqchip_init();
+}
diff --git a/marvell/linux/arch/riscv/kernel/mcount-dyn.S b/marvell/linux/arch/riscv/kernel/mcount-dyn.S
new file mode 100644
index 0000000..35a6ed7
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/mcount-dyn.S
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm-generic/export.h>
+#include <asm/ftrace.h>
+
+	.text
+
+	.macro SAVE_ABI_STATE
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	addi    sp, sp, -48
+	sd      s0, 32(sp)
+	sd      ra, 40(sp)
+	addi    s0, sp, 48
+	sd      t0, 24(sp)
+	sd      t1, 16(sp)
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+	sd      t2, 8(sp)
+#endif
+#else
+	addi	sp, sp, -16
+	sd	s0, 0(sp)
+	sd	ra, 8(sp)
+	addi	s0, sp, 16
+#endif
+	.endm
+
+	.macro RESTORE_ABI_STATE
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	ld	s0, 32(sp)
+	ld	ra, 40(sp)
+	addi	sp, sp, 48
+#else
+	ld	ra, 8(sp)
+	ld	s0, 0(sp)
+	addi	sp, sp, 16
+#endif
+	.endm
+
+	.macro RESTORE_GRAPH_ARGS
+	ld	a0, 24(sp)
+	ld	a1, 16(sp)
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+	ld	a2, 8(sp)
+#endif
+	.endm
+
+ENTRY(ftrace_graph_caller)
+	addi	sp, sp, -16
+	sd	s0, 0(sp)
+	sd	ra, 8(sp)
+	addi	s0, sp, 16
+ftrace_graph_call:
+	.global ftrace_graph_call
+	/*
+	 * Calling ftrace_enable/disable_ftrace_graph_caller would overwrite the
+	 * call below.  Check ftrace_modify_all_code for details.
+	 */
+	call	ftrace_stub
+	ld	ra, 8(sp)
+	ld	s0, 0(sp)
+	addi	sp, sp, 16
+	ret
+ENDPROC(ftrace_graph_caller)
+
+ENTRY(ftrace_caller)
+	/*
+	 * a0: the address in the caller when calling ftrace_caller
+	 * a1: the caller's return address
+	 * a2: the struct ftrace_ops pointer loaded from function_trace_op
+	 */
+	ld	a1, -8(s0)
+	addi	a0, ra, -MCOUNT_INSN_SIZE
+	la	t5, function_trace_op
+	ld	a2, 0(t5)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/*
+	 * The graph tracer (specifically, prepare_ftrace_return) needs these
+	 * arguments, but the function tracer currently occupies the argument
+	 * registers, so we save them in temporaries and recover them later.
+	 */
+	addi	t0, s0, -8
+	mv	t1, a0
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+	ld	t2, -16(s0)
+#endif
+#endif
+
+	SAVE_ABI_STATE
+ftrace_call:
+	.global ftrace_call
+	/*
+	 * For the dynamic ftrace to work, here we should reserve at least
+	 * 8 bytes for a functional auipc-jalr pair.  The following call
+	 * serves this purpose.
+	 *
+	 * Calling ftrace_update_ftrace_func would overwrite the nops below.
+	 * Check ftrace_modify_all_code for details.
+	 */
+	call	ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	RESTORE_GRAPH_ARGS
+	call	ftrace_graph_caller
+#endif
+
+	RESTORE_ABI_STATE
+	ret
+ENDPROC(ftrace_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	.macro SAVE_ALL
+	addi	sp, sp, -(PT_SIZE_ON_STACK+16)
+	sd	s0, (PT_SIZE_ON_STACK)(sp)
+	sd	ra, (PT_SIZE_ON_STACK+8)(sp)
+	addi	s0, sp, (PT_SIZE_ON_STACK+16)
+
+	sd x1,  PT_RA(sp)
+	sd x2,  PT_SP(sp)
+	sd x3,  PT_GP(sp)
+	sd x4,  PT_TP(sp)
+	sd x5,  PT_T0(sp)
+	sd x6,  PT_T1(sp)
+	sd x7,  PT_T2(sp)
+	sd x8,  PT_S0(sp)
+	sd x9,  PT_S1(sp)
+	sd x10, PT_A0(sp)
+	sd x11, PT_A1(sp)
+	sd x12, PT_A2(sp)
+	sd x13, PT_A3(sp)
+	sd x14, PT_A4(sp)
+	sd x15, PT_A5(sp)
+	sd x16, PT_A6(sp)
+	sd x17, PT_A7(sp)
+	sd x18, PT_S2(sp)
+	sd x19, PT_S3(sp)
+	sd x20, PT_S4(sp)
+	sd x21, PT_S5(sp)
+	sd x22, PT_S6(sp)
+	sd x23, PT_S7(sp)
+	sd x24, PT_S8(sp)
+	sd x25, PT_S9(sp)
+	sd x26, PT_S10(sp)
+	sd x27, PT_S11(sp)
+	sd x28, PT_T3(sp)
+	sd x29, PT_T4(sp)
+	sd x30, PT_T5(sp)
+	sd x31, PT_T6(sp)
+	.endm
+
+	.macro RESTORE_ALL
+	ld x1,  PT_RA(sp)
+	ld x2,  PT_SP(sp)
+	ld x3,  PT_GP(sp)
+	ld x4,  PT_TP(sp)
+	ld x5,  PT_T0(sp)
+	ld x6,  PT_T1(sp)
+	ld x7,  PT_T2(sp)
+	ld x8,  PT_S0(sp)
+	ld x9,  PT_S1(sp)
+	ld x10, PT_A0(sp)
+	ld x11, PT_A1(sp)
+	ld x12, PT_A2(sp)
+	ld x13, PT_A3(sp)
+	ld x14, PT_A4(sp)
+	ld x15, PT_A5(sp)
+	ld x16, PT_A6(sp)
+	ld x17, PT_A7(sp)
+	ld x18, PT_S2(sp)
+	ld x19, PT_S3(sp)
+	ld x20, PT_S4(sp)
+	ld x21, PT_S5(sp)
+	ld x22, PT_S6(sp)
+	ld x23, PT_S7(sp)
+	ld x24, PT_S8(sp)
+	ld x25, PT_S9(sp)
+	ld x26, PT_S10(sp)
+	ld x27, PT_S11(sp)
+	ld x28, PT_T3(sp)
+	ld x29, PT_T4(sp)
+	ld x30, PT_T5(sp)
+	ld x31, PT_T6(sp)
+
+	ld	s0, (PT_SIZE_ON_STACK)(sp)
+	ld	ra, (PT_SIZE_ON_STACK+8)(sp)
+	addi	sp, sp, (PT_SIZE_ON_STACK+16)
+	.endm
+
+	.macro RESTORE_GRAPH_REG_ARGS
+	ld	a0, PT_T0(sp)
+	ld	a1, PT_T1(sp)
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+	ld	a2, PT_T2(sp)
+#endif
+	.endm
+
+/*
+ * Most of the contents are the same as ftrace_caller.
+ */
+ENTRY(ftrace_regs_caller)
+	/*
+	 * a3: the address of all registers in the stack
+	 */
+	ld	a1, -8(s0)
+	addi	a0, ra, -MCOUNT_INSN_SIZE
+	la	t5, function_trace_op
+	ld	a2, 0(t5)
+	addi	a3, sp, -(PT_SIZE_ON_STACK+16)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	addi	t0, s0, -8
+	mv	t1, a0
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+	ld	t2, -16(s0)
+#endif
+#endif
+	SAVE_ALL
+
+ftrace_regs_call:
+	.global ftrace_regs_call
+	call	ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	RESTORE_GRAPH_REG_ARGS
+	call	ftrace_graph_caller
+#endif
+
+	RESTORE_ALL
+	ret
+ENDPROC(ftrace_regs_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
diff --git a/marvell/linux/arch/riscv/kernel/mcount.S b/marvell/linux/arch/riscv/kernel/mcount.S
new file mode 100644
index 0000000..6d46268
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/mcount.S
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm-generic/export.h>
+#include <asm/ftrace.h>
+
+	.text
+
+	.macro SAVE_ABI_STATE
+	addi	sp, sp, -16
+	sd	s0, 0(sp)
+	sd	ra, 8(sp)
+	addi	s0, sp, 16
+	.endm
+
+	/*
+	 * The call to ftrace_return_to_handler would overwrite the return
+	 * register if a0 was not saved.
+	 */
+	.macro SAVE_RET_ABI_STATE
+	addi	sp, sp, -32
+	sd	s0, 16(sp)
+	sd	ra, 24(sp)
+	sd	a0, 8(sp)
+	addi	s0, sp, 32
+	.endm
+
+	.macro RESTORE_ABI_STATE
+	ld	ra, 8(sp)
+	ld	s0, 0(sp)
+	addi	sp, sp, 16
+	.endm
+
+	.macro RESTORE_RET_ABI_STATE
+	ld	ra, 24(sp)
+	ld	s0, 16(sp)
+	ld	a0, 8(sp)
+	addi	sp, sp, 32
+	.endm
+
+ENTRY(ftrace_stub)
+#ifdef CONFIG_DYNAMIC_FTRACE
+       .global MCOUNT_NAME
+       .set    MCOUNT_NAME, ftrace_stub
+#endif
+	ret
+ENDPROC(ftrace_stub)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(return_to_handler)
+/*
+ * On implementing the frame pointer test, the ideal way is to compare the
+ * s0 (frame pointer, if enabled) on entry and the sp (stack pointer) on return.
+ * However, the psABI of variable-length-argument functions does not allow this.
+ *
+ * So alternatively we check the *old* frame pointer position, that is, the
+ * value stored in -16(s0) on entry, and the s0 on return.
+ */
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+	mv	t6, s0
+#endif
+	SAVE_RET_ABI_STATE
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+	mv	a0, t6
+#endif
+	call	ftrace_return_to_handler
+	mv	a1, a0
+	RESTORE_RET_ABI_STATE
+	jalr	a1
+ENDPROC(return_to_handler)
+#endif
+
+#ifndef CONFIG_DYNAMIC_FTRACE
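+/*
+ * With -pg, the compiler emits a call to _mcount after each function
+ * prologue, roughly (a sketch):
+ *
+ *	addi	sp, sp, -16
+ *	sd	ra, 8(sp)
+ *	sd	s0, 0(sp)
+ *	addi	s0, sp, 16
+ *	call	_mcount
+ *
+ * so on entry here ra points into the instrumented function and -8(s0)
+ * holds that function's own return address, matching do_trace below.
+ */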
+ENTRY(MCOUNT_NAME)
+	la	t4, ftrace_stub
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	la	t0, ftrace_graph_return
+	ld	t1, 0(t0)
+	bne	t1, t4, do_ftrace_graph_caller
+
+	la	t3, ftrace_graph_entry
+	ld	t2, 0(t3)
+	la	t6, ftrace_graph_entry_stub
+	bne	t2, t6, do_ftrace_graph_caller
+#endif
+	la	t3, ftrace_trace_function
+	ld	t5, 0(t3)
+	bne	t5, t4, do_trace
+	ret
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * A pseudo representation for the function graph tracer:
+ * prepare_to_return(&ra_to_caller_of_caller, ra_to_caller)
+ */
+do_ftrace_graph_caller:
+	addi	a0, s0, -8
+	mv	a1, ra
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+	ld	a2, -16(s0)
+#endif
+	SAVE_ABI_STATE
+	call	prepare_ftrace_return
+	RESTORE_ABI_STATE
+	ret
+#endif
+
+/*
+ * A pseudo representation for the function tracer:
+ * (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller)
+ */
+do_trace:
+	ld	a1, -8(s0)
+	mv	a0, ra
+
+	SAVE_ABI_STATE
+	jalr	t5
+	RESTORE_ABI_STATE
+	ret
+ENDPROC(MCOUNT_NAME)
+#endif
+EXPORT_SYMBOL(MCOUNT_NAME)
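`do_trace` above hands `(ra_to_caller, ra_to_caller_of_caller)` to whatever function is installed in `ftrace_trace_function`. A consumer sees that pair as the usual ip/parent_ip callback arguments; a minimal sketch of such a consumer, assuming the ftrace_ops API of this kernel generation:

```c
#include <linux/ftrace.h>
#include <linux/module.h>

/* ip is the traced function (a0 in do_trace above), parent_ip its
 * caller (a1). The callback must be lightweight and re-entrant. */
static void my_trace(unsigned long ip, unsigned long parent_ip,
		     struct ftrace_ops *op, struct pt_regs *regs)
{
	/* count, filter, or log the call here */
}

static struct ftrace_ops my_ops = {
	.func = my_trace,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
```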
diff --git a/marvell/linux/arch/riscv/kernel/module-sections.c b/marvell/linux/arch/riscv/kernel/module-sections.c
new file mode 100644
index 0000000..e264e59
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/module-sections.c
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * Copyright (C) 2018 Andes Technology Corporation <zong@andestech.com>
+ */
+
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+
+unsigned long module_emit_got_entry(struct module *mod, unsigned long val)
+{
+	struct mod_section *got_sec = &mod->arch.got;
+	int i = got_sec->num_entries;
+	struct got_entry *got = get_got_entry(val, got_sec);
+
+	if (got)
+		return (unsigned long)got;
+
+	/* There is no duplicate entry; create a new one */
+	got = (struct got_entry *)got_sec->shdr->sh_addr;
+	got[i] = emit_got_entry(val);
+
+	got_sec->num_entries++;
+	BUG_ON(got_sec->num_entries > got_sec->max_entries);
+
+	return (unsigned long)&got[i];
+}
+
+unsigned long module_emit_plt_entry(struct module *mod, unsigned long val)
+{
+	struct mod_section *got_plt_sec = &mod->arch.got_plt;
+	struct got_entry *got_plt;
+	struct mod_section *plt_sec = &mod->arch.plt;
+	struct plt_entry *plt = get_plt_entry(val, plt_sec, got_plt_sec);
+	int i = plt_sec->num_entries;
+
+	if (plt)
+		return (unsigned long)plt;
+
+	/* There is no duplicate entry; create a new one */
+	got_plt = (struct got_entry *)got_plt_sec->shdr->sh_addr;
+	got_plt[i] = emit_got_entry(val);
+	plt = (struct plt_entry *)plt_sec->shdr->sh_addr;
+	plt[i] = emit_plt_entry(val,
+				(unsigned long)&plt[i],
+				(unsigned long)&got_plt[i]);
+
+	plt_sec->num_entries++;
+	got_plt_sec->num_entries++;
+	BUG_ON(plt_sec->num_entries > plt_sec->max_entries);
+
+	return (unsigned long)&plt[i];
+}
+
+static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
+{
+	return x->r_info == y->r_info && x->r_addend == y->r_addend;
+}
+
+static bool duplicate_rela(const Elf_Rela *rela, int idx)
+{
+	int i;
+	for (i = 0; i < idx; i++) {
+		if (is_rela_equal(&rela[i], &rela[idx]))
+			return true;
+	}
+	return false;
+}
+
+static void count_max_entries(Elf_Rela *relas, int num,
+			      unsigned int *plts, unsigned int *gots)
+{
+	unsigned int type, i;
+
+	for (i = 0; i < num; i++) {
+		type = ELF_RISCV_R_TYPE(relas[i].r_info);
+		if (type == R_RISCV_CALL_PLT) {
+			if (!duplicate_rela(relas, i))
+				(*plts)++;
+		} else if (type == R_RISCV_GOT_HI20) {
+			if (!duplicate_rela(relas, i))
+				(*gots)++;
+		}
+	}
+}
+
+int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+			      char *secstrings, struct module *mod)
+{
+	unsigned int num_plts = 0;
+	unsigned int num_gots = 0;
+	int i;
+
+	/*
+	 * Find the empty .got and .plt sections.
+	 */
+	for (i = 0; i < ehdr->e_shnum; i++) {
+		if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
+			mod->arch.plt.shdr = sechdrs + i;
+		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".got"))
+			mod->arch.got.shdr = sechdrs + i;
+		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".got.plt"))
+			mod->arch.got_plt.shdr = sechdrs + i;
+	}
+
+	if (!mod->arch.plt.shdr) {
+		pr_err("%s: module PLT section(s) missing\n", mod->name);
+		return -ENOEXEC;
+	}
+	if (!mod->arch.got.shdr) {
+		pr_err("%s: module GOT section(s) missing\n", mod->name);
+		return -ENOEXEC;
+	}
+	if (!mod->arch.got_plt.shdr) {
+		pr_err("%s: module GOT.PLT section(s) missing\n", mod->name);
+		return -ENOEXEC;
+	}
+
+	/* Calculate the maximum number of entries */
+	for (i = 0; i < ehdr->e_shnum; i++) {
+		Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
+		int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela);
+		Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;
+
+		if (sechdrs[i].sh_type != SHT_RELA)
+			continue;
+
+		/* ignore relocations that operate on non-exec sections */
+		if (!(dst_sec->sh_flags & SHF_EXECINSTR))
+			continue;
+
+		count_max_entries(relas, num_rela, &num_plts, &num_gots);
+	}
+
+	mod->arch.plt.shdr->sh_type = SHT_NOBITS;
+	mod->arch.plt.shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+	mod->arch.plt.shdr->sh_addralign = L1_CACHE_BYTES;
+	mod->arch.plt.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_entry);
+	mod->arch.plt.num_entries = 0;
+	mod->arch.plt.max_entries = num_plts;
+
+	mod->arch.got.shdr->sh_type = SHT_NOBITS;
+	mod->arch.got.shdr->sh_flags = SHF_ALLOC;
+	mod->arch.got.shdr->sh_addralign = L1_CACHE_BYTES;
+	mod->arch.got.shdr->sh_size = (num_gots + 1) * sizeof(struct got_entry);
+	mod->arch.got.num_entries = 0;
+	mod->arch.got.max_entries = num_gots;
+
+	mod->arch.got_plt.shdr->sh_type = SHT_NOBITS;
+	mod->arch.got_plt.shdr->sh_flags = SHF_ALLOC;
+	mod->arch.got_plt.shdr->sh_addralign = L1_CACHE_BYTES;
+	mod->arch.got_plt.shdr->sh_size = (num_plts + 1) * sizeof(struct got_entry);
+	mod->arch.got_plt.num_entries = 0;
+	mod->arch.got_plt.max_entries = num_plts;
+	return 0;
+}
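module_emit_got_entry() and module_emit_plt_entry() above depend on emit_got_entry()/emit_plt_entry() and the lookup helpers from asm/module.h, which this diff does not include. As a sketch of the data they plausibly lay down (the layout here is illustrative, not the authoritative definition): a GOT entry holds just the resolved address, and a PLT entry is a short trampoline that loads the target from its GOT slot and jumps there.

```c
/* Illustrative only -- the authoritative definitions are in asm/module.h. */
struct got_entry {
	unsigned long symbol_addr;	/* resolved target address */
};

struct plt_entry {
	u32 insn_auipc;		/* auipc t0, hi20(got slot) */
	u32 insn_ld;		/* ld    t1, lo12(t0)       */
	u32 insn_jr;		/* jr    t1                 */
};
```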
diff --git a/marvell/linux/arch/riscv/kernel/module.c b/marvell/linux/arch/riscv/kernel/module.c
new file mode 100644
index 0000000..a963b76
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/module.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *
+ *  Copyright (C) 2017 Zihao Yu
+ */
+
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/moduleloader.h>
+#include <linux/vmalloc.h>
+#include <linux/sizes.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+
+/*
+ * The auipc+jalr instruction pair can reach any PC-relative offset
+ * in the range [-2^31 - 2^11, 2^31 - 2^11)
+ */
+static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
+{
+#ifdef CONFIG_32BIT
+	return true;
+#else
+	return (-(1L << 31) - (1L << 11)) <= val && val < ((1L << 31) - (1L << 11));
+#endif
+}
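Most handlers below split a value into a hi20 half for auipc/lui and a signed lo12 half for the following instruction. The `+ 0x800` before masking rounds hi20 up whenever lo12 would come out negative, so the two halves always recombine exactly; a worked example:

```c
/* Worked example of the hi20/lo12 split used throughout this file. */
s32 offset = 0x12345abc;
s32 hi20 = (offset + 0x800) & 0xfffff000;	/* 0x12346000 */
s32 lo12 = offset - hi20;			/* -0x544, fits in 12 signed bits */
/* auipc adds hi20, the next insn adds lo12: 0x12346000 - 0x544 == 0x12345abc */
```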
+
+static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+	if (v != (u32)v) {
+		pr_err("%s: value %016llx out of range for 32-bit field\n",
+		       me->name, (long long)v);
+		return -EINVAL;
+	}
+	*location = v;
+	return 0;
+}
+
+static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+	*(u64 *)location = v;
+	return 0;
+}
+
+static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
+				     Elf_Addr v)
+{
+	ptrdiff_t offset = (void *)v - (void *)location;
+	u32 imm12 = (offset & 0x1000) << (31 - 12);
+	u32 imm11 = (offset & 0x800) >> (11 - 7);
+	u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
+	u32 imm4_1 = (offset & 0x1e) << (11 - 4);
+
+	*location = (*location & 0x1fff07f) | imm12 | imm11 | imm10_5 | imm4_1;
+	return 0;
+}
+
+static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
+				  Elf_Addr v)
+{
+	ptrdiff_t offset = (void *)v - (void *)location;
+	u32 imm20 = (offset & 0x100000) << (31 - 20);
+	u32 imm19_12 = (offset & 0xff000);
+	u32 imm11 = (offset & 0x800) << (20 - 11);
+	u32 imm10_1 = (offset & 0x7fe) << (30 - 10);
+
+	*location = (*location & 0xfff) | imm20 | imm19_12 | imm11 | imm10_1;
+	return 0;
+}
+
+static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
+					 Elf_Addr v)
+{
+	ptrdiff_t offset = (void *)v - (void *)location;
+	u16 imm8 = (offset & 0x100) << (12 - 8);
+	u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
+	u16 imm5 = (offset & 0x20) >> (5 - 2);
+	u16 imm4_3 = (offset & 0x18) << (12 - 5);
+	u16 imm2_1 = (offset & 0x6) << (12 - 10);
+
+	*(u16 *)location = (*(u16 *)location & 0xe383) |
+		    imm8 | imm7_6 | imm5 | imm4_3 | imm2_1;
+	return 0;
+}
+
+static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
+				       Elf_Addr v)
+{
+	ptrdiff_t offset = (void *)v - (void *)location;
+	u16 imm11 = (offset & 0x800) << (12 - 11);
+	u16 imm10 = (offset & 0x400) >> (10 - 8);
+	u16 imm9_8 = (offset & 0x300) << (12 - 11);
+	u16 imm7 = (offset & 0x80) >> (7 - 6);
+	u16 imm6 = (offset & 0x40) << (12 - 11);
+	u16 imm5 = (offset & 0x20) >> (5 - 2);
+	u16 imm4 = (offset & 0x10) << (12 - 5);
+	u16 imm3_1 = (offset & 0xe) << (12 - 10);
+
+	*(u16 *)location = (*(u16 *)location & 0xe003) |
+		    imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1;
+	return 0;
+}
+
+static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
+					 Elf_Addr v)
+{
+	ptrdiff_t offset = (void *)v - (void *)location;
+	s32 hi20;
+
+	if (!riscv_insn_valid_32bit_offset(offset)) {
+		pr_err(
+		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
+		  me->name, (long long)v, location);
+		return -EINVAL;
+	}
+
+	hi20 = (offset + 0x800) & 0xfffff000;
+	*location = (*location & 0xfff) | hi20;
+	return 0;
+}
+
+static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, u32 *location,
+					   Elf_Addr v)
+{
+	/*
+	 * v is the lo12 value to fill. It is calculated before calling this
+	 * handler.
+	 */
+	*location = (*location & 0xfffff) | ((v & 0xfff) << 20);
+	return 0;
+}
+
+static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location,
+					   Elf_Addr v)
+{
+	/*
+	 * v is the lo12 value to fill. It is calculated before calling this
+	 * handler.
+	 */
+	u32 imm11_5 = (v & 0xfe0) << (31 - 11);
+	u32 imm4_0 = (v & 0x1f) << (11 - 4);
+
+	*location = (*location & 0x1fff07f) | imm11_5 | imm4_0;
+	return 0;
+}
+
+static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
+				   Elf_Addr v)
+{
+	s32 hi20;
+
+	if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
+		pr_err(
+		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
+		  me->name, (long long)v, location);
+		return -EINVAL;
+	}
+
+	hi20 = ((s32)v + 0x800) & 0xfffff000;
+	*location = (*location & 0xfff) | hi20;
+	return 0;
+}
+
+static int apply_r_riscv_lo12_i_rela(struct module *me, u32 *location,
+				     Elf_Addr v)
+{
+	/* Skip medlow checking because of filtering by HI20 already */
+	s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
+	s32 lo12 = ((s32)v - hi20);
+	*location = (*location & 0xfffff) | ((lo12 & 0xfff) << 20);
+	return 0;
+}
+
+static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
+				     Elf_Addr v)
+{
+	/* Skip medlow checking because of filtering by HI20 already */
+	s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
+	s32 lo12 = ((s32)v - hi20);
+	u32 imm11_5 = (lo12 & 0xfe0) << (31 - 11);
+	u32 imm4_0 = (lo12 & 0x1f) << (11 - 4);
+	*location = (*location & 0x1fff07f) | imm11_5 | imm4_0;
+	return 0;
+}
+
+static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
+				       Elf_Addr v)
+{
+	ptrdiff_t offset = (void *)v - (void *)location;
+	s32 hi20;
+
+	/* Always emit the got entry */
+	if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
+		offset = module_emit_got_entry(me, v);
+		offset = (void *)offset - (void *)location;
+	} else {
+		pr_err(
+		  "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
+		  me->name, (long long)v, location);
+		return -EINVAL;
+	}
+
+	hi20 = (offset + 0x800) & 0xfffff000;
+	*location = (*location & 0xfff) | hi20;
+	return 0;
+}
+
+static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
+				       Elf_Addr v)
+{
+	ptrdiff_t offset = (void *)v - (void *)location;
+	u32 hi20, lo12;
+
+	if (!riscv_insn_valid_32bit_offset(offset)) {
+		/* Only emit the plt entry if offset over 32-bit range */
+		if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
+			offset = module_emit_plt_entry(me, v);
+			offset = (void *)offset - (void *)location;
+		} else {
+			pr_err(
+			  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
+			  me->name, (long long)v, location);
+			return -EINVAL;
+		}
+	}
+
+	hi20 = (offset + 0x800) & 0xfffff000;
+	lo12 = (offset - hi20) & 0xfff;
+	*location = (*location & 0xfff) | hi20;
+	*(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20);
+	return 0;
+}
+
+static int apply_r_riscv_call_rela(struct module *me, u32 *location,
+				   Elf_Addr v)
+{
+	ptrdiff_t offset = (void *)v - (void *)location;
+	u32 hi20, lo12;
+
+	if (!riscv_insn_valid_32bit_offset(offset)) {
+		pr_err(
+		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
+		  me->name, (long long)v, location);
+		return -EINVAL;
+	}
+
+	hi20 = (offset + 0x800) & 0xfffff000;
+	lo12 = (offset - hi20) & 0xfff;
+	*location = (*location & 0xfff) | hi20;
+	*(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20);
+	return 0;
+}
+
+static int apply_r_riscv_relax_rela(struct module *me, u32 *location,
+				    Elf_Addr v)
+{
+	return 0;
+}
+
+static int apply_r_riscv_align_rela(struct module *me, u32 *location,
+				    Elf_Addr v)
+{
+	pr_err(
+	  "%s: The unexpected relocation type 'R_RISCV_ALIGN' from PC = %p\n",
+	  me->name, location);
+	return -EINVAL;
+}
+
+static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
+				    Elf_Addr v)
+{
+	*(u32 *)location += (u32)v;
+	return 0;
+}
+
+static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
+				    Elf_Addr v)
+{
+	*(u32 *)location -= (u32)v;
+	return 0;
+}
+
+static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
+				Elf_Addr v) = {
+	[R_RISCV_32]			= apply_r_riscv_32_rela,
+	[R_RISCV_64]			= apply_r_riscv_64_rela,
+	[R_RISCV_BRANCH]		= apply_r_riscv_branch_rela,
+	[R_RISCV_JAL]			= apply_r_riscv_jal_rela,
+	[R_RISCV_RVC_BRANCH]		= apply_r_riscv_rcv_branch_rela,
+	[R_RISCV_RVC_JUMP]		= apply_r_riscv_rvc_jump_rela,
+	[R_RISCV_PCREL_HI20]		= apply_r_riscv_pcrel_hi20_rela,
+	[R_RISCV_PCREL_LO12_I]		= apply_r_riscv_pcrel_lo12_i_rela,
+	[R_RISCV_PCREL_LO12_S]		= apply_r_riscv_pcrel_lo12_s_rela,
+	[R_RISCV_HI20]			= apply_r_riscv_hi20_rela,
+	[R_RISCV_LO12_I]		= apply_r_riscv_lo12_i_rela,
+	[R_RISCV_LO12_S]		= apply_r_riscv_lo12_s_rela,
+	[R_RISCV_GOT_HI20]		= apply_r_riscv_got_hi20_rela,
+	[R_RISCV_CALL_PLT]		= apply_r_riscv_call_plt_rela,
+	[R_RISCV_CALL]			= apply_r_riscv_call_rela,
+	[R_RISCV_RELAX]			= apply_r_riscv_relax_rela,
+	[R_RISCV_ALIGN]			= apply_r_riscv_align_rela,
+	[R_RISCV_ADD32]			= apply_r_riscv_add32_rela,
+	[R_RISCV_SUB32]			= apply_r_riscv_sub32_rela,
+};
+
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+		       unsigned int symindex, unsigned int relsec,
+		       struct module *me)
+{
+	Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+	int (*handler)(struct module *me, u32 *location, Elf_Addr v);
+	Elf_Sym *sym;
+	u32 *location;
+	unsigned int i, type;
+	Elf_Addr v;
+	int res;
+
+	pr_debug("Applying relocate section %u to %u\n", relsec,
+	       sechdrs[relsec].sh_info);
+
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+		/* This is where to make the change */
+		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+			+ rel[i].r_offset;
+		/* This is the symbol it is referring to */
+		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+			+ ELF_RISCV_R_SYM(rel[i].r_info);
+		if (IS_ERR_VALUE(sym->st_value)) {
+			/* Ignore unresolved weak symbol */
+			if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
+				continue;
+			pr_warning("%s: Unknown symbol %s\n",
+				   me->name, strtab + sym->st_name);
+			return -ENOENT;
+		}
+
+		type = ELF_RISCV_R_TYPE(rel[i].r_info);
+
+		if (type < ARRAY_SIZE(reloc_handlers_rela))
+			handler = reloc_handlers_rela[type];
+		else
+			handler = NULL;
+
+		if (!handler) {
+			pr_err("%s: Unknown relocation type %u\n",
+			       me->name, type);
+			return -EINVAL;
+		}
+
+		v = sym->st_value + rel[i].r_addend;
+
+		if (type == R_RISCV_PCREL_LO12_I || type == R_RISCV_PCREL_LO12_S) {
+			unsigned int j;
+
+			for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) {
+				unsigned long hi20_loc =
+					sechdrs[sechdrs[relsec].sh_info].sh_addr
+					+ rel[j].r_offset;
+				u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);
+
+				/* Find the corresponding HI20 relocation entry */
+				if (hi20_loc == sym->st_value
+				    && (hi20_type == R_RISCV_PCREL_HI20
+					|| hi20_type == R_RISCV_GOT_HI20)) {
+					s32 hi20, lo12;
+					Elf_Sym *hi20_sym =
+						(Elf_Sym *)sechdrs[symindex].sh_addr
+						+ ELF_RISCV_R_SYM(rel[j].r_info);
+					unsigned long hi20_sym_val =
+						hi20_sym->st_value
+						+ rel[j].r_addend;
+
+					/* Calculate lo12 */
+					size_t offset = hi20_sym_val - hi20_loc;
+					if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
+					    && hi20_type == R_RISCV_GOT_HI20) {
+						offset = module_emit_got_entry(
+							 me, hi20_sym_val);
+						offset = offset - hi20_loc;
+					}
+					hi20 = (offset + 0x800) & 0xfffff000;
+					lo12 = offset - hi20;
+					v = lo12;
+
+					break;
+				}
+			}
+			if (j == sechdrs[relsec].sh_size / sizeof(*rel)) {
+				pr_err(
+				  "%s: Can not find HI20 relocation information\n",
+				  me->name);
+				return -EINVAL;
+			}
+		}
+
+		res = handler(me, location, v);
+		if (res)
+			return res;
+	}
+
+	return 0;
+}
+
+#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
+#define VMALLOC_MODULE_START \
+	 max(PFN_ALIGN((unsigned long)&_end - SZ_2G), VMALLOC_START)
+void *module_alloc(unsigned long size)
+{
+	return __vmalloc_node_range(size, 1, VMALLOC_MODULE_START,
+				    VMALLOC_END, GFP_KERNEL,
+				    PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+				    __builtin_return_address(0));
+}
+#endif
diff --git a/marvell/linux/arch/riscv/kernel/module.lds b/marvell/linux/arch/riscv/kernel/module.lds
new file mode 100644
index 0000000..18ec719
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/module.lds
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+SECTIONS {
+	.plt : { BYTE(0) }
+	.got : { BYTE(0) }
+	.got.plt : { BYTE(0) }
+}
diff --git a/marvell/linux/arch/riscv/kernel/perf_callchain.c b/marvell/linux/arch/riscv/kernel/perf_callchain.c
new file mode 100644
index 0000000..11541cb
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/perf_callchain.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */
+
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+/* Kernel callchain */
+struct stackframe {
+	unsigned long fp;
+	unsigned long ra;
+};
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
+				    unsigned long fp, unsigned long reg_ra)
+{
+	struct stackframe buftail;
+	unsigned long ra = 0;
+	unsigned long *user_frame_tail =
+			(unsigned long *)(fp - sizeof(struct stackframe));
+
+	/* Check accessibility of one struct frame_tail beyond */
+	if (!access_ok(user_frame_tail, sizeof(buftail)))
+		return 0;
+	if (__copy_from_user_inatomic(&buftail, user_frame_tail,
+				      sizeof(buftail)))
+		return 0;
+
+	if (reg_ra != 0)
+		ra = reg_ra;
+	else
+		ra = buftail.ra;
+
+	fp = buftail.fp;
+	if (ra != 0)
+		perf_callchain_store(entry, ra);
+	else
+		return 0;
+
+	return fp;
+}
+
+/*
+ * This will be called when the target is in user mode.
+ * This function will only be called when we use
+ * "PERF_SAMPLE_CALLCHAIN" in
+ * kernel/events/core.c:perf_prepare_sample()
+ *
+ * How to trigger perf_callchain_[user/kernel]:
+ * $ perf record -e cpu-clock --call-graph fp ./program
+ * $ perf report --call-graph
+ *
+ * On the RISC-V platform, the program being sampled and the C library
+ * need to be compiled with -fno-omit-frame-pointer; otherwise
+ * the user stack will not contain function frames.
+ */
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+			 struct pt_regs *regs)
+{
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+	unsigned long fp = 0;
+
+	/* RISC-V does not support perf in guest mode. */
+	if (guest_cbs && guest_cbs->is_in_guest())
+		return;
+
+	fp = regs->s0;
+	perf_callchain_store(entry, regs->sepc);
+
+	fp = user_backtrace(entry, fp, regs->ra);
+	while (fp && !(fp & 0x7) && entry->nr < entry->max_stack)
+		fp = user_backtrace(entry, fp, 0);
+}
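As the comment above stresses, the unwinder only works when user code keeps frame pointers. A hypothetical throwaway workload for exercising it (the file name and flags are an example, not part of this change):

```c
/*
 * demo.c -- hypothetical test workload for the fp unwinder above.
 * Build with frame pointers so each frame keeps the {fp, ra} pair
 * that user_backtrace() reads:
 *   gcc -O2 -fno-omit-frame-pointer demo.c -o demo
 *   perf record -e cpu-clock --call-graph fp ./demo
 */
static long fib(long n)
{
	return n < 2 ? n : fib(n - 1) + fib(n - 2);
}

int main(void)
{
	return (int)(fib(32) & 0x7f);
}
```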
+
+bool fill_callchain(unsigned long pc, void *entry)
+{
+	return perf_callchain_store(entry, pc) == 0;
+}
+
+void notrace walk_stackframe(struct task_struct *task,
+	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg);
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+			   struct pt_regs *regs)
+{
+	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
+	/* RISC-V does not support perf in guest mode. */
+	if (guest_cbs && guest_cbs->is_in_guest()) {
+		pr_warn("RISC-V does not support perf in guest mode!\n");
+		return;
+	}
+
+	walk_stackframe(NULL, regs, fill_callchain, entry);
+}
diff --git a/marvell/linux/arch/riscv/kernel/perf_event.c b/marvell/linux/arch/riscv/kernel/perf_event.c
new file mode 100644
index 0000000..91626d9
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/perf_event.c
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2009 Jaswinder Singh Rajput
+ * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
+ * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
+ * Copyright (C) 2009 Google, Inc., Stephane Eranian
+ * Copyright 2014 Tilera Corporation. All Rights Reserved.
+ * Copyright (C) 2018 Andes Technology Corporation
+ *
+ * Perf_events support for RISC-V platforms.
+ *
+ * Since the spec (as of now, Priv-Spec 1.10) does not provide enough
+ * functionality for perf events to fully work, this file provides
+ * only the very basic framework.
+ *
+ * For platform porting, please check Documentation/riscv/pmu.txt.
+ *
+ * The copyright lines above include the x86 and tile ones.
+ */
+
+#include <linux/kprobes.h>
+#include <linux/kernel.h>
+#include <linux/kdebug.h>
+#include <linux/mutex.h>
+#include <linux/bitmap.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+#include <linux/atomic.h>
+#include <linux/of.h>
+#include <asm/perf_event.h>
+
+static const struct riscv_pmu *riscv_pmu __read_mostly;
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+
+/*
+ * Hardware & cache maps and their methods
+ */
+
+static const int riscv_hw_event_map[] = {
+	[PERF_COUNT_HW_CPU_CYCLES]		= RISCV_PMU_CYCLE,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= RISCV_PMU_INSTRET,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= RISCV_OP_UNSUPP,
+	[PERF_COUNT_HW_CACHE_MISSES]		= RISCV_OP_UNSUPP,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= RISCV_OP_UNSUPP,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= RISCV_OP_UNSUPP,
+	[PERF_COUNT_HW_BUS_CYCLES]		= RISCV_OP_UNSUPP,
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+static const int riscv_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
+[PERF_COUNT_HW_CACHE_OP_MAX]
+[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+		},
+	},
+};
+
+static int riscv_map_hw_event(u64 config)
+{
+	if (config >= riscv_pmu->max_events)
+		return -EINVAL;
+
+	return riscv_pmu->hw_events[config];
+}
+
+int riscv_map_cache_decode(u64 config, unsigned int *type,
+			   unsigned int *op, unsigned int *result)
+{
+	return -ENOENT;
+}
+
+static int riscv_map_cache_event(u64 config)
+{
+	unsigned int type, op, result;
+	int err = -ENOENT;
+	int code;
+
+	err = riscv_map_cache_decode(config, &type, &op, &result);
+	if (!riscv_pmu->cache_events || err)
+		return err;
+
+	if (type >= PERF_COUNT_HW_CACHE_MAX ||
+	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
+	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+		return -EINVAL;
+
+	code = (*riscv_pmu->cache_events)[type][op][result];
+	if (code == RISCV_OP_UNSUPP)
+		return -EINVAL;
+
+	return code;
+}
+
+/*
+ * Low-level functions: reading/writing counters
+ */
+
+static inline u64 read_counter(int idx)
+{
+	u64 val = 0;
+
+	switch (idx) {
+	case RISCV_PMU_CYCLE:
+		val = csr_read(CSR_CYCLE);
+		break;
+	case RISCV_PMU_INSTRET:
+		val = csr_read(CSR_INSTRET);
+		break;
+	default:
+		WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS);
+		return -EINVAL;
+	}
+
+	return val;
+}
+
+static inline void write_counter(int idx, u64 value)
+{
+	/* currently not supported */
+	WARN_ON_ONCE(1);
+}
+
+/*
+ * pmu->read: read and update the counter
+ *
+ * Other architectures' implementations often have an xxx_perf_event_update
+ * routine, which can return counter values when called in the IRQ, but
+ * returns void when being called by the pmu->read method.
+ */
+static void riscv_pmu_read(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	u64 prev_raw_count, new_raw_count;
+	u64 oldval;
+	int idx = hwc->idx;
+	u64 delta;
+
+	do {
+		prev_raw_count = local64_read(&hwc->prev_count);
+		new_raw_count = read_counter(idx);
+
+		oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+					 new_raw_count);
+	} while (oldval != prev_raw_count);
+
+	/*
+	 * delta is the value to update the counter we maintain in the kernel.
+	 */
+	delta = (new_raw_count - prev_raw_count) &
+		((1ULL << riscv_pmu->counter_width) - 1);
+	local64_add(delta, &event->count);
+	/*
+	 * Something like local64_sub(delta, &hwc->period_left) here is
+	 * needed if there is an interrupt for perf.
+	 */
+}
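The mask in riscv_pmu_read() makes the delta wrap-safe: the subtraction is effectively performed modulo 2^counter_width. A small illustration with made-up values:

```c
/* Wrap-safe delta, as computed above, with counter_width == 63. */
u64 mask = (1ULL << 63) - 1;
u64 prev = mask - 5;			/* just below the wrap point  */
u64 now  = 10;				/* raw counter after wrapping */
u64 delta = (now - prev) & mask;	/* == 16, the true increment  */
```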
+
+/*
+ * State transition functions:
+ *
+ * stop()/start() & add()/del()
+ */
+
+/*
+ * pmu->stop: stop the counter
+ */
+static void riscv_pmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+	hwc->state |= PERF_HES_STOPPED;
+
+	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+		riscv_pmu->pmu->read(event);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
+}
+
+/*
+ * pmu->start: start the event.
+ */
+static void riscv_pmu_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+		return;
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+		/*
+		 * Set the counter to the period of the next interrupt here,
+		 * if there is one.
+		 */
+	}
+
+	hwc->state = 0;
+	perf_event_update_userpage(event);
+
+	/*
+	 * Since we cannot write to counters, this serves as an initialization
+	 * to the delta-mechanism in pmu->read(); otherwise, the delta would be
+	 * wrong when pmu->read is called for the first time.
+	 */
+	local64_set(&hwc->prev_count, read_counter(hwc->idx));
+}
+
+/*
+ * pmu->add: add the event to PMU.
+ */
+static int riscv_pmu_add(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (cpuc->n_events == riscv_pmu->num_counters)
+		return -ENOSPC;
+
+	/*
+	 * We don't have general counters, so no binding-event-to-counter
+	 * process here.
+	 *
+	 * Indexing by hwc->config generally does not work, since config may
+	 * contain extra information, but here the only info we have in
+	 * hwc->config is the event index.
+	 */
+	hwc->idx = hwc->config;
+	cpuc->events[hwc->idx] = event;
+	cpuc->n_events++;
+
+	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+	if (flags & PERF_EF_START)
+		riscv_pmu->pmu->start(event, PERF_EF_RELOAD);
+
+	return 0;
+}
+
+/*
+ * pmu->del: delete the event from PMU.
+ */
+static void riscv_pmu_del(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+
+	cpuc->events[hwc->idx] = NULL;
+	cpuc->n_events--;
+	riscv_pmu->pmu->stop(event, PERF_EF_UPDATE);
+	perf_event_update_userpage(event);
+}
+
+/*
+ * Interrupt: a skeleton for reference.
+ */
+
+static DEFINE_MUTEX(pmc_reserve_mutex);
+
+irqreturn_t riscv_base_pmu_handle_irq(int irq_num, void *dev)
+{
+	return IRQ_NONE;
+}
+
+static int reserve_pmc_hardware(void)
+{
+	int err = 0;
+
+	mutex_lock(&pmc_reserve_mutex);
+	if (riscv_pmu->irq >= 0 && riscv_pmu->handle_irq) {
+		err = request_irq(riscv_pmu->irq, riscv_pmu->handle_irq,
+				  IRQF_PERCPU, "riscv-base-perf", NULL);
+	}
+	mutex_unlock(&pmc_reserve_mutex);
+
+	return err;
+}
+
+void release_pmc_hardware(void)
+{
+	mutex_lock(&pmc_reserve_mutex);
+	if (riscv_pmu->irq >= 0)
+		free_irq(riscv_pmu->irq, NULL);
+	mutex_unlock(&pmc_reserve_mutex);
+}
+
+/*
+ * Event Initialization/Finalization
+ */
+
+static atomic_t riscv_active_events = ATOMIC_INIT(0);
+
+static void riscv_event_destroy(struct perf_event *event)
+{
+	if (atomic_dec_return(&riscv_active_events) == 0)
+		release_pmc_hardware();
+}
+
+static int riscv_event_init(struct perf_event *event)
+{
+	struct perf_event_attr *attr = &event->attr;
+	struct hw_perf_event *hwc = &event->hw;
+	int err;
+	int code;
+
+	if (atomic_inc_return(&riscv_active_events) == 1) {
+		err = reserve_pmc_hardware();
+
+		if (err) {
+			pr_warn("PMC hardware not available\n");
+			atomic_dec(&riscv_active_events);
+			return -EBUSY;
+		}
+	}
+
+	switch (event->attr.type) {
+	case PERF_TYPE_HARDWARE:
+		code = riscv_pmu->map_hw_event(attr->config);
+		break;
+	case PERF_TYPE_HW_CACHE:
+		code = riscv_pmu->map_cache_event(attr->config);
+		break;
+	case PERF_TYPE_RAW:
+		return -EOPNOTSUPP;
+	default:
+		return -ENOENT;
+	}
+
+	event->destroy = riscv_event_destroy;
+	if (code < 0) {
+		event->destroy(event);
+		return code;
+	}
+
+	/*
+	 * idx is set to -1 because the index of a general event should not be
+	 * decided until binding to some counter in pmu->add().
+	 *
+	 * But since we don't have such support, later in pmu->add(), we just
+	 * use hwc->config as the index instead.
+	 */
+	hwc->config = code;
+	hwc->idx = -1;
+
+	return 0;
+}
+
+/*
+ * Initialization
+ */
+
+static struct pmu min_pmu = {
+	.name		= "riscv-base",
+	.event_init	= riscv_event_init,
+	.add		= riscv_pmu_add,
+	.del		= riscv_pmu_del,
+	.start		= riscv_pmu_start,
+	.stop		= riscv_pmu_stop,
+	.read		= riscv_pmu_read,
+};
+
+static const struct riscv_pmu riscv_base_pmu = {
+	.pmu = &min_pmu,
+	.max_events = ARRAY_SIZE(riscv_hw_event_map),
+	.map_hw_event = riscv_map_hw_event,
+	.hw_events = riscv_hw_event_map,
+	.map_cache_event = riscv_map_cache_event,
+	.cache_events = &riscv_cache_event_map,
+	.counter_width = 63,
+	.num_counters = RISCV_BASE_COUNTERS + 0,
+	.handle_irq = &riscv_base_pmu_handle_irq,
+
+	/* This means this PMU has no IRQ. */
+	.irq = -1,
+};
+
+static const struct of_device_id riscv_pmu_of_ids[] = {
+	{.compatible = "riscv,base-pmu",	.data = &riscv_base_pmu},
+	{ /* sentinel value */ }
+};
+
+int __init init_hw_perf_events(void)
+{
+	struct device_node *node = of_find_node_by_type(NULL, "pmu");
+	const struct of_device_id *of_id;
+
+	riscv_pmu = &riscv_base_pmu;
+
+	if (node) {
+		of_id = of_match_node(riscv_pmu_of_ids, node);
+
+		if (of_id)
+			riscv_pmu = of_id->data;
+		of_node_put(node);
+	}
+
+	perf_pmu_register(riscv_pmu->pmu, "cpu", PERF_TYPE_RAW);
+	return 0;
+}
+arch_initcall(init_hw_perf_events);
diff --git a/marvell/linux/arch/riscv/kernel/perf_regs.c b/marvell/linux/arch/riscv/kernel/perf_regs.c
new file mode 100644
index 0000000..04a38fb
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/perf_regs.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+	if (WARN_ON_ONCE((u32)idx >= PERF_REG_RISCV_MAX))
+		return 0;
+
+	return ((unsigned long *)regs)[idx];
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_RISCV_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+	if (!mask || mask & REG_RESERVED)
+		return -EINVAL;
+
+	return 0;
+}
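REG_RESERVED keeps exactly the bits that name real registers, so any sample mask with a bit at or above PERF_REG_RISCV_MAX is rejected. A small illustration (the concrete width of 32 is an assumption for the example):

```c
/* Hypothetical illustration assuming PERF_REG_RISCV_MAX == 32. */
u64 reserved = ~((1ULL << 32) - 1);	/* high 32 bits are reserved    */
u64 ok  = 0x5;				/* two low register bits: valid */
u64 bad = 1ULL << 32;			/* reserved bit set: -EINVAL    */
```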
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+#if __riscv_xlen == 64
+	return PERF_SAMPLE_REGS_ABI_64;
+#else
+	return PERF_SAMPLE_REGS_ABI_32;
+#endif
+}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+			struct pt_regs *regs,
+			struct pt_regs *regs_user_copy)
+{
+	regs_user->regs = task_pt_regs(current);
+	regs_user->abi = perf_reg_abi(current);
+}
diff --git a/marvell/linux/arch/riscv/kernel/process.c b/marvell/linux/arch/riscv/kernel/process.c
new file mode 100644
index 0000000..9d4b409
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/process.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ *  Chen Liqin <liqin.chen@sunplusct.com>
+ *  Lennox Wu <lennox.wu@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/cpu.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/tick.h>
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+
+#include <asm/unistd.h>
+#include <asm/processor.h>
+#include <asm/csr.h>
+#include <asm/string.h>
+#include <asm/switch_to.h>
+#include <asm/thread_info.h>
+
+extern asmlinkage void ret_from_fork(void);
+extern asmlinkage void ret_from_kernel_thread(void);
+
+void arch_cpu_idle(void)
+{
+	wait_for_interrupt();
+	local_irq_enable();
+}
+
+void show_regs(struct pt_regs *regs)
+{
+	show_regs_print_info(KERN_DEFAULT);
+
+	pr_cont("sepc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
+		regs->sepc, regs->ra, regs->sp);
+	pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
+		regs->gp, regs->tp, regs->t0);
+	pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
+		regs->t1, regs->t2, regs->s0);
+	pr_cont(" s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
+		regs->s1, regs->a0, regs->a1);
+	pr_cont(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
+		regs->a2, regs->a3, regs->a4);
+	pr_cont(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
+		regs->a5, regs->a6, regs->a7);
+	pr_cont(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
+		regs->s2, regs->s3, regs->s4);
+	pr_cont(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
+		regs->s5, regs->s6, regs->s7);
+	pr_cont(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
+		regs->s8, regs->s9, regs->s10);
+	pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
+		regs->s11, regs->t3, regs->t4);
+	pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
+		regs->t5, regs->t6);
+
+	pr_cont("sstatus: " REG_FMT " sbadaddr: " REG_FMT " scause: " REG_FMT "\n",
+		regs->sstatus, regs->sbadaddr, regs->scause);
+}
+
+void start_thread(struct pt_regs *regs, unsigned long pc,
+	unsigned long sp)
+{
+	regs->sstatus = SR_SPIE;
+	if (has_fpu) {
+		regs->sstatus |= SR_FS_INITIAL;
+		/*
+		 * Restore the initial value to the FP register
+		 * before starting the user program.
+		 */
+		fstate_restore(current, regs);
+	}
+	regs->sepc = pc;
+	regs->sp = sp;
+	set_fs(USER_DS);
+}
+
+void flush_thread(void)
+{
+#ifdef CONFIG_FPU
+	/*
+	 * Reset FPU state and context
+	 *	frm: round to nearest, ties to even (IEEE default)
+	 *	fflags: accrued exceptions cleared
+	 */
+	fstate_off(current, task_pt_regs(current));
+	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
+#endif
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+	fstate_save(src, task_pt_regs(src));
+	*dst = *src;
+	return 0;
+}
+
+int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+	unsigned long arg, struct task_struct *p, unsigned long tls)
+{
+	struct pt_regs *childregs = task_pt_regs(p);
+
+	memset(&p->thread.s, 0, sizeof(p->thread.s));
+
+	/* p->thread holds context to be restored by __switch_to() */
+	if (unlikely(p->flags & PF_KTHREAD)) {
+		/* Kernel thread */
+		const register unsigned long gp __asm__ ("gp");
+		memset(childregs, 0, sizeof(struct pt_regs));
+		childregs->gp = gp;
+		childregs->sstatus = SR_SPP | SR_SPIE; /* Supervisor, irqs on */
+
+		p->thread.ra = (unsigned long)ret_from_kernel_thread;
+		p->thread.s[0] = usp; /* fn */
+		p->thread.s[1] = arg;
+	} else {
+		*childregs = *(current_pt_regs());
+		if (usp) /* User fork */
+			childregs->sp = usp;
+		if (clone_flags & CLONE_SETTLS)
+			childregs->tp = tls;
+		childregs->a0 = 0; /* Return value of fork() */
+		p->thread.ra = (unsigned long)ret_from_fork;
+	}
+	p->thread.sp = (unsigned long)childregs; /* kernel sp */
+	return 0;
+}
diff --git a/marvell/linux/arch/riscv/kernel/ptrace.c b/marvell/linux/arch/riscv/kernel/ptrace.c
new file mode 100644
index 0000000..1252113
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/ptrace.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ * Copyright 2015 Regents of the University of California
+ * Copyright 2017 SiFive
+ *
+ * Copied from arch/tile/kernel/ptrace.c
+ */
+
+#include <asm/ptrace.h>
+#include <asm/syscall.h>
+#include <asm/thread_info.h>
+#include <linux/audit.h>
+#include <linux/ptrace.h>
+#include <linux/elf.h>
+#include <linux/regset.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/tracehook.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+enum riscv_regset {
+	REGSET_X,
+#ifdef CONFIG_FPU
+	REGSET_F,
+#endif
+};
+
+static int riscv_gpr_get(struct task_struct *target,
+			 const struct user_regset *regset,
+			 unsigned int pos, unsigned int count,
+			 void *kbuf, void __user *ubuf)
+{
+	struct pt_regs *regs;
+
+	regs = task_pt_regs(target);
+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
+}
+
+static int riscv_gpr_set(struct task_struct *target,
+			 const struct user_regset *regset,
+			 unsigned int pos, unsigned int count,
+			 const void *kbuf, const void __user *ubuf)
+{
+	int ret;
+	struct pt_regs *regs;
+
+	regs = task_pt_regs(target);
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
+	return ret;
+}
+
+#ifdef CONFIG_FPU
+static int riscv_fpr_get(struct task_struct *target,
+			 const struct user_regset *regset,
+			 unsigned int pos, unsigned int count,
+			 void *kbuf, void __user *ubuf)
+{
+	int ret;
+	struct __riscv_d_ext_state *fstate = &target->thread.fstate;
+
+	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, fstate, 0,
+				  offsetof(struct __riscv_d_ext_state, fcsr));
+	if (!ret) {
+		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, fstate, 0,
+					  offsetof(struct __riscv_d_ext_state, fcsr) +
+					  sizeof(fstate->fcsr));
+	}
+
+	return ret;
+}
+
+static int riscv_fpr_set(struct task_struct *target,
+			 const struct user_regset *regset,
+			 unsigned int pos, unsigned int count,
+			 const void *kbuf, const void __user *ubuf)
+{
+	int ret;
+	struct __riscv_d_ext_state *fstate = &target->thread.fstate;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fstate, 0,
+				 offsetof(struct __riscv_d_ext_state, fcsr));
+	if (!ret) {
+		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fstate, 0,
+					 offsetof(struct __riscv_d_ext_state, fcsr) +
+					 sizeof(fstate->fcsr));
+	}
+
+	return ret;
+}
+#endif
+
+static const struct user_regset riscv_user_regset[] = {
+	[REGSET_X] = {
+		.core_note_type = NT_PRSTATUS,
+		.n = ELF_NGREG,
+		.size = sizeof(elf_greg_t),
+		.align = sizeof(elf_greg_t),
+		.get = &riscv_gpr_get,
+		.set = &riscv_gpr_set,
+	},
+#ifdef CONFIG_FPU
+	[REGSET_F] = {
+		.core_note_type = NT_PRFPREG,
+		.n = ELF_NFPREG,
+		.size = sizeof(elf_fpreg_t),
+		.align = sizeof(elf_fpreg_t),
+		.get = &riscv_fpr_get,
+		.set = &riscv_fpr_set,
+	},
+#endif
+};
+
+static const struct user_regset_view riscv_user_native_view = {
+	.name = "riscv",
+	.e_machine = EM_RISCV,
+	.regsets = riscv_user_regset,
+	.n = ARRAY_SIZE(riscv_user_regset),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+	return &riscv_user_native_view;
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+		 unsigned long addr, unsigned long data)
+{
+	long ret = -EIO;
+
+	switch (request) {
+	default:
+		ret = ptrace_request(child, request, addr, data);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Allows PTRACE_SYSCALL to work.  These are called from entry.S in
+ * {handle,ret_from}_syscall.
+ */
+__visible void do_syscall_trace_enter(struct pt_regs *regs)
+{
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		if (tracehook_report_syscall_entry(regs))
+			syscall_set_nr(current, regs, -1);
+
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+		trace_sys_enter(regs, syscall_get_nr(current, regs));
+#endif
+
+	audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3);
+}
+
+__visible void do_syscall_trace_exit(struct pt_regs *regs)
+{
+	audit_syscall_exit(regs);
+
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall_exit(regs, 0);
+
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+		trace_sys_exit(regs, regs_return_value(regs));
+#endif
+}
diff --git a/marvell/linux/arch/riscv/kernel/reset.c b/marvell/linux/arch/riscv/kernel/reset.c
new file mode 100644
index 0000000..aa56bb1
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/reset.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#include <linux/reboot.h>
+#include <linux/pm.h>
+#include <asm/sbi.h>
+
+static void default_power_off(void)
+{
+	sbi_shutdown();
+	while (1);
+}
+
+void (*pm_power_off)(void) = default_power_off;
+EXPORT_SYMBOL(pm_power_off);
+
+void machine_restart(char *cmd)
+{
+	do_kernel_restart(cmd);
+	while (1);
+}
+
+void machine_halt(void)
+{
+	pm_power_off();
+}
+
+void machine_power_off(void)
+{
+	pm_power_off();
+}
diff --git a/marvell/linux/arch/riscv/kernel/riscv_ksyms.c b/marvell/linux/arch/riscv/kernel/riscv_ksyms.c
new file mode 100644
index 0000000..4800cf7
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/riscv_ksyms.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 Zihao Yu
+ */
+
+#include <linux/export.h>
+#include <linux/uaccess.h>
+
+/*
+ * Assembly functions that may be used (directly or indirectly) by modules
+ */
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__asm_copy_to_user);
+EXPORT_SYMBOL(__asm_copy_from_user);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
diff --git a/marvell/linux/arch/riscv/kernel/setup.c b/marvell/linux/arch/riscv/kernel/setup.c
new file mode 100644
index 0000000..845ae0e
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/setup.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ *  Chen Liqin <liqin.chen@sunplusct.com>
+ *  Lennox Wu <lennox.wu@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/sched.h>
+#include <linux/console.h>
+#include <linux/screen_info.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/sched/task.h>
+#include <linux/swiotlb.h>
+
+#include <asm/setup.h>
+#include <asm/sections.h>
+#include <asm/pgtable.h>
+#include <asm/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/thread_info.h>
+
+#include "head.h"
+
+#ifdef CONFIG_DUMMY_CONSOLE
+struct screen_info screen_info = {
+	.orig_video_lines	= 30,
+	.orig_video_cols	= 80,
+	.orig_video_mode	= 0,
+	.orig_video_ega_bx	= 0,
+	.orig_video_isVGA	= 1,
+	.orig_video_points	= 8
+};
+#endif
+
+/* The lucky hart to first increment this variable will boot the other cores */
+atomic_t hart_lottery;
+unsigned long boot_cpu_hartid;
+
+void __init parse_dtb(void)
+{
+	if (early_init_dt_scan(dtb_early_va))
+		return;
+
+	pr_err("No DTB passed to the kernel\n");
+#ifdef CONFIG_CMDLINE_FORCE
+	strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+	pr_info("Forcing kernel command line to: %s\n", boot_command_line);
+#endif
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+	init_mm.start_code = (unsigned long) _stext;
+	init_mm.end_code   = (unsigned long) _etext;
+	init_mm.end_data   = (unsigned long) _edata;
+	init_mm.brk        = (unsigned long) _end;
+
+	*cmdline_p = boot_command_line;
+
+	parse_early_param();
+
+	setup_bootmem();
+	paging_init();
+	unflatten_device_tree();
+
+#ifdef CONFIG_SWIOTLB
+	swiotlb_init(1);
+#endif
+
+#ifdef CONFIG_SMP
+	setup_smp();
+#endif
+
+#ifdef CONFIG_DUMMY_CONSOLE
+	conswitchp = &dummy_con;
+#endif
+
+	riscv_fill_hwcap();
+}
diff --git a/marvell/linux/arch/riscv/kernel/signal.c b/marvell/linux/arch/riscv/kernel/signal.c
new file mode 100644
index 0000000..d0f6f21
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/signal.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ *  Chen Liqin <liqin.chen@sunplusct.com>
+ *  Lennox Wu <lennox.wu@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/tracehook.h>
+#include <linux/linkage.h>
+
+#include <asm/ucontext.h>
+#include <asm/vdso.h>
+#include <asm/switch_to.h>
+#include <asm/csr.h>
+
+#define DEBUG_SIG 0
+
+struct rt_sigframe {
+	struct siginfo info;
+	struct ucontext uc;
+};
+
+#ifdef CONFIG_FPU
+static long restore_fp_state(struct pt_regs *regs,
+			     union __riscv_fp_state __user *sc_fpregs)
+{
+	long err;
+	struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
+	size_t i;
+
+	err = __copy_from_user(&current->thread.fstate, state, sizeof(*state));
+	if (unlikely(err))
+		return err;
+
+	fstate_restore(current, regs);
+
+	/* We support no other extension state at this time. */
+	for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
+		u32 value;
+
+		err = __get_user(value, &sc_fpregs->q.reserved[i]);
+		if (unlikely(err))
+			break;
+		if (value != 0)
+			return -EINVAL;
+	}
+
+	return err;
+}
+
+static long save_fp_state(struct pt_regs *regs,
+			  union __riscv_fp_state __user *sc_fpregs)
+{
+	long err;
+	struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
+	size_t i;
+
+	fstate_save(current, regs);
+	err = __copy_to_user(state, &current->thread.fstate, sizeof(*state));
+	if (unlikely(err))
+		return err;
+
+	/* We support no other extension state at this time. */
+	for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
+		err = __put_user(0, &sc_fpregs->q.reserved[i]);
+		if (unlikely(err))
+			break;
+	}
+
+	return err;
+}
+#else
+#define save_fp_state(task, regs) (0)
+#define restore_fp_state(task, regs) (0)
+#endif
+
+static long restore_sigcontext(struct pt_regs *regs,
+	struct sigcontext __user *sc)
+{
+	long err;
+	/* sc_regs is structured the same as the start of pt_regs */
+	err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs));
+	/* Restore the floating-point state. */
+	if (has_fpu)
+		err |= restore_fp_state(regs, &sc->sc_fpregs);
+	return err;
+}
+
+SYSCALL_DEFINE0(rt_sigreturn)
+{
+	struct pt_regs *regs = current_pt_regs();
+	struct rt_sigframe __user *frame;
+	struct task_struct *task;
+	sigset_t set;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current->restart_block.fn = do_no_restart_syscall;
+
+	frame = (struct rt_sigframe __user *)regs->sp;
+
+	if (!access_ok(frame, sizeof(*frame)))
+		goto badframe;
+
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+		goto badframe;
+
+	set_current_blocked(&set);
+
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+		goto badframe;
+
+	if (restore_altstack(&frame->uc.uc_stack))
+		goto badframe;
+
+	return regs->a0;
+
+badframe:
+	task = current;
+	if (show_unhandled_signals) {
+		pr_info_ratelimited(
+			"%s[%d]: bad frame in %s: frame=%p pc=%p sp=%p\n",
+			task->comm, task_pid_nr(task), __func__,
+			frame, (void *)regs->sepc, (void *)regs->sp);
+	}
+	force_sig(SIGSEGV);
+	return 0;
+}
+
+static long setup_sigcontext(struct rt_sigframe __user *frame,
+	struct pt_regs *regs)
+{
+	struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+	long err;
+	/* sc_regs is structured the same as the start of pt_regs */
+	err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs));
+	/* Save the floating-point state. */
+	if (has_fpu)
+		err |= save_fp_state(regs, &sc->sc_fpregs);
+	return err;
+}
+
+static inline void __user *get_sigframe(struct ksignal *ksig,
+	struct pt_regs *regs, size_t framesize)
+{
+	unsigned long sp;
+	/* Default to using normal stack */
+	sp = regs->sp;
+
+	/*
+	 * If we are on the alternate signal stack and would overflow it, don't.
+	 * Return an always-bogus address instead so we will die with SIGSEGV.
+	 */
+	if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
+		return (void __user __force *)(-1UL);
+
+	/* This is the X/Open sanctioned signal stack switching. */
+	sp = sigsp(sp, ksig) - framesize;
+
+	/* Align the stack frame. */
+	sp &= ~0xfUL;
+
+	return (void __user *)sp;
+}
+
+
+static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+	struct pt_regs *regs)
+{
+	struct rt_sigframe __user *frame;
+	long err = 0;
+
+	frame = get_sigframe(ksig, regs, sizeof(*frame));
+	if (!access_ok(frame, sizeof(*frame)))
+		return -EFAULT;
+
+	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+
+	/* Create the ucontext. */
+	err |= __put_user(0, &frame->uc.uc_flags);
+	err |= __put_user(NULL, &frame->uc.uc_link);
+	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+	err |= setup_sigcontext(frame, regs);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+	if (err)
+		return -EFAULT;
+
+	/* Set up to return from userspace. */
+	regs->ra = (unsigned long)VDSO_SYMBOL(
+		current->mm->context.vdso, rt_sigreturn);
+
+	/*
+	 * Set up registers for signal handler.
+	 * Registers that we don't modify keep the value they had from
+	 * user-space at the time we took the signal.
+	 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+	 * since some things rely on this (e.g. glibc's debug/segfault.c).
+	 */
+	regs->sepc = (unsigned long)ksig->ka.sa.sa_handler;
+	regs->sp = (unsigned long)frame;
+	regs->a0 = ksig->sig;                     /* a0: signal number */
+	regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */
+	regs->a2 = (unsigned long)(&frame->uc);   /* a2: ucontext pointer */
+
+#if DEBUG_SIG
+	pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n",
+		current->comm, task_pid_nr(current), ksig->sig,
+		(void *)regs->sepc, (void *)regs->ra, frame);
+#endif
+
+	return 0;
+}
+
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+	sigset_t *oldset = sigmask_to_save();
+	int ret;
+
+	/* Are we from a system call? */
+	if (regs->scause == EXC_SYSCALL) {
+		/* Avoid additional syscall restarting via ret_from_exception */
+		regs->scause = -1UL;
+
+		/* If so, check system call restarting.. */
+		switch (regs->a0) {
+		case -ERESTART_RESTARTBLOCK:
+		case -ERESTARTNOHAND:
+			regs->a0 = -EINTR;
+			break;
+
+		case -ERESTARTSYS:
+			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+				regs->a0 = -EINTR;
+				break;
+			}
+			/* fallthrough */
+		case -ERESTARTNOINTR:
+			regs->a0 = regs->orig_a0;
+			regs->sepc -= 0x4;
+			break;
+		}
+	}
+
+	/* Set up the stack frame */
+	ret = setup_rt_frame(ksig, oldset, regs);
+
+	signal_setup_done(ret, ksig, 0);
+}
+
+static void do_signal(struct pt_regs *regs)
+{
+	struct ksignal ksig;
+
+	if (get_signal(&ksig)) {
+		/* Actually deliver the signal */
+		handle_signal(&ksig, regs);
+		return;
+	}
+
+	/* Did we come from a system call? */
+	if (regs->scause == EXC_SYSCALL) {
+		/* Avoid additional syscall restarting via ret_from_exception */
+		regs->scause = -1UL;
+
+		/* Restart the system call - no handlers present */
+		switch (regs->a0) {
+		case -ERESTARTNOHAND:
+		case -ERESTARTSYS:
+		case -ERESTARTNOINTR:
+			regs->a0 = regs->orig_a0;
+			regs->sepc -= 0x4;
+			break;
+		case -ERESTART_RESTARTBLOCK:
+			regs->a0 = regs->orig_a0;
+			regs->a7 = __NR_restart_syscall;
+			regs->sepc -= 0x4;
+			break;
+		}
+	}
+
+	/*
+	 * If there is no signal to deliver, we just put the saved
+	 * sigmask back.
+	 */
+	restore_saved_sigmask();
+}
+
+/*
+ * notification of userspace execution resumption
+ * - triggered by the _TIF_WORK_MASK flags
+ */
+asmlinkage __visible void do_notify_resume(struct pt_regs *regs,
+					   unsigned long thread_info_flags)
+{
+	/* Handle pending signal delivery */
+	if (thread_info_flags & _TIF_SIGPENDING)
+		do_signal(regs);
+
+	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+	}
+}
diff --git a/marvell/linux/arch/riscv/kernel/smp.c b/marvell/linux/arch/riscv/kernel/smp.c
new file mode 100644
index 0000000..098c04a
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/smp.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SMP initialisation and IPI support
+ * Based on arch/arm64/kernel/smp.c
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+
+#include <asm/sbi.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+enum ipi_message_type {
+	IPI_RESCHEDULE,
+	IPI_CALL_FUNC,
+	IPI_CPU_STOP,
+	IPI_MAX
+};
+
+unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
+	[0 ... NR_CPUS-1] = INVALID_HARTID
+};
+
+void __init smp_setup_processor_id(void)
+{
+	cpuid_to_hartid_map(0) = boot_cpu_hartid;
+}
+
+/* A collection of single-bit IPI messages. */
+static struct {
+	unsigned long stats[IPI_MAX] ____cacheline_aligned;
+	unsigned long bits ____cacheline_aligned;
+} ipi_data[NR_CPUS] __cacheline_aligned;
+
+int riscv_hartid_to_cpuid(int hartid)
+{
+	int i;
+
+	for (i = 0; i < NR_CPUS; i++)
+		if (cpuid_to_hartid_map(i) == hartid)
+			return i;
+
+	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
+	return -ENOENT;
+}
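+
+/*
+ * Illustration: the map is seeded for the boot hart in
+ * smp_setup_processor_id() above, so logical cpu 0 is always the boot
+ * hart.  On a board that boots on hartid 1, for example,
+ * cpuid_to_hartid_map(0) == 1 and riscv_hartid_to_cpuid(1) == 0.
+ */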
+
+void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
+{
+	int cpu;
+
+	cpumask_clear(out);
+	for_each_cpu(cpu, in)
+		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
+}
+
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+	return phys_id == cpuid_to_hartid_map(cpu);
+}
+
+/* Unsupported */
+int setup_profiling_timer(unsigned int multiplier)
+{
+	return -EINVAL;
+}
+
+static void ipi_stop(void)
+{
+	set_cpu_online(smp_processor_id(), false);
+	while (1)
+		wait_for_interrupt();
+}
+
+static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
+{
+	struct cpumask hartid_mask;
+	int cpu;
+
+	smp_mb__before_atomic();
+	for_each_cpu(cpu, mask)
+		set_bit(op, &ipi_data[cpu].bits);
+	smp_mb__after_atomic();
+
+	riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
+	sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+
+static void send_ipi_single(int cpu, enum ipi_message_type op)
+{
+	int hartid = cpuid_to_hartid_map(cpu);
+
+	smp_mb__before_atomic();
+	set_bit(op, &ipi_data[cpu].bits);
+	smp_mb__after_atomic();
+
+	sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
+}
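+
+/*
+ * Rough sketch (assuming the SBI v0.1 sbi_send_ipi() interface used
+ * above): sending a cross-call is "set a bit, then ring the doorbell":
+ *
+ *	set_bit(IPI_CALL_FUNC, &ipi_data[cpu].bits);
+ *	sbi_send_ipi(cpumask_bits(&hartid_mask));
+ *
+ * The target hart takes a supervisor software interrupt and drains its
+ * pending bits in riscv_software_interrupt() below.
+ */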
+
+static inline void clear_ipi(void)
+{
+	csr_clear(CSR_SIP, SIE_SSIE);
+}
+
+void riscv_software_interrupt(void)
+{
+	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
+	unsigned long *stats = ipi_data[smp_processor_id()].stats;
+
+	clear_ipi();
+
+	while (true) {
+		unsigned long ops;
+
+		/* Order bit clearing and data access. */
+		mb();
+
+		ops = xchg(pending_ipis, 0);
+		if (ops == 0)
+			return;
+
+		if (ops & (1 << IPI_RESCHEDULE)) {
+			stats[IPI_RESCHEDULE]++;
+			scheduler_ipi();
+		}
+
+		if (ops & (1 << IPI_CALL_FUNC)) {
+			stats[IPI_CALL_FUNC]++;
+			generic_smp_call_function_interrupt();
+		}
+
+		if (ops & (1 << IPI_CPU_STOP)) {
+			stats[IPI_CPU_STOP]++;
+			ipi_stop();
+		}
+
+		BUG_ON((ops >> IPI_MAX) != 0);
+
+		/* Order data access and bit testing. */
+		mb();
+	}
+}
+
+static const char * const ipi_names[] = {
+	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
+	[IPI_CALL_FUNC]		= "Function call interrupts",
+	[IPI_CPU_STOP]		= "CPU stop interrupts",
+};
+
+void show_ipi_stats(struct seq_file *p, int prec)
+{
+	unsigned int cpu, i;
+
+	for (i = 0; i < IPI_MAX; i++) {
+		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+			   prec >= 4 ? " " : "");
+		for_each_online_cpu(cpu)
+			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
+		seq_printf(p, " %s\n", ipi_names[i]);
+	}
+}
+
+void arch_send_call_function_ipi_mask(struct cpumask *mask)
+{
+	send_ipi_mask(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_ipi_single(cpu, IPI_CALL_FUNC);
+}
+
+void smp_send_stop(void)
+{
+	unsigned long timeout;
+
+	if (num_online_cpus() > 1) {
+		cpumask_t mask;
+
+		cpumask_copy(&mask, cpu_online_mask);
+		cpumask_clear_cpu(smp_processor_id(), &mask);
+
+		if (system_state <= SYSTEM_RUNNING)
+			pr_crit("SMP: stopping secondary CPUs\n");
+		send_ipi_mask(&mask, IPI_CPU_STOP);
+	}
+
+	/* Wait up to one second for other CPUs to stop */
+	timeout = USEC_PER_SEC;
+	while (num_online_cpus() > 1 && timeout--)
+		udelay(1);
+
+	if (num_online_cpus() > 1)
+		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+			   cpumask_pr_args(cpu_online_mask));
+}
+
+void smp_send_reschedule(int cpu)
+{
+	send_ipi_single(cpu, IPI_RESCHEDULE);
+}
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
diff --git a/marvell/linux/arch/riscv/kernel/smpboot.c b/marvell/linux/arch/riscv/kernel/smpboot.c
new file mode 100644
index 0000000..0576a6b
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/smpboot.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SMP initialisation and IPI support
+ * Based on arch/arm64/kernel/smp.c
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/arch_topology.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/percpu.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/sched/task_stack.h>
+#include <linux/sched/mm.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/sections.h>
+#include <asm/sbi.h>
+#include <asm/smp.h>
+
+#include "head.h"
+
+void *__cpu_up_stack_pointer[NR_CPUS];
+void *__cpu_up_task_pointer[NR_CPUS];
+static DECLARE_COMPLETION(cpu_running);
+
+void __init smp_prepare_boot_cpu(void)
+{
+	init_cpu_topology();
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	int cpuid;
+
+	store_cpu_topology(smp_processor_id());
+
+	/* This covers the non-SMP use case mandated by the "nosmp" option */
+	if (max_cpus == 0)
+		return;
+
+	for_each_possible_cpu(cpuid) {
+		if (cpuid == smp_processor_id())
+			continue;
+		set_cpu_present(cpuid, true);
+	}
+}
+
+void __init setup_smp(void)
+{
+	struct device_node *dn;
+	int hart;
+	bool found_boot_cpu = false;
+	int cpuid = 1;
+
+	for_each_of_cpu_node(dn) {
+		hart = riscv_of_processor_hartid(dn);
+		if (hart < 0)
+			continue;
+
+		if (hart == cpuid_to_hartid_map(0)) {
+			BUG_ON(found_boot_cpu);
+			found_boot_cpu = true;
+			continue;
+		}
+		if (cpuid >= NR_CPUS) {
+			pr_warn("Invalid cpuid [%d] for hartid [%d]\n",
+				cpuid, hart);
+			break;
+		}
+
+		cpuid_to_hartid_map(cpuid) = hart;
+		cpuid++;
+	}
+
+	BUG_ON(!found_boot_cpu);
+
+	if (cpuid > nr_cpu_ids)
+		pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
+			cpuid, nr_cpu_ids);
+
+	for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
+		if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
+			set_cpu_possible(cpuid, true);
+	}
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+	int ret = 0;
+	int hartid = cpuid_to_hartid_map(cpu);
+	tidle->thread_info.cpu = cpu;
+
+	/*
+	 * On RISC-V systems, all harts boot of their own accord.  Our _start
+	 * selects the first hart to boot the kernel and causes the remainder
+	 * of the harts to spin in a loop waiting for their stack pointer to be
+	 * set up by that main hart.  Writing __cpu_up_stack_pointer signals to
+	 * the spinning harts that they can continue the boot process.
+	 */
+	smp_mb();
+	WRITE_ONCE(__cpu_up_stack_pointer[hartid],
+		  task_stack_page(tidle) + THREAD_SIZE);
+	WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle);
+
+	lockdep_assert_held(&cpu_running);
+	wait_for_completion_timeout(&cpu_running,
+				    msecs_to_jiffies(1000));
+
+	if (!cpu_online(cpu)) {
+		pr_crit("CPU%u: failed to come online\n", cpu);
+		ret = -EIO;
+	}
+
+	return ret;
+}
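+
+/*
+ * Rough sketch of the other half of this handshake; the real loop lives
+ * in head.S and this is only an approximation of it:
+ *
+ *	while (!(sp = READ_ONCE(__cpu_up_stack_pointer[hartid])))
+ *		;	<- spin until the boot hart publishes our stack
+ *	tp = __cpu_up_task_pointer[hartid];
+ *	<jump to smp_callin() on the new stack>
+ */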
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+/*
+ * C entry point for a secondary processor.
+ */
+asmlinkage __visible void __init smp_callin(void)
+{
+	struct mm_struct *mm = &init_mm;
+
+	/* All kernel threads share the same mm context.  */
+	mmgrab(mm);
+	current->active_mm = mm;
+
+	trap_init();
+	store_cpu_topology(smp_processor_id());
+	notify_cpu_starting(smp_processor_id());
+	set_cpu_online(smp_processor_id(), true);
+	/*
+	 * Remote TLB flushes are ignored while the CPU is offline, so emit
+	 * a local TLB flush right now just in case.
+	 */
+	local_flush_tlb_all();
+	complete(&cpu_running);
+	/*
+	 * Disable preemption before enabling interrupts, so we don't try to
+	 * schedule a CPU that hasn't actually started yet.
+	 */
+	preempt_disable();
+	local_irq_enable();
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
diff --git a/marvell/linux/arch/riscv/kernel/stacktrace.c b/marvell/linux/arch/riscv/kernel/stacktrace.c
new file mode 100644
index 0000000..5ba4d23
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/stacktrace.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2008 ARM Limited
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#include <linux/export.h>
+#include <linux/kallsyms.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/stacktrace.h>
+#include <linux/ftrace.h>
+
+#ifdef CONFIG_FRAME_POINTER
+
+struct stackframe {
+	unsigned long fp;
+	unsigned long ra;
+};
+
+void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+			     bool (*fn)(unsigned long, void *), void *arg)
+{
+	unsigned long fp, sp, pc;
+
+	if (regs) {
+		fp = frame_pointer(regs);
+		sp = user_stack_pointer(regs);
+		pc = instruction_pointer(regs);
+	} else if (task == NULL || task == current) {
+		const register unsigned long current_sp __asm__ ("sp");
+		fp = (unsigned long)__builtin_frame_address(0);
+		sp = current_sp;
+		pc = (unsigned long)walk_stackframe;
+	} else {
+		/* task blocked in __switch_to */
+		fp = task->thread.s[0];
+		sp = task->thread.sp;
+		pc = task->thread.ra;
+	}
+
+	for (;;) {
+		unsigned long low, high;
+		struct stackframe *frame;
+
+		if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
+			break;
+
+		/* Validate frame pointer */
+		low = sp + sizeof(struct stackframe);
+		high = ALIGN(sp, THREAD_SIZE);
+		if (unlikely(fp < low || fp > high || fp & 0x7))
+			break;
+		/* Unwind stack frame */
+		frame = (struct stackframe *)fp - 1;
+		sp = fp;
+		fp = frame->fp;
+		pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+					   (unsigned long *)(fp - 8));
+	}
+}
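+
+/*
+ * Illustration: with frame pointers enabled, the generated code saves
+ * the {fp, ra} pair at the top of each frame and leaves s0/fp pointing
+ * just past it, hence
+ *	frame = (struct stackframe *)fp - 1;
+ * i.e. on RV64 the caller's fp sits at fp - 16 and ra at fp - 8.
+ */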
+
+#else /* !CONFIG_FRAME_POINTER */
+
+void notrace walk_stackframe(struct task_struct *task,
+	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
+{
+	unsigned long sp, pc;
+	unsigned long *ksp;
+
+	if (regs) {
+		sp = user_stack_pointer(regs);
+		pc = instruction_pointer(regs);
+	} else if (task == NULL || task == current) {
+		const register unsigned long current_sp __asm__ ("sp");
+		sp = current_sp;
+		pc = (unsigned long)walk_stackframe;
+	} else {
+		/* task blocked in __switch_to */
+		sp = task->thread.sp;
+		pc = task->thread.ra;
+	}
+
+	if (unlikely(sp & 0x7))
+		return;
+
+	ksp = (unsigned long *)sp;
+	while (!kstack_end(ksp)) {
+		if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
+			break;
+		pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
+	}
+}
+
+#endif /* CONFIG_FRAME_POINTER */
+
+
+static bool print_trace_address(unsigned long pc, void *arg)
+{
+	print_ip_sym(pc);
+	return false;
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+	pr_cont("Call Trace:\n");
+	walk_stackframe(task, NULL, print_trace_address, NULL);
+}
+
+
+static bool save_wchan(unsigned long pc, void *arg)
+{
+	if (!in_sched_functions(pc)) {
+		unsigned long *p = arg;
+		*p = pc;
+		return true;
+	}
+	return false;
+}
+
+unsigned long get_wchan(struct task_struct *task)
+{
+	unsigned long pc = 0;
+
+	if (likely(task && task != current && task->state != TASK_RUNNING))
+		walk_stackframe(task, NULL, save_wchan, &pc);
+	return pc;
+}
+
+
+#ifdef CONFIG_STACKTRACE
+
+static bool __save_trace(unsigned long pc, void *arg, bool nosched)
+{
+	struct stack_trace *trace = arg;
+
+	if (unlikely(nosched && in_sched_functions(pc)))
+		return false;
+	if (unlikely(trace->skip > 0)) {
+		trace->skip--;
+		return false;
+	}
+
+	trace->entries[trace->nr_entries++] = pc;
+	return (trace->nr_entries >= trace->max_entries);
+}
+
+static bool save_trace(unsigned long pc, void *arg)
+{
+	return __save_trace(pc, arg, false);
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+	walk_stackframe(tsk, NULL, save_trace, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	save_stack_trace_tsk(NULL, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif /* CONFIG_STACKTRACE */
diff --git a/marvell/linux/arch/riscv/kernel/sys_riscv.c b/marvell/linux/arch/riscv/kernel/sys_riscv.c
new file mode 100644
index 0000000..bb40268
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/sys_riscv.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2014 Darius Rad <darius@bluespec.com>
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/syscalls.h>
+#include <asm/unistd.h>
+#include <asm/cacheflush.h>
+#include <asm-generic/mman-common.h>
+
+static long riscv_sys_mmap(unsigned long addr, unsigned long len,
+			   unsigned long prot, unsigned long flags,
+			   unsigned long fd, off_t offset,
+			   unsigned long page_shift_offset)
+{
+	if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
+		return -EINVAL;
+
+	return ksys_mmap_pgoff(addr, len, prot, flags, fd,
+			       offset >> (PAGE_SHIFT - page_shift_offset));
+}
+
+#ifdef CONFIG_64BIT
+SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
+	unsigned long, prot, unsigned long, flags,
+	unsigned long, fd, off_t, offset)
+{
+	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
+}
+#else
+SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
+	unsigned long, prot, unsigned long, flags,
+	unsigned long, fd, off_t, offset)
+{
+	/*
+	 * Note that the shift for mmap2 is constant (12),
+	 * regardless of PAGE_SIZE
+	 */
+	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
+}
+#endif /* !CONFIG_64BIT */
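+
+/*
+ * Illustration: with 4 KiB pages, mmap2's offset is already expressed in
+ * 4096-byte units, so page_shift_offset == 12 makes both the alignment
+ * check and the shift collapse to no-ops; a file offset of 8 MiB arrives
+ * as offset == 2048 and is passed straight through as pgoff == 2048.
+ */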
+
+/*
+ * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart.  There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * sys_riscv_flush_icache() is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller.  We don't currently do anything with the address range, that's just
+ * in there for forwards compatibility.
+ */
+SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
+	uintptr_t, flags)
+{
+	/* Check the reserved flags. */
+	if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
+		return -EINVAL;
+
+	flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);
+
+	return 0;
+}
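+
+/*
+ * Rough sketch of a userspace caller (assuming the toolchain headers
+ * expose the __NR_riscv_flush_icache number):
+ *
+ *	syscall(__NR_riscv_flush_icache, start, end, 0UL);
+ *
+ * flags == 0 flushes on behalf of every thread in the process, while
+ * SYS_RISCV_FLUSH_ICACHE_LOCAL restricts the flush to the caller.
+ */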
diff --git a/marvell/linux/arch/riscv/kernel/syscall_table.c b/marvell/linux/arch/riscv/kernel/syscall_table.c
new file mode 100644
index 0000000..f1ead9d
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/syscall_table.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2009 Arnd Bergmann <arnd@arndb.de>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#include <linux/linkage.h>
+#include <linux/syscalls.h>
+#include <asm-generic/syscalls.h>
+#include <asm/vdso.h>
+#include <asm/syscall.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call)	[nr] = (call),
+
+void *sys_call_table[__NR_syscalls] = {
+	[0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
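+
+/*
+ * Illustration: with __SYSCALL redefined as above, every entry pulled in
+ * from <asm/unistd.h> becomes a designated initializer, e.g.
+ *	__SYSCALL(64, sys_write)  =>  [64] = (sys_write),
+ * overriding the sys_ni_syscall default for each implemented call.
+ */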
diff --git a/marvell/linux/arch/riscv/kernel/time.c b/marvell/linux/arch/riscv/kernel/time.c
new file mode 100644
index 0000000..726860a
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/time.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/of_clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/delay.h>
+#include <asm/sbi.h>
+#include <asm/processor.h>
+
+unsigned long riscv_timebase;
+EXPORT_SYMBOL_GPL(riscv_timebase);
+
+void __init time_init(void)
+{
+	struct device_node *cpu;
+	u32 prop;
+
+	cpu = of_find_node_by_path("/cpus");
+	if (!cpu || of_property_read_u32(cpu, "timebase-frequency", &prop))
+		panic("RISC-V system with no 'timebase-frequency' in DTS\n");
+	of_node_put(cpu);
+	riscv_timebase = prop;
+
+	lpj_fine = riscv_timebase / HZ;
+
+	of_clk_init(NULL);
+	timer_probe();
+
+	tick_setup_hrtimer_broadcast();
+}
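+
+/*
+ * Illustration: a 10 MHz timebase with HZ == 100 yields
+ * lpj_fine == 100000, so delay loops are seeded from the device tree
+ * value instead of being measured at boot.
+ */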
diff --git a/marvell/linux/arch/riscv/kernel/traps.c b/marvell/linux/arch/riscv/kernel/traps.c
new file mode 100644
index 0000000..c28d4de
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/traps.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#include <linux/cpu.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
+#include <linux/signal.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/kexec.h>
+
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/csr.h>
+
+int show_unhandled_signals = 1;
+
+extern asmlinkage void handle_exception(void);
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(struct pt_regs *regs, const char *str)
+{
+	static int die_counter;
+	int ret;
+
+	oops_enter();
+
+	spin_lock_irq(&die_lock);
+	console_verbose();
+	bust_spinlocks(1);
+
+	pr_emerg("%s [#%d]\n", str, ++die_counter);
+	print_modules();
+	show_regs(regs);
+
+	ret = notify_die(DIE_OOPS, str, regs, 0, regs->scause, SIGSEGV);
+
+	if (regs && kexec_should_crash(current))
+		crash_kexec(regs);
+
+	bust_spinlocks(0);
+	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+	spin_unlock_irq(&die_lock);
+	oops_exit();
+
+	if (in_interrupt())
+		panic("Fatal exception in interrupt");
+	if (panic_on_oops)
+		panic("Fatal exception");
+	if (ret != NOTIFY_STOP)
+		make_task_dead(SIGSEGV);
+}
+
+void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
+{
+	struct task_struct *tsk = current;
+
+	if (show_unhandled_signals && unhandled_signal(tsk, signo)
+	    && printk_ratelimit()) {
+		pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
+			tsk->comm, task_pid_nr(tsk), signo, code, addr);
+		print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
+		pr_cont("\n");
+		show_regs(regs);
+	}
+
+	force_sig_fault(signo, code, (void __user *)addr);
+}
+
+static void do_trap_error(struct pt_regs *regs, int signo, int code,
+	unsigned long addr, const char *str)
+{
+	if (user_mode(regs)) {
+		do_trap(regs, signo, code, addr);
+	} else {
+		if (!fixup_exception(regs))
+			die(regs, str);
+	}
+}
+
+#define DO_ERROR_INFO(name, signo, code, str)				\
+asmlinkage __visible void name(struct pt_regs *regs)			\
+{									\
+	do_trap_error(regs, signo, code, regs->sepc, "Oops - " str);	\
+}
+
+DO_ERROR_INFO(do_trap_unknown,
+	SIGILL, ILL_ILLTRP, "unknown exception");
+DO_ERROR_INFO(do_trap_insn_misaligned,
+	SIGBUS, BUS_ADRALN, "instruction address misaligned");
+DO_ERROR_INFO(do_trap_insn_fault,
+	SIGSEGV, SEGV_ACCERR, "instruction access fault");
+DO_ERROR_INFO(do_trap_insn_illegal,
+	SIGILL, ILL_ILLOPC, "illegal instruction");
+DO_ERROR_INFO(do_trap_load_misaligned,
+	SIGBUS, BUS_ADRALN, "load address misaligned");
+DO_ERROR_INFO(do_trap_load_fault,
+	SIGSEGV, SEGV_ACCERR, "load access fault");
+DO_ERROR_INFO(do_trap_store_misaligned,
+	SIGBUS, BUS_ADRALN, "store (or AMO) address misaligned");
+DO_ERROR_INFO(do_trap_store_fault,
+	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
+DO_ERROR_INFO(do_trap_ecall_u,
+	SIGILL, ILL_ILLTRP, "environment call from U-mode");
+DO_ERROR_INFO(do_trap_ecall_s,
+	SIGILL, ILL_ILLTRP, "environment call from S-mode");
+DO_ERROR_INFO(do_trap_ecall_m,
+	SIGILL, ILL_ILLTRP, "environment call from M-mode");
+
+static inline unsigned long get_break_insn_length(unsigned long pc)
+{
+	bug_insn_t insn;
+
+	if (probe_kernel_address((bug_insn_t *)pc, insn))
+		return 0;
+	return (((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ? 4UL : 2UL);
+}
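+
+/*
+ * Illustration: RISC-V encodes instruction length in the low opcode
+ * bits; (insn & 0x3) == 0x3 marks a 32-bit instruction, anything else a
+ * compressed 16-bit one.  A WARN's ebreak or c.ebreak is therefore
+ * stepped over by 4 or 2 bytes respectively in do_trap_break() below.
+ */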
+
+asmlinkage __visible void do_trap_break(struct pt_regs *regs)
+{
+	if (user_mode(regs))
+		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->sepc);
+	else if (report_bug(regs->sepc, regs) == BUG_TRAP_TYPE_WARN)
+		regs->sepc += get_break_insn_length(regs->sepc);
+	else
+		die(regs, "Kernel BUG");
+}
+
+#ifdef CONFIG_GENERIC_BUG
+int is_valid_bugaddr(unsigned long pc)
+{
+	bug_insn_t insn;
+
+	if (pc < VMALLOC_START)
+		return 0;
+	if (probe_kernel_address((bug_insn_t *)pc, insn))
+		return 0;
+	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
+		return (insn == __BUG_INSN_32);
+	else
+		return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
+}
+#endif /* CONFIG_GENERIC_BUG */
+
+void __init trap_init(void)
+{
+	/*
+	 * Set sup0 scratch register to 0, indicating to exception vector
+	 * that we are presently executing in the kernel
+	 */
+	csr_write(CSR_SSCRATCH, 0);
+	/* Set the exception vector address */
+	csr_write(CSR_STVEC, &handle_exception);
+	/* Enable all interrupts */
+	csr_write(CSR_SIE, -1);
+}
diff --git a/marvell/linux/arch/riscv/kernel/vdso.c b/marvell/linux/arch/riscv/kernel/vdso.c
new file mode 100644
index 0000000..484d95a
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/vdso.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ * Copyright (C) 2012 ARM Limited
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/binfmts.h>
+#include <linux/err.h>
+
+#include <asm/vdso.h>
+
+extern char vdso_start[], vdso_end[];
+
+static unsigned int vdso_pages;
+static struct page **vdso_pagelist;
+
+/*
+ * The vDSO data page.
+ */
+static union {
+	struct vdso_data	data;
+	u8			page[PAGE_SIZE];
+} vdso_data_store __page_aligned_data;
+static struct vdso_data *vdso_data = &vdso_data_store.data;
+
+static int __init vdso_init(void)
+{
+	unsigned int i;
+
+	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+	vdso_pagelist =
+		kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
+	if (unlikely(vdso_pagelist == NULL)) {
+		pr_err("vdso: pagelist allocation failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < vdso_pages; i++) {
+		struct page *pg;
+
+		pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
+		vdso_pagelist[i] = pg;
+	}
+	vdso_pagelist[i] = virt_to_page(vdso_data);
+
+	return 0;
+}
+arch_initcall(vdso_init);
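+
+/*
+ * Illustration: for a two-page vdso.so the pagelist ends up as
+ *	vdso_pagelist[0..1] = vDSO text/rodata pages
+ *	vdso_pagelist[2]    = the vdso_data page shared with the kernel
+ * which is why arch_setup_additional_pages() below maps
+ * (vdso_pages + 1) << PAGE_SHIFT bytes.
+ */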
+
+int arch_setup_additional_pages(struct linux_binprm *bprm,
+	int uses_interp)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long vdso_base, vdso_len;
+	int ret;
+
+	vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
+
+	down_write(&mm->mmap_sem);
+	vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
+	if (IS_ERR_VALUE(vdso_base)) {
+		ret = vdso_base;
+		goto end;
+	}
+
+	/*
+	 * Put vDSO base into mm struct. We need to do this before calling
+	 * install_special_mapping or the perf counter mmap tracking code
+	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
+	 */
+	mm->context.vdso = (void *)vdso_base;
+
+	ret = install_special_mapping(mm, vdso_base, vdso_len,
+		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
+		vdso_pagelist);
+
+	if (unlikely(ret))
+		mm->context.vdso = NULL;
+
+end:
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
+		return "[vdso]";
+	return NULL;
+}
diff --git a/marvell/linux/arch/riscv/kernel/vdso/.gitignore b/marvell/linux/arch/riscv/kernel/vdso/.gitignore
new file mode 100644
index 0000000..97c2d69
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/vdso/.gitignore
@@ -0,0 +1,2 @@
+vdso.lds
+*.tmp
diff --git a/marvell/linux/arch/riscv/kernel/vdso/Makefile b/marvell/linux/arch/riscv/kernel/vdso/Makefile
new file mode 100644
index 0000000..c533ac8
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/vdso/Makefile
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copied from arch/tile/kernel/vdso/Makefile
+
+# Symbols present in the vdso
+vdso-syms  = rt_sigreturn
+ifdef CONFIG_64BIT
+vdso-syms += gettimeofday
+vdso-syms += clock_gettime
+vdso-syms += clock_getres
+endif
+vdso-syms += getcpu
+vdso-syms += flush_icache
+
+# Files to link into the vdso
+obj-vdso = $(patsubst %, %.o, $(vdso-syms))
+
+# Build rules
+targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
+obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+obj-y += vdso.o vdso-syms.o
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+ifneq ($(filter gettimeofday, $(vdso-syms)),)
+CPPFLAGS_vdso.lds += -DHAS_VGETTIMEOFDAY
+endif
+
+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+
+# Force dependency
+$(obj)/vdso.o: $(obj)/vdso.so
+
+# link rule for the .so file, .lds has to be first
+SYSCFLAGS_vdso.so.dbg = $(c_flags)
+$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
+	$(call if_changed,vdsold)
+
+# We also create a special relocatable object that should mirror the symbol
+# table and layout of the linked DSO. With ld --just-symbols we can then
+# refer to these symbols in the kernel code rather than hand-coded addresses.
+
+SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+	-Wl,--build-id -Wl,--hash-style=both
+$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
+	$(call if_changed,vdsold)
+
+LDFLAGS_vdso-syms.o := -r --just-symbols
+$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
+	$(call if_changed,ld)
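+
+# Illustration: the effect of --just-symbols is that vmlinux can refer
+# to, say, __vdso_rt_sigreturn and have it resolve to the same offset the
+# real DSO has at run time, without linking any of the DSO's code into
+# the kernel image.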
+
+# strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+	$(call if_changed,objcopy)
+
+# actual build commands
+# The DSO images are built using a special linker script.
+# Make sure only to export the intended __vdso_xxx symbol offsets.
+quiet_cmd_vdsold = VDSOLD  $@
+      cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
+                           -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
+                   $(CROSS_COMPILE)objcopy \
+                           $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
+                   rm $@.tmp
+
+# install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso.so: $(obj)/vdso.so.dbg
+	@mkdir -p $(MODLIB)/vdso
+	$(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/marvell/linux/arch/riscv/kernel/vdso/clock_getres.S b/marvell/linux/arch/riscv/kernel/vdso/clock_getres.S
new file mode 100644
index 0000000..91378a5
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/vdso/clock_getres.S
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+	.text
+/* int __vdso_clock_getres(clockid_t clock_id, struct timespec *res); */
+ENTRY(__vdso_clock_getres)
+	.cfi_startproc
+	/* For now, just do the syscall. */
+	li a7, __NR_clock_getres
+	ecall
+	ret
+	.cfi_endproc
+ENDPROC(__vdso_clock_getres)
diff --git a/marvell/linux/arch/riscv/kernel/vdso/clock_gettime.S b/marvell/linux/arch/riscv/kernel/vdso/clock_gettime.S
new file mode 100644
index 0000000..5371fd9
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/vdso/clock_gettime.S
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+	.text
+/* int __vdso_clock_gettime(clockid_t clock_id, struct timespec *tp); */
+ENTRY(__vdso_clock_gettime)
+	.cfi_startproc
+	/* For now, just do the syscall. */
+	li a7, __NR_clock_gettime
+	ecall
+	ret
+	.cfi_endproc
+ENDPROC(__vdso_clock_gettime)
diff --git a/marvell/linux/arch/riscv/kernel/vdso/flush_icache.S b/marvell/linux/arch/riscv/kernel/vdso/flush_icache.S
new file mode 100644
index 0000000..82f97d6
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/vdso/flush_icache.S
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+	.text
+/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
+ENTRY(__vdso_flush_icache)
+	.cfi_startproc
+#ifdef CONFIG_SMP
+	li a7, __NR_riscv_flush_icache
+	ecall
+#else
+	fence.i
+	li a0, 0
+#endif
+	ret
+	.cfi_endproc
+ENDPROC(__vdso_flush_icache)
diff --git a/marvell/linux/arch/riscv/kernel/vdso/getcpu.S b/marvell/linux/arch/riscv/kernel/vdso/getcpu.S
new file mode 100644
index 0000000..bb0c05e
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/vdso/getcpu.S
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+	.text
+/* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
+ENTRY(__vdso_getcpu)
+	.cfi_startproc
+	/* For now, just do the syscall. */
+	li a7, __NR_getcpu
+	ecall
+	ret
+	.cfi_endproc
+ENDPROC(__vdso_getcpu)
diff --git a/marvell/linux/arch/riscv/kernel/vdso/gettimeofday.S b/marvell/linux/arch/riscv/kernel/vdso/gettimeofday.S
new file mode 100644
index 0000000..e6fb8af
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/vdso/gettimeofday.S
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+	.text
+/* int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); */
+ENTRY(__vdso_gettimeofday)
+	.cfi_startproc
+	/* For now, just do the syscall. */
+	li a7, __NR_gettimeofday
+	ecall
+	ret
+	.cfi_endproc
+ENDPROC(__vdso_gettimeofday)
diff --git a/marvell/linux/arch/riscv/kernel/vdso/rt_sigreturn.S b/marvell/linux/arch/riscv/kernel/vdso/rt_sigreturn.S
new file mode 100644
index 0000000..0573705
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/vdso/rt_sigreturn.S
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+	.text
+ENTRY(__vdso_rt_sigreturn)
+	.cfi_startproc
+	.cfi_signal_frame
+	li a7, __NR_rt_sigreturn
+	scall
+	.cfi_endproc
+ENDPROC(__vdso_rt_sigreturn)
diff --git a/marvell/linux/arch/riscv/kernel/vdso/vdso.S b/marvell/linux/arch/riscv/kernel/vdso/vdso.S
new file mode 100644
index 0000000..df22224
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/vdso/vdso.S
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+	__PAGE_ALIGNED_DATA
+
+	.globl vdso_start, vdso_end
+	.balign PAGE_SIZE
+vdso_start:
+	.incbin "arch/riscv/kernel/vdso/vdso.so"
+	.balign PAGE_SIZE
+vdso_end:
+
+	.previous
diff --git a/marvell/linux/arch/riscv/kernel/vdso/vdso.lds.S b/marvell/linux/arch/riscv/kernel/vdso/vdso.lds.S
new file mode 100644
index 0000000..4c45adf
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/vdso/vdso.lds.S
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+OUTPUT_ARCH(riscv)
+
+SECTIONS
+{
+	. = SIZEOF_HEADERS;
+
+	.hash		: { *(.hash) }			:text
+	.gnu.hash	: { *(.gnu.hash) }
+	.dynsym		: { *(.dynsym) }
+	.dynstr		: { *(.dynstr) }
+	.gnu.version	: { *(.gnu.version) }
+	.gnu.version_d	: { *(.gnu.version_d) }
+	.gnu.version_r	: { *(.gnu.version_r) }
+
+	.note		: { *(.note.*) }		:text	:note
+	.dynamic	: { *(.dynamic) }		:text	:dynamic
+
+	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
+	.eh_frame	: { KEEP (*(.eh_frame)) }	:text
+
+	.rodata		: { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+
+	/*
+	 * This linker script is used both with -r and with -shared.
+	 * For the layouts to match, we need to skip more than enough
+	 * space for the dynamic symbol table, etc. If this amount is
+	 * insufficient, ld -shared will error; simply increase it here.
+	 */
+	. = 0x800;
+	.text		: { *(.text .text.*) }		:text
+
+	.data		: {
+		*(.got.plt) *(.got)
+		*(.data .data.* .gnu.linkonce.d.*)
+		*(.dynbss)
+		*(.bss .bss.* .gnu.linkonce.b.*)
+	}
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+	text		PT_LOAD		FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+	dynamic		PT_DYNAMIC	FLAGS(4);		/* PF_R */
+	note		PT_NOTE		FLAGS(4);		/* PF_R */
+	eh_frame_hdr	PT_GNU_EH_FRAME;
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+	LINUX_4.15 {
+	global:
+		__vdso_rt_sigreturn;
+#ifdef HAS_VGETTIMEOFDAY
+		__vdso_gettimeofday;
+		__vdso_clock_gettime;
+		__vdso_clock_getres;
+#endif
+		__vdso_getcpu;
+		__vdso_flush_icache;
+	local: *;
+	};
+}
diff --git a/marvell/linux/arch/riscv/kernel/vmlinux.lds.S b/marvell/linux/arch/riscv/kernel/vmlinux.lds.S
new file mode 100644
index 0000000..23cd1a9
--- /dev/null
+++ b/marvell/linux/arch/riscv/kernel/vmlinux.lds.S
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#define LOAD_OFFSET PAGE_OFFSET
+#include <asm/vmlinux.lds.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+
+OUTPUT_ARCH(riscv)
+ENTRY(_start)
+
+jiffies = jiffies_64;
+
+SECTIONS
+{
+	/* Beginning of code and text segment */
+	. = LOAD_OFFSET;
+	_start = .;
+	__init_begin = .;
+	HEAD_TEXT_SECTION
+	INIT_TEXT_SECTION(PAGE_SIZE)
+	INIT_DATA_SECTION(16)
+	/* we have to discard exit text and such at runtime, not link time */
+	.exit.text :
+	{
+		EXIT_TEXT
+	}
+	.exit.data :
+	{
+		EXIT_DATA
+	}
+	PERCPU_SECTION(L1_CACHE_BYTES)
+	__init_end = .;
+
+	.text : {
+		_text = .;
+		_stext = .;
+		TEXT_TEXT
+		SCHED_TEXT
+		CPUIDLE_TEXT
+		LOCK_TEXT
+		KPROBES_TEXT
+		ENTRY_TEXT
+		IRQENTRY_TEXT
+		*(.fixup)
+		_etext = .;
+	}
+
+	/* Start of data section */
+	_sdata = .;
+	RO_DATA_SECTION(L1_CACHE_BYTES)
+	.srodata : {
+		*(.srodata*)
+	}
+
+	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+	.sdata : {
+		__global_pointer$ = . + 0x800;
+		*(.sdata*)
+		/* End of data section */
+		_edata = .;
+		*(.sbss*)
+	}
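+
+	/*
+	 * Illustration: __global_pointer$ is biased by 0x800 because
+	 * gp-relative accesses use signed 12-bit offsets (gp +/- 2 KiB),
+	 * so the bias lets gp reach the first 4 KiB of small data.
+	 */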
+
+	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
+
+	EXCEPTION_TABLE(0x10)
+	NOTES
+
+	.rel.dyn : {
+		*(.rel.dyn*)
+	}
+
+	_end = .;
+
+	STABS_DEBUG
+	DWARF_DEBUG
+
+	DISCARDS
+}