[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/arch/hexagon/mm/Makefile b/src/kernel/linux/v4.14/arch/hexagon/mm/Makefile
new file mode 100644
index 0000000..1a0be4d
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/hexagon/mm/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Hexagon memory management subsystem
+#
+
+obj-y := init.o pgalloc.o ioremap.o uaccess.o vm_fault.o cache.o
+obj-y += copy_to_user.o copy_from_user.o strnlen_user.o vm_tlb.o
diff --git a/src/kernel/linux/v4.14/arch/hexagon/mm/cache.c b/src/kernel/linux/v4.14/arch/hexagon/mm/cache.c
new file mode 100644
index 0000000..a7c6d82
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/hexagon/mm/cache.c
@@ -0,0 +1,139 @@
+/*
+ * Cache management functions for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/hexagon_vm.h>
+
+#define spanlines(start, end) \
+	(((end - (start & ~(LINESIZE - 1))) >> LINEBITS) + 1)
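+
+/*
+ * Worked example, assuming the usual 32-byte line (LINESIZE == 32,
+ * LINEBITS == 5): callers pass an inclusive end, so
+ * flush_dcache_range(0x1004, 0x1044) computes spanlines(0x1004, 0x1043)
+ * = ((0x1043 - 0x1000) >> 5) + 1 = 3 lines, at 0x1000, 0x1020 and 0x1040.
+ */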
+
+void flush_dcache_range(unsigned long start, unsigned long end)
+{
+	unsigned long lines = spanlines(start, end-1);
+	unsigned long i, flags;
+
+	start &= ~(LINESIZE - 1);
+
+	local_irq_save(flags);
+
+	for (i = 0; i < lines; i++) {
+		__asm__ __volatile__ (
+		"	dccleaninva(%0);	"
+		:
+		: "r" (start)
+		);
+		start += LINESIZE;
+	}
+	local_irq_restore(flags);
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+	unsigned long lines = spanlines(start, end-1);
+	unsigned long i, flags;
+
+	start &= ~(LINESIZE - 1);
+
+	local_irq_save(flags);
+
+	for (i = 0; i < lines; i++) {
+		__asm__ __volatile__ (
+			"	dccleana(%0); "
+			"	icinva(%0);	"
+			:
+			: "r" (start)
+		);
+		start += LINESIZE;
+	}
+	__asm__ __volatile__ (
+		"isync"
+	);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+void hexagon_clean_dcache_range(unsigned long start, unsigned long end)
+{
+	unsigned long lines = spanlines(start, end-1);
+	unsigned long i, flags;
+
+	start &= ~(LINESIZE - 1);
+
+	local_irq_save(flags);
+
+	for (i = 0; i < lines; i++) {
+		__asm__ __volatile__ (
+		"	dccleana(%0);	"
+		:
+		: "r" (start)
+		);
+		start += LINESIZE;
+	}
+	local_irq_restore(flags);
+}
+
+void hexagon_inv_dcache_range(unsigned long start, unsigned long end)
+{
+	unsigned long lines = spanlines(start, end-1);
+	unsigned long i, flags;
+
+	start &= ~(LINESIZE - 1);
+
+	local_irq_save(flags);
+
+	for (i = 0; i < lines; i++) {
+		__asm__ __volatile__ (
+		"	dcinva(%0);	"
+		:
+		: "r" (start)
+		);
+		start += LINESIZE;
+	}
+	local_irq_restore(flags);
+}
+
+/*
+ * This is just really brutal and shouldn't be used anyway,
+ * especially on V2.  Left here just in case.
+ */
+void flush_cache_all_hexagon(void)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+	__vmcache_ickill();
+	__vmcache_dckill();
+	__vmcache_l2kill();
+	local_irq_restore(flags);
+	mb();
+}
+
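+/*
+ * Used by access_process_vm() and ptrace when writing into another
+ * process's page: do the copy, then make the new bytes visible to
+ * instruction fetch if the mapping is executable.
+ */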
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		       unsigned long vaddr, void *dst, void *src, int len)
+{
+	memcpy(dst, src, len);
+	if (vma->vm_flags & VM_EXEC) {
+		flush_icache_range((unsigned long) dst,
+				   (unsigned long) dst + len);
+	}
+}
diff --git a/src/kernel/linux/v4.14/arch/hexagon/mm/copy_from_user.S b/src/kernel/linux/v4.14/arch/hexagon/mm/copy_from_user.S
new file mode 100644
index 0000000..7da066f
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/hexagon/mm/copy_from_user.S
@@ -0,0 +1,114 @@
+/*
+ * User memory copy functions for kernel
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * The right way to do this involves valignb.
+ * The easy way, implemented here, is to only speed up copies where
+ * src and dest share similar alignment.
+ */
+
+/*
+ * Copy-to-user and copy-from-user are the same, except that for packets
+ * containing both a load and a store there is no way to tell which kind
+ * of exception we got.  Therefore we duplicate the function and handle
+ * faulting addresses differently in each one.
+ */
+
+/*
+ * copy from user: loads can fault
+ */
+#define src_sav r13
+#define dst_sav r12
+#define src_dst_sav r13:12
+#define d_dbuf r15:14
+#define w_dbuf r15
+
+#define dst r0
+#define src r1
+#define bytes r2
+#define loopcount r5
+
+#define FUNCNAME raw_copy_from_user
+#include "copy_user_template.S"
+
+	/* LOAD FAULTS from COPY_FROM_USER */
+
+	/* Alignment loop.  r2 has been updated. Return it. */
+	.falign
+1009:
+2009:
+4009:
+	{
+		r0 = r2
+		jumpr r31
+	}
+	/* Normal copy loops. Do epilog. Use src-src_sav to compute distance */
+	/* X - (A - B) == X + B - A */
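+	/*
+	 * i.e. remaining = r2 - (src - src_sav), computed as
+	 * "r2 += sub(src_sav,src)" below, so raw_copy_from_user()
+	 * returns the number of bytes it could not copy.
+	 */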
+	.falign
+8089:
+	{
+		memd(dst) = d_dbuf
+		r2 += sub(src_sav,src)
+	}
+	{
+		r0 = r2
+		jumpr r31
+	}
+	.falign
+4089:
+	{
+		memw(dst) = w_dbuf
+		r2 += sub(src_sav,src)
+	}
+	{
+		r0 = r2
+		jumpr r31
+	}
+	.falign
+2089:
+	{
+		memh(dst) = w_dbuf
+		r2 += sub(src_sav,src)
+	}
+	{
+		r0 = r2
+		jumpr r31
+	}
+	.falign
+1089:
+	{
+		memb(dst) = w_dbuf
+		r2 += sub(src_sav,src)
+	}
+	{
+		r0 = r2
+		jumpr r31
+	}
+
+	/* COPY FROM USER: only loads can fail */
+
+	.section __ex_table,"a"
+	.long 1000b,1009b
+	.long 2000b,2009b
+	.long 4000b,4009b
+	.long 8080b,8089b
+	.long 4080b,4089b
+	.long 2080b,2089b
+	.long 1080b,1089b
+	.previous
diff --git a/src/kernel/linux/v4.14/arch/hexagon/mm/copy_to_user.S b/src/kernel/linux/v4.14/arch/hexagon/mm/copy_to_user.S
new file mode 100644
index 0000000..a7b7f8d
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/hexagon/mm/copy_to_user.S
@@ -0,0 +1,92 @@
+/*
+ * User memory copying routines for the Hexagon Kernel
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/* The right way to do this involves valignb.
+ * The easy way, implemented here, is to only speed up copies where
+ * src and dest share similar alignment.
+ */
+
+/*
+ * Copy-to-user and copy-from-user are the same, except that for packets
+ * containing both a load and a store there is no way to tell which kind
+ * of exception we got.  Therefore we duplicate the function and handle
+ * faulting addresses differently in each one.
+ */
+
+/*
+ * copy to user: stores can fault
+ */
+#define src_sav r13
+#define dst_sav r12
+#define src_dst_sav r13:12
+#define d_dbuf r15:14
+#define w_dbuf r15
+
+#define dst r0
+#define src r1
+#define bytes r2
+#define loopcount r5
+
+#define FUNCNAME raw_copy_to_user
+#include "copy_user_template.S"
+
+	/* STORE FAULTS from COPY_TO_USER */
+	.falign
+1109:
+2109:
+4109:
+	/* Alignment loop.  r2 has been updated.  Return it. */
+	{
+		r0 = r2
+		jumpr r31
+	}
+	/* Normal copy loops.  Use dst-dst_sav to compute distance */
+	/* dst holds best write, no need to unwind any loops */
+	/* X - (A - B) == X + B - A */
+	.falign
+8189:
+8199:
+4189:
+4199:
+2189:
+2199:
+1189:
+1199:
+	{
+		r2 += sub(dst_sav,dst)
+	}
+	{
+		r0 = r2
+		jumpr r31
+	}
+
+	/* COPY TO USER: only stores can fail */
+	.section __ex_table,"a"
+	.long 1100b,1109b
+	.long 2100b,2109b
+	.long 4100b,4109b
+	.long 8180b,8189b
+	.long 8190b,8199b
+	.long 4180b,4189b
+	.long 4190b,4199b
+	.long 2180b,2189b
+	.long 2190b,2199b
+	.long 1180b,1189b
+	.long 1190b,1199b
+	.previous
diff --git a/src/kernel/linux/v4.14/arch/hexagon/mm/copy_user_template.S b/src/kernel/linux/v4.14/arch/hexagon/mm/copy_user_template.S
new file mode 100644
index 0000000..254d8cc
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/hexagon/mm/copy_user_template.S
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/* Numerology:
+ * Labels are of the form WXYZ, where
+ * W: width in bytes
+ * X: load = 0, store = 1
+ * Y: location: 0 = preamble, 8 = loop, 9 = epilog
+ * Z: 0 = faulting location, 9 = fault handler
+ */
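+/*
+ * For example, label 4080 below is the word load in the 4-byte copy
+ * loop (W=4, X=0, Y=8, Z=0); the including file provides its fault
+ * handler at label 4089 (Z=9).
+ */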
+	.text
+	.global FUNCNAME
+	.type FUNCNAME, @function
+	.p2align 5
+FUNCNAME:
+	{
+		p0 = cmp.gtu(bytes,#0)
+		if (!p0.new) jump:nt .Ldone
+		r3 = or(dst,src)
+		r4 = xor(dst,src)
+	}
+	{
+		p1 = cmp.gtu(bytes,#15)
+		p0 = bitsclr(r3,#7)
+		if (!p0.new) jump:nt .Loop_not_aligned_8
+		src_dst_sav = combine(src,dst)
+	}
+
+	{
+		loopcount = lsr(bytes,#3)
+		if (!p1) jump .Lsmall
+	}
+	p3=sp1loop0(.Loop8,loopcount)
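+	/*
+	 * sp1loop0 starts a hardware loop with one stage of software
+	 * pipelining: p3 is false on the first pass (skipping the store
+	 * while the first load is in flight) and true afterwards, so one
+	 * final store remains after the loop (done at 8190 below).
+	 */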
+.Loop8:
+8080:
+8180:
+	{
+		if (p3) memd(dst++#8) = d_dbuf
+		d_dbuf = memd(src++#8)
+	}:endloop0
+8190:
+	{
+		memd(dst++#8) = d_dbuf
+		bytes -= asl(loopcount,#3)
+		jump .Lsmall
+	}
+
+.Loop_not_aligned_8:
+	{
+		p0 = bitsclr(r4,#7)
+		if (p0.new) jump:nt .Lalign
+	}
+	{
+		p0 = bitsclr(r3,#3)
+		if (!p0.new) jump:nt .Loop_not_aligned_4
+		p1 = cmp.gtu(bytes,#7)
+	}
+
+	{
+		if (!p1) jump .Lsmall
+		loopcount = lsr(bytes,#2)
+	}
+	p3=sp1loop0(.Loop4,loopcount)
+.Loop4:
+4080:
+4180:
+	{
+		if (p3) memw(dst++#4) = w_dbuf
+		w_dbuf = memw(src++#4)
+	}:endloop0
+4190:
+	{
+		memw(dst++#4) = w_dbuf
+		bytes -= asl(loopcount,#2)
+		jump .Lsmall
+	}
+
+.Loop_not_aligned_4:
+	{
+		p0 = bitsclr(r3,#1)
+		if (!p0.new) jump:nt .Loop_not_aligned
+		p1 = cmp.gtu(bytes,#3)
+	}
+
+	{
+		if (!p1) jump .Lsmall
+		loopcount = lsr(bytes,#1)
+	}
+	p3=sp1loop0(.Loop2,loopcount)
+.Loop2:
+2080:
+2180:
+	{
+		if (p3) memh(dst++#2) = w_dbuf
+		w_dbuf = memuh(src++#2)
+	}:endloop0
+2190:
+	{
+		memh(dst++#2) = w_dbuf
+		bytes -= asl(loopcount,#1)
+		jump .Lsmall
+	}
+
+.Loop_not_aligned: /* Works for as small as one byte */
+	p3=sp1loop0(.Loop1,bytes)
+.Loop1:
+1080:
+1180:
+	{
+		if (p3) memb(dst++#1) = w_dbuf
+		w_dbuf = memub(src++#1)
+	}:endloop0
+	/* Done */
+1190:
+	{
+		memb(dst) = w_dbuf
+		jumpr r31
+		r0 = #0
+	}
+
+.Lsmall:
+	{
+		p0 = cmp.gtu(bytes,#0)
+		if (p0.new) jump:nt .Loop_not_aligned
+	}
+.Ldone:
+	{
+		r0 = #0
+		jumpr r31
+	}
+	.falign
+.Lalign:
+1000:
+	{
+		if (p0.new) w_dbuf = memub(src)
+		p0 = tstbit(src,#0)
+		if (!p1) jump .Lsmall
+	}
+1100:
+	{
+		if (p0) memb(dst++#1) = w_dbuf
+		if (p0) bytes = add(bytes,#-1)
+		if (p0) src = add(src,#1)
+	}
+2000:
+	{
+		if (p0.new) w_dbuf = memuh(src)
+		p0 = tstbit(src,#1)
+		if (!p1) jump .Lsmall
+	}
+2100:
+	{
+		if (p0) memh(dst++#2) = w_dbuf
+		if (p0) bytes = add(bytes,#-2)
+		if (p0) src = add(src,#2)
+	}
+4000:
+	{
+		if (p0.new) w_dbuf = memw(src)
+		p0 = tstbit(src,#2)
+		if (!p1) jump .Lsmall
+	}
+4100:
+	{
+		if (p0) memw(dst++#4) = w_dbuf
+		if (p0) bytes = add(bytes,#-4)
+		if (p0) src = add(src,#4)
+		jump FUNCNAME
+	}
+	.size FUNCNAME,.-FUNCNAME
diff --git a/src/kernel/linux/v4.14/arch/hexagon/mm/init.c b/src/kernel/linux/v4.14/arch/hexagon/mm/init.c
new file mode 100644
index 0000000..192584d
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/hexagon/mm/init.c
@@ -0,0 +1,283 @@
+/*
+ * Memory subsystem initialization for Hexagon
+ *
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <asm/atomic.h>
+#include <linux/highmem.h>
+#include <asm/tlb.h>
+#include <asm/sections.h>
+#include <asm/vm_mmu.h>
+
+/*
+ * Define a startpg just past the end of the kernel image and a lastpg
+ * that corresponds to the end of real or simulated platform memory.
+ */
+#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET + PHYS_OFFSET))
+
+unsigned long bootmem_lastpg;	/*  Should be set by platform code  */
+unsigned long __phys_offset;	/*  physical kernel offset >> 12  */
+
+/*  Set as variable to limit PMD copies  */
+int max_kernel_seg = 0x303;
+
+/*  Probably should be (PAGE_SIZE - 1), given the way it's used...  */
+unsigned long zero_page_mask;
+
+/*  indicate pfn's of high memory  */
+unsigned long highstart_pfn, highend_pfn;
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+/* Default cache attribute for newly created page tables */
+unsigned long _dflt_cache_att = CACHEDEF;
+
+/*
+ * The current "generation" of kernel map, which should not roll
+ * over until Hell freezes over.  Actual bound in years needs to be
+ * calculated to confirm.
+ */
+DEFINE_SPINLOCK(kmap_gen_lock);
+
+/*  checkpatch says don't init this to 0.  */
+unsigned long long kmap_generation;
+
+/*
+ * mem_init - initializes memory
+ *
+ * Frees up bootmem
+ * Fixes up more stuff for HIGHMEM
+ * Calculates and displays memory available/used
+ */
+void __init mem_init(void)
+{
+	/*  No idea where this is actually declared.  Seems to evade LXR.  */
+	free_all_bootmem();
+	mem_init_print_info(NULL);
+
+	/*
+	 *  To-Do:  someone somewhere should wipe out the bootmem map
+	 *  after we're done?
+	 */
+
+	/*
+	 * This can be moved to some more virtual-memory-specific
+	 * initialization hook at some point.  Set the init_mm
+	 * descriptors "context" value to point to the initial
+	 * kernel segment table's physical address.
+	 */
+	init_mm.context.ptbase = __pa(init_mm.pgd);
+}
+
+/*
+ * free_initmem - frees memory used by stuff declared with __init
+ *
+ * Todo:  free pages between __init_begin and __init_end; possibly
+ * some devtree related stuff as well.
+ */
+void __ref free_initmem(void)
+{
+}
+
+/*
+ * free_initrd_mem - frees...  initrd memory.
+ * @start - start of initrd memory
+ * @end - end of initrd memory
+ *
+ * Apparently has to be passed the address of the initrd memory.
+ *
+ * Wrapped by #ifdef CONFIG_BLKDEV_INITRD
+ */
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+}
+
+void sync_icache_dcache(pte_t pte)
+{
+	unsigned long addr;
+	struct page *page;
+
+	page = pte_page(pte);
+	addr = (unsigned long) page_address(page);
+
+	__vmcache_idsync(addr, PAGE_SIZE);
+}
+
+/*
+ * In order to set up page allocator "nodes",
+ * somebody has to call free_area_init() for UMA.
+ *
+ * In this mode, we only have one pg_data_t
+ * structure: contig_mem_data.
+ */
+void __init paging_init(void)
+{
+	unsigned long zones_sizes[MAX_NR_ZONES] = {0, };
+
+	/*
+	 *  This is not particularly well documented anywhere, but
+	 *  give ZONE_NORMAL all the memory, including the big holes
+	 *  left by the kernel+bootmem_map which are already left as reserved
+	 *  in the bootmem_map; free_area_init should see those bits and
+	 *  adjust accordingly.
+	 */
+
+	zones_sizes[ZONE_NORMAL] = max_low_pfn;
+
+	free_area_init(zones_sizes);  /*  sets up the zonelists and mem_map  */
+
+	/*
+	 * Start of high memory area.  Will probably need something more
+	 * fancy if we...  get more fancy.
+	 */
+	high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT);
+}
+
+#ifndef DMA_RESERVE
+#define DMA_RESERVE		(4)
+#endif
+
+#define DMA_CHUNKSIZE		(1<<22)
+#define DMA_RESERVED_BYTES	(DMA_RESERVE * DMA_CHUNKSIZE)
+
+/*
+ * Pick out the memory size.  We look for mem=size,
+ * where size is "size[KkMm]"
+ */
+static int __init early_mem(char *p)
+{
+	unsigned long size;
+	char *endp;
+
+	size = memparse(p, &endp);
+
+	bootmem_lastpg = PFN_DOWN(size);
+
+	return 0;
+}
+early_param("mem", early_mem);
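+
+/*
+ * For example, booting with "mem=128M" gives memparse() == 0x8000000,
+ * so bootmem_lastpg = PFN_DOWN(0x8000000) is the memory size in pages;
+ * setup_arch_memory() later rebases it by ARCH_PFN_OFFSET.
+ */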
+
+size_t hexagon_coherent_pool_size = (size_t) (DMA_RESERVE << 22);
+
+void __init setup_arch_memory(void)
+{
+	int bootmap_size;
+	/*  XXX Todo: this probably should be cleaned up  */
+	u32 *segtable = (u32 *) &swapper_pg_dir[0];
+	u32 *segtable_end;
+
+	/*
+	 * Set up boot memory allocator
+	 *
+	 * The Gorman book also talks about these functions.
+	 * This needs to change for highmem setups.
+	 */
+
+	/*  Prior to this, bootmem_lastpg is actually mem size  */
+	bootmem_lastpg += ARCH_PFN_OFFSET;
+
+	/* Memory size needs to be a multiple of 16M */
+	bootmem_lastpg = PFN_DOWN((bootmem_lastpg << PAGE_SHIFT) &
+		~((BIG_KERNEL_PAGE_SIZE) - 1));
+
+	/*
+	 * Reserve the top DMA_RESERVE bytes of RAM for DMA (uncached)
+	 * memory allocation
+	 */
+
+	max_low_pfn = bootmem_lastpg - PFN_DOWN(DMA_RESERVED_BYTES);
+	min_low_pfn = ARCH_PFN_OFFSET;
+	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmem_startpg,
+					 min_low_pfn, max_low_pfn);
+
+	printk(KERN_INFO "bootmem_startpg:  0x%08lx\n", bootmem_startpg);
+	printk(KERN_INFO "bootmem_lastpg:  0x%08lx\n", bootmem_lastpg);
+	printk(KERN_INFO "bootmap_size:  %d\n", bootmap_size);
+	printk(KERN_INFO "min_low_pfn:  0x%08lx\n", min_low_pfn);
+	printk(KERN_INFO "max_low_pfn:  0x%08lx\n", max_low_pfn);
+
+	/*
+	 * The default VM page tables (will be) populated with
+	 * VA=PA+PAGE_OFFSET mapping.  We go in and invalidate entries
+	 * higher than what we have memory for.
+	 */
+
+	/*  this is pointer arithmetic; each entry covers 4MB  */
+	segtable = segtable + (PAGE_OFFSET >> 22);
+
+	/*  this actually only goes to the end of the first gig  */
+	segtable_end = segtable + (1<<(30-22));
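+	/*
+	 * Worked example, assuming a PAGE_OFFSET of 0xc0000000: segtable
+	 * now points at swapper_pg_dir[768] (0xc0000000 >> 22) and
+	 * segtable_end lies 256 entries beyond it, at the end of the
+	 * first gigabyte of kernel virtual space.
+	 */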
+
+	/*
+	 * Move forward to the start of empty pages; take into account
+	 * phys_offset shift.
+	 */
+
+	segtable += (bootmem_lastpg-ARCH_PFN_OFFSET)>>(22-PAGE_SHIFT);
+	{
+		int i;
+
+		for (i = 1; i <= DMA_RESERVE; i++)
+			segtable[-i] = ((segtable[-i] & __HVM_PTE_PGMASK_4MB)
+				| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X
+				| __HEXAGON_C_UNC << 6
+				| __HVM_PDE_S_4MB);
+	}
+
+	printk(KERN_INFO "clearing segtable from %p to %p\n", segtable,
+		segtable_end);
+	while (segtable < (segtable_end-8))
+		*(segtable++) = __HVM_PDE_S_INVALID;
+	/* stop the pointer at the device I/O 4MB page  */
+
+	printk(KERN_INFO "segtable = %p (should be equal to _K_io_map)\n",
+		segtable);
+
+#if 0
+	/*  Other half of the early device table from vm_init_segtable. */
+	printk(KERN_INFO "&_K_init_devicetable = 0x%08x\n",
+		(unsigned long) _K_init_devicetable-PAGE_OFFSET);
+	*segtable = ((u32) (unsigned long) _K_init_devicetable-PAGE_OFFSET) |
+		__HVM_PDE_S_4KB;
+	printk(KERN_INFO "*segtable = 0x%08x\n", *segtable);
+#endif
+
+	/*
+	 * Free all the memory that wasn't taken up by the bootmap, the DMA
+	 * reserve, or kernel itself.
+	 */
+	free_bootmem(PFN_PHYS(bootmem_startpg) + bootmap_size,
+		     PFN_PHYS(bootmem_lastpg - bootmem_startpg) - bootmap_size -
+		     DMA_RESERVED_BYTES);
+
+	/*
+	 *  The bootmem allocator seemingly just lives to feed memory
+	 *  to the paging system
+	 */
+	printk(KERN_INFO "PAGE_SIZE=%lu\n", PAGE_SIZE);
+	paging_init();  /*  See Gorman Book, 2.3  */
+
+	/*
+	 *  At this point, the page allocator is kind of initialized, but
+	 *  apparently no pages are available (just like with the bootmem
+	 *  allocator), and need to be freed themselves via mem_init(),
+	 *  which is called by start_kernel() later on in the process
+	 */
+}
diff --git a/src/kernel/linux/v4.14/arch/hexagon/mm/ioremap.c b/src/kernel/linux/v4.14/arch/hexagon/mm/ioremap.c
new file mode 100644
index 0000000..d27d672
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/hexagon/mm/ioremap.c
@@ -0,0 +1,57 @@
+/*
+ * I/O remap functions for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
+{
+	unsigned long last_addr, addr;
+	unsigned long offset = phys_addr & ~PAGE_MASK;
+	struct vm_struct *area;
+
+	pgprot_t prot = __pgprot(_PAGE_PRESENT|_PAGE_READ|_PAGE_WRITE
+					|(__HEXAGON_C_DEV << 6));
+
+	last_addr = phys_addr + size - 1;
+
+	/*  Wrapping not allowed  */
+	if (!size || (last_addr < phys_addr))
+		return NULL;
+
+	/*  Rounds up to next page size, including whole-page offset */
+	size = PAGE_ALIGN(offset + size);
+
+	area = get_vm_area(size, VM_IOREMAP);
+	addr = (unsigned long)area->addr;
+
+	if (ioremap_page_range(addr, addr+size, phys_addr, prot)) {
+		vunmap((void *)addr);
+		return NULL;
+	}
+
+	return (void __iomem *) (offset + addr);
+}
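+
+/*
+ * Hypothetical usage sketch (the physical address and register offset
+ * are illustrative only):
+ *
+ *	void __iomem *regs = ioremap_nocache(0xab000000, 0x1000);
+ *	if (regs) {
+ *		u32 id = readl(regs + 0x0);
+ *		__iounmap(regs);
+ *	}
+ */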
+
+void __iounmap(const volatile void __iomem *addr)
+{
+	vunmap((void *) ((unsigned long) addr & PAGE_MASK));
+}
diff --git a/src/kernel/linux/v4.14/arch/hexagon/mm/pgalloc.c b/src/kernel/linux/v4.14/arch/hexagon/mm/pgalloc.c
new file mode 100644
index 0000000..19760a4
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/hexagon/mm/pgalloc.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+
+void __init pgtable_cache_init(void)
+{
+}
diff --git a/src/kernel/linux/v4.14/arch/hexagon/mm/strnlen_user.S b/src/kernel/linux/v4.14/arch/hexagon/mm/strnlen_user.S
new file mode 100644
index 0000000..0eecb7a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/hexagon/mm/strnlen_user.S
@@ -0,0 +1,139 @@
+/*
+ * User string length functions for kernel
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#define isrc	r0
+#define max	r1	/*  Do not change!  */
+
+#define end	r2
+#define tmp1	r3
+
+#define obo	r6	/*  off-by-one  */
+#define start	r7
+#define mod8	r8
+#define dbuf    r15:14
+#define dcmp	r13:12
+
+/*
+ * The vector mask version of this turned out *really* badly.
+ * The hardware loop version also turned out *really* badly.
+ * Seems straight pointer arithmetic basically wins here.
+ */
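+
+/*
+ * Return convention, matching strnlen_user(): the string length
+ * including the terminating NUL; max+1 if no NUL is found within max
+ * bytes; 0 if a load faults (see fix_1 below).
+ */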
+
+#define fname __strnlen_user
+
+	.text
+	.global fname
+	.type fname, @function
+	.p2align 5  /*  why?  */
+fname:
+	{
+		mod8 = and(isrc,#7);
+		end = add(isrc,max);
+		start = isrc;
+	}
+	{
+		P0 = cmp.eq(mod8,#0);
+		mod8 = and(end,#7);
+		dcmp = #0;
+		if (P0.new) jump:t dw_loop;	/*  fire up the oven  */
+	}
+
+alignment_loop:
+fail_1:	{
+		tmp1 = memb(start++#1);
+	}
+	{
+		P0 = cmp.eq(tmp1,#0);
+		if (P0.new) jump:nt exit_found;
+		P1 = cmp.gtu(end,start);
+		mod8 = and(start,#7);
+	}
+	{
+		if (!P1) jump exit_error;  /*  hit the end  */
+		P0 = cmp.eq(mod8,#0);
+	}
+	{
+		if (!P0) jump alignment_loop;
+	}
+
+dw_loop:
+fail_2:	{
+		dbuf = memd(start);
+		obo = add(start,#1);
+	}
+	{
+		P0 = vcmpb.eq(dbuf,dcmp);
+	}
+	{
+		tmp1 = P0;
+		P0 = cmp.gtu(end,start);
+	}
+	{
+		tmp1 = ct0(tmp1);
+		mod8 = and(end,#7);
+		if (!P0) jump end_check;
+	}
+	{
+		P0 = cmp.eq(tmp1,#32);
+		if (!P0.new) jump:nt exit_found;
+		if (!P0.new) start = add(obo,tmp1);
+	}
+	{
+		start = add(start,#8);
+		jump dw_loop;
+	}	/*  might be nice to combine these jumps...   */
+
+
+end_check:
+	{
+		P0 = cmp.gt(tmp1,mod8);
+		if (P0.new) jump:nt exit_error;	/*  neverfound!  */
+		start = add(obo,tmp1);
+	}
+
+exit_found:
+	{
+		R0 = sub(start,isrc);
+		jumpr R31;
+	}
+
+exit_error:
+	{
+		R0 = add(max,#1);
+		jumpr R31;
+	}
+
+	/*  Uh, what does the "fixup" return here?  */
+	.falign
+fix_1:
+	{
+		R0 = #0;
+		jumpr R31;
+	}
+
+	.size fname,.-fname
+
+
+	.section __ex_table,"a"
+	.long fail_1,fix_1
+	.long fail_2,fix_1
+	.previous
diff --git a/src/kernel/linux/v4.14/arch/hexagon/mm/uaccess.c b/src/kernel/linux/v4.14/arch/hexagon/mm/uaccess.c
new file mode 100644
index 0000000..c599eb1
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/hexagon/mm/uaccess.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * Support for user memory access from kernel.  This will
+ * probably be inlined for performance at some point, but
+ * for ease of debug, and to a lesser degree for code size,
+ * we implement here as subroutines.
+ */
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <asm/pgtable.h>
+
+/*
+ * For clear_user(), exploit previously defined copy_to_user function
+ * and the fact that we've got a handy zero page defined in kernel/head.S
+ *
+ * dczero here would be even faster.
+ */
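+/*
+ * Worked example: for count == PAGE_SIZE + 100 the loop below copies
+ * one full zero page, then the tail copy clears the remaining 100
+ * bytes; the return value is 0 on success, or the number of bytes
+ * left uncleared if a store faulted.
+ */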
+__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
+{
+	long uncleared;
+
+	while (count > PAGE_SIZE) {
+		uncleared = raw_copy_to_user(dest, &empty_zero_page, PAGE_SIZE);
+		if (uncleared)
+			return count - (PAGE_SIZE - uncleared);
+		count -= PAGE_SIZE;
+		dest += PAGE_SIZE;
+	}
+	if (count)
+		count = raw_copy_to_user(dest, &empty_zero_page, count);
+
+	return count;
+}
+
+unsigned long clear_user_hexagon(void __user *dest, unsigned long count)
+{
+	if (!access_ok(VERIFY_WRITE, dest, count))
+		return count;
+	else
+		return __clear_user_hexagon(dest, count);
+}
diff --git a/src/kernel/linux/v4.14/arch/hexagon/mm/vm_fault.c b/src/kernel/linux/v4.14/arch/hexagon/mm/vm_fault.c
new file mode 100644
index 0000000..3eec33c
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/hexagon/mm/vm_fault.c
@@ -0,0 +1,203 @@
+/*
+ * Memory fault handling for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * Page fault handling for the Hexagon Virtual Machine.
+ * Can also be called by a native port emulating the HVM
+ * exceptions.
+ */
+
+#include <asm/pgtable.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/signal.h>
+#include <linux/extable.h>
+#include <linux/hardirq.h>
+
+/*
+ * Decode of hardware exception sends us to one of several
+ * entry points.  At each, we generate canonical arguments
+ * for handling by the abstract memory management code.
+ */
+#define FLT_IFETCH     -1
+#define FLT_LOAD        0
+#define FLT_STORE       1
+
+
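+/*
+ * For example, a store to an unmapped user address arrives via
+ * write_protection_fault() at the bottom of this file, which calls
+ * do_page_fault(pt_badva(regs), FLT_STORE, regs).
+ */
+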
+/*
+ * Canonical page fault handler
+ */
+void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	siginfo_t info;
+	int si_code = SEGV_MAPERR;
+	int fault;
+	const struct exception_table_entry *fixup;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	/*
+	 * If we're in an interrupt or have no user context,
+	 * we must not take the fault.
+	 */
+	if (unlikely(in_interrupt() || !mm))
+		goto no_context;
+
+	local_irq_enable();
+
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+retry:
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, address);
+	if (!vma)
+		goto bad_area;
+
+	if (vma->vm_start <= address)
+		goto good_area;
+
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+
+	if (expand_stack(vma, address))
+		goto bad_area;
+
+good_area:
+	/* Address space is OK.  Now check access rights. */
+	si_code = SEGV_ACCERR;
+
+	switch (cause) {
+	case FLT_IFETCH:
+		if (!(vma->vm_flags & VM_EXEC))
+			goto bad_area;
+		break;
+	case FLT_LOAD:
+		if (!(vma->vm_flags & VM_READ))
+			goto bad_area;
+		break;
+	case FLT_STORE:
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
+		break;
+	}
+
+	fault = handle_mm_fault(vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
+	/* The most common case -- we are done. */
+	if (likely(!(fault & VM_FAULT_ERROR))) {
+		if (flags & FAULT_FLAG_ALLOW_RETRY) {
+			if (fault & VM_FAULT_MAJOR)
+				current->maj_flt++;
+			else
+				current->min_flt++;
+			if (fault & VM_FAULT_RETRY) {
+				flags &= ~FAULT_FLAG_ALLOW_RETRY;
+				flags |= FAULT_FLAG_TRIED;
+				goto retry;
+			}
+		}
+
+		up_read(&mm->mmap_sem);
+		return;
+	}
+
+	up_read(&mm->mmap_sem);
+
+	/* Handle copyin/out exception cases */
+	if (!user_mode(regs))
+		goto no_context;
+
+	if (fault & VM_FAULT_OOM) {
+		pagefault_out_of_memory();
+		return;
+	}
+
+	/*
+	 * User-mode address is in the memory map, but we are
+	 * unable to fix up the page fault.
+	 */
+	if (fault & VM_FAULT_SIGBUS) {
+		info.si_signo = SIGBUS;
+		info.si_code = BUS_ADRERR;
+	}
+	/* Address is not in the memory map */
+	else {
+		info.si_signo = SIGSEGV;
+		info.si_code = SEGV_ACCERR;
+	}
+	info.si_errno = 0;
+	info.si_addr = (void __user *)address;
+	force_sig_info(info.si_signo, &info, current);
+	return;
+
+bad_area:
+	up_read(&mm->mmap_sem);
+
+	if (user_mode(regs)) {
+		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+		info.si_code = si_code;
+		info.si_addr = (void *)address;
+		force_sig_info(info.si_signo, &info, current);
+		return;
+	}
+	/* Kernel-mode fault falls through */
+
+no_context:
+	fixup = search_exception_tables(pt_elr(regs));
+	if (fixup) {
+		pt_set_elr(regs, fixup->fixup);
+		return;
+	}
+
+	/* Things are looking very, very bad now */
+	bust_spinlocks(1);
+	printk(KERN_EMERG "Unable to handle kernel paging request at "
+		"virtual address 0x%08lx, regs %p\n", address, regs);
+	die("Bad Kernel VA", regs, SIGKILL);
+}
+
+
+void read_protection_fault(struct pt_regs *regs)
+{
+	unsigned long badvadr = pt_badva(regs);
+
+	do_page_fault(badvadr, FLT_LOAD, regs);
+}
+
+void write_protection_fault(struct pt_regs *regs)
+{
+	unsigned long badvadr = pt_badva(regs);
+
+	do_page_fault(badvadr, FLT_STORE, regs);
+}
+
+void execute_protection_fault(struct pt_regs *regs)
+{
+	unsigned long badvadr = pt_badva(regs);
+
+	do_page_fault(badvadr, FLT_IFETCH, regs);
+}
diff --git a/src/kernel/linux/v4.14/arch/hexagon/mm/vm_tlb.c b/src/kernel/linux/v4.14/arch/hexagon/mm/vm_tlb.c
new file mode 100644
index 0000000..b474065
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/hexagon/mm/vm_tlb.c
@@ -0,0 +1,94 @@
+/*
+ * Hexagon Virtual Machine TLB functions
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * The Hexagon Virtual Machine conceals the real workings of
+ * the TLB, but there are one or two functions that need to
+ * be instantiated for it, differently from a native build.
+ */
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/page.h>
+#include <asm/hexagon_vm.h>
+
+/*
+ * Initial VM implementation has only one map active at a time, with
+ * TLB purgings on changes.  So either we're nuking the current map,
+ * or it's a no-op.  This operation is messy on true SMPs where other
+ * processors must be induced to flush the copies in their local TLBs,
+ * but Hexagon thread-based virtual processors share the same MMU.
+ */
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context.ptbase == current->active_mm->context.ptbase)
+		__vmclrmap((void *)start, end - start);
+}
+
+/*
+ * Flush a page from the kernel virtual map - used by highmem
+ */
+void flush_tlb_one(unsigned long vaddr)
+{
+	__vmclrmap((void *)vaddr, PAGE_SIZE);
+}
+
+/*
+ * Flush all TLBs across all CPUs, virtual or real.
+ * A single Hexagon core has 6 thread contexts but
+ * only one TLB.
+ */
+void tlb_flush_all(void)
+{
+	/*  should probably use that fixaddr end or whatever label  */
+	__vmclrmap(0, 0xffff0000);
+}
+
+/*
+ * Flush TLB entries associated with a given mm_struct mapping.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	/* Current Virtual Machine has only one map active at a time */
+	if (current->active_mm->context.ptbase == mm->context.ptbase)
+		tlb_flush_all();
+}
+
+/*
+ * Flush TLB state associated with a page of a vma.
+ */
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long vaddr)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context.ptbase == current->active_mm->context.ptbase)
+		__vmclrmap((void *)vaddr, PAGE_SIZE);
+}
+
+/*
+ * Flush TLB entries associated with a kernel address range.
+ * Like flush range, but without the check on the vma->vm_mm.
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	__vmclrmap((void *)start, end - start);
+}