[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/arch/blackfin/mm/Makefile b/src/kernel/linux/v4.14/arch/blackfin/mm/Makefile
new file mode 100644
index 0000000..4c011b1
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/blackfin/mm/Makefile
@@ -0,0 +1,5 @@
+#
+# arch/blackfin/mm/Makefile
+#
+
+obj-y := sram-alloc.o isram-driver.o init.o maccess.o
diff --git a/src/kernel/linux/v4.14/arch/blackfin/mm/blackfin_sram.h b/src/kernel/linux/v4.14/arch/blackfin/mm/blackfin_sram.h
new file mode 100644
index 0000000..fb0b159
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/blackfin/mm/blackfin_sram.h
@@ -0,0 +1,14 @@
+/*
+ * Local prototypes meant for internal use only
+ *
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BLACKFIN_SRAM_H__
+#define __BLACKFIN_SRAM_H__
+
+extern void *l1sram_alloc(size_t);
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/blackfin/mm/init.c b/src/kernel/linux/v4.14/arch/blackfin/mm/init.c
new file mode 100644
index 0000000..b59cd7c
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/blackfin/mm/init.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/gfp.h>
+#include <linux/swap.h>
+#include <linux/bootmem.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <asm/bfin-global.h>
+#include <asm/pda.h>
+#include <asm/cplbinit.h>
+#include <asm/early_printk.h>
+#include "blackfin_sram.h"
+
+/*
+ * ZERO_PAGE is a special page that is used for zero-initialized data and COW.
+ * Let the bss do its zero-init magic so we don't have to do it ourselves.
+ */
+char empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+EXPORT_SYMBOL(empty_zero_page);
+
+#ifndef CONFIG_EXCEPTION_L1_SCRATCH
+#if defined CONFIG_SYSCALL_TAB_L1
+__attribute__((l1_data))
+#endif
+static unsigned long exception_stack[NR_CPUS][1024];
+#endif
+
+struct blackfin_pda cpu_pda[NR_CPUS];
+EXPORT_SYMBOL(cpu_pda);
+
+/*
+ * paging_init() continues the virtual memory environment setup which
+ * was begun by the code in arch/head.S.
+ */
+void __init paging_init(void)
+{
+	/*
+	 * make sure start_mem is page aligned, otherwise bootmem and
+	 * page_alloc get different views of the world
+	 */
+	unsigned long end_mem = memory_end & PAGE_MASK;
+
+	unsigned long zones_size[MAX_NR_ZONES] = {
+		[0] = 0,
+		[ZONE_DMA] = (end_mem - CONFIG_PHY_RAM_BASE_ADDRESS) >> PAGE_SHIFT,
+		[ZONE_NORMAL] = 0,
+#ifdef CONFIG_HIGHMEM
+		[ZONE_HIGHMEM] = 0,
+#endif
+	};
+
+	/* Set up SFC/DFC registers (user data space) */
+	set_fs(KERNEL_DS);
+
+	pr_debug("free_area_init -> start_mem is %#lx virtual_end is %#lx\n",
+	        PAGE_ALIGN(memory_start), end_mem);
+	free_area_init_node(0, zones_size,
+		CONFIG_PHY_RAM_BASE_ADDRESS >> PAGE_SHIFT, NULL);
+}
+
+asmlinkage void __init init_pda(void)
+{
+	unsigned int cpu = raw_smp_processor_id();
+
+	early_shadow_stamp();
+
+	/* Initialize the PDA fields holding references to other parts
+	   of the memory.  The content of such memory is still undefined
+	   at the time of the call; we are only setting up valid pointers
+	   to it. */
+	memset(&cpu_pda[cpu], 0, sizeof(cpu_pda[cpu]));
+
+#ifdef CONFIG_EXCEPTION_L1_SCRATCH
+	cpu_pda[cpu].ex_stack = (unsigned long *)(L1_SCRATCH_START + \
+					L1_SCRATCH_LENGTH);
+#else
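+	/* the stack grows down, so point at the end of this CPU's slot */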
+	cpu_pda[cpu].ex_stack = exception_stack[cpu + 1];
+#endif
+
+#ifdef CONFIG_SMP
+	cpu_pda[cpu].imask = 0x1f;
+#endif
+}
+
+void __init mem_init(void)
+{
+	char buf[64];
+
+	high_memory = (void *)(memory_end & PAGE_MASK);
+	max_mapnr = MAP_NR(high_memory);
+	printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", max_mapnr);
+
+	/* This will put all low memory onto the freelists. */
+	free_all_bootmem();
+
+	snprintf(buf, sizeof(buf) - 1, "%uK DMA", DMA_UNCACHED_REGION >> 10);
+	mem_init_print_info(buf);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
+{
+#ifndef CONFIG_MPU
+	free_reserved_area((void *)start, (void *)end, -1, "initrd");
+#endif
+}
+#endif
+
+void __ref free_initmem(void)
+{
+#if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
+	free_initmem_default(-1);
+	if (memory_start == (unsigned long)(&__init_end))
+		memory_start = (unsigned long)(&__init_begin);
+#endif
+}
diff --git a/src/kernel/linux/v4.14/arch/blackfin/mm/isram-driver.c b/src/kernel/linux/v4.14/arch/blackfin/mm/isram-driver.c
new file mode 100644
index 0000000..aaa1e64
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/blackfin/mm/isram-driver.c
@@ -0,0 +1,411 @@
+/*
+ * Instruction SRAM accessor functions for the Blackfin
+ *
+ * Copyright 2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#define pr_fmt(fmt) "isram: " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+
+#include <asm/blackfin.h>
+#include <asm/dma.h>
+
+/*
+ * IMPORTANT WARNING ABOUT THESE FUNCTIONS
+ *
+ * The emulator will not function correctly if a write command is left in
+ * ITEST_COMMAND or DTEST_COMMAND AND access to cache memory is needed by
+ * the emulator. To avoid such problems, ensure that both ITEST_COMMAND
+ * and DTEST_COMMAND are zero when exiting these functions.
+ */
+
+
+/*
+ * On the Blackfin, L1 instruction sram (which operates at core speeds) can not
+ * be accessed by a normal core load, so we need to go through a few hoops to
+ * read/write it.
+ * To make this easier, we export a memcpy interface where either src or
+ * dest can be in this special L1 memory area.
+ * The low-level read/write functions should not be exposed to the rest of the
+ * kernel, since they operate on 64-bit data and need specific address alignment.
+ */
+
+static DEFINE_SPINLOCK(dtest_lock);
+
+/* Takes a void pointer */
+#define IADDR2DTEST(x) \
+	({ unsigned long __addr = (unsigned long)(x); \
+		((__addr & (1 << 11)) << (26 - 11)) | /* addr bit 11 (Way0/Way1)   */ \
+		(1 << 24)                           | /* instruction access = 1    */ \
+		((__addr & (1 << 15)) << (23 - 15)) | /* addr bit 15 (Data Bank)   */ \
+		((__addr & (3 << 12)) << (16 - 12)) | /* addr bits 13:12 (Subbank) */ \
+		(__addr & 0x47F8)                   | /* addr bits 14 & 10:3       */ \
+		(1 << 2);                             /* data array = 1            */ \
+	})
+
+/* Takes a pointer and returns the offset (in bits) by which its byte should be shifted */
+#define ADDR2OFFSET(x) ((((unsigned long)(x)) & 0x7) * 8)
+
+/* Takes a pointer, determines if it is the last byte in the isram 64-bit data type */
+#define ADDR2LAST(x) ((((unsigned long)(x)) & 0x7) == 0x7)
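+
+/*
+ * Worked example (illustrative only): for an address ending in 0x5,
+ *
+ *   void *p = (void *)(L1_CODE_START + 0x15);
+ *   ADDR2OFFSET(p);  // (0x15 & 0x7) * 8 == 40, so the byte sits in bits 47:40
+ *   ADDR2LAST(p);    // false, since 0x15 & 0x7 is 5, not 7
+ */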
+
+static void isram_write(const void *addr, uint64_t data)
+{
+	uint32_t cmd;
+	unsigned long flags;
+
+	if (unlikely(addr >= (void *)(L1_CODE_START + L1_CODE_LENGTH)))
+		return;
+
+	cmd = IADDR2DTEST(addr) | 2;             /* write */
+
+	/*
+	 * Writes to DTEST_DATA[0:1] must be atomic with the write to
+	 * DTEST_COMMAND.  In exception context atomicity is already
+	 * guaranteed, or we would have double faulted.
+	 */
+	spin_lock_irqsave(&dtest_lock, flags);
+
+	bfin_write_DTEST_DATA0(data & 0xFFFFFFFF);
+	bfin_write_DTEST_DATA1(data >> 32);
+
+	/* use the builtin, since interrupts are already turned off */
+	__builtin_bfin_csync();
+	bfin_write_DTEST_COMMAND(cmd);
+	__builtin_bfin_csync();
+
+	bfin_write_DTEST_COMMAND(0);
+	__builtin_bfin_csync();
+
+	spin_unlock_irqrestore(&dtest_lock, flags);
+}
+
+static uint64_t isram_read(const void *addr)
+{
+	uint32_t cmd;
+	unsigned long flags;
+	uint64_t ret;
+
+	if (unlikely(addr >= (void *)(L1_CODE_START + L1_CODE_LENGTH)))
+		return 0;
+
+	cmd = IADDR2DTEST(addr) | 0;              /* read */
+
+	/*
+	 * Reads of DTEST_DATA[0:1] must be atomic with the write to
+	 * DTEST_COMMAND.  In exception context atomicity is already
+	 * guaranteed, or we would have double faulted.
+	 */
+	spin_lock_irqsave(&dtest_lock, flags);
+	/* use the builtin, since interrupts are already turned off */
+	__builtin_bfin_csync();
+	bfin_write_DTEST_COMMAND(cmd);
+	__builtin_bfin_csync();
+	ret = bfin_read_DTEST_DATA0() | ((uint64_t)bfin_read_DTEST_DATA1() << 32);
+
+	bfin_write_DTEST_COMMAND(0);
+	__builtin_bfin_csync();
+	spin_unlock_irqrestore(&dtest_lock, flags);
+
+	return ret;
+}
+
+static bool isram_check_addr(const void *addr, size_t n)
+{
+	if ((addr >= (void *)L1_CODE_START) &&
+	    (addr < (void *)(L1_CODE_START + L1_CODE_LENGTH))) {
+		if (unlikely((addr + n) > (void *)(L1_CODE_START + L1_CODE_LENGTH))) {
+			show_stack(NULL, NULL);
+			pr_err("copy involving %p length (%zu) too long\n", addr, n);
+		}
+		return true;
+	}
+	return false;
+}
+
+/*
+ * The isram_memcpy() function copies n bytes from memory area src to memory area dest.
+ * The isram_memcpy() function returns a pointer to dest.
+ * Either dest or src can be in L1 instruction sram.
+ */
+void *isram_memcpy(void *dest, const void *src, size_t n)
+{
+	uint64_t data_in = 0, data_out = 0;
+	size_t count;
+	bool dest_in_l1, src_in_l1, need_data, put_data;
+	unsigned char byte, *src_byte, *dest_byte;
+
+	src_byte = (unsigned char *)src;
+	dest_byte = (unsigned char *)dest;
+
+	dest_in_l1 = isram_check_addr(dest, n);
+	src_in_l1 = isram_check_addr(src, n);
+
+	need_data = true;
+	put_data = true;
+	for (count = 0; count < n; count++) {
+		if (src_in_l1) {
+			if (need_data) {
+				data_in = isram_read(src + count);
+				need_data = false;
+			}
+
+			if (ADDR2LAST(src + count))
+				need_data = true;
+
+			byte = (unsigned char)((data_in >> ADDR2OFFSET(src + count)) & 0xff);
+
+		} else {
+			/* src is in L2 or L3 - so just dereference */
+			byte = src_byte[count];
+		}
+
+		if (dest_in_l1) {
+			if (put_data) {
+				data_out = isram_read(dest + count);
+				put_data = false;
+			}
+
+			data_out &= ~((uint64_t)0xff << ADDR2OFFSET(dest + count));
+			data_out |= ((uint64_t)byte << ADDR2OFFSET(dest + count));
+
+			if (ADDR2LAST(dest + count)) {
+				put_data = true;
+				isram_write(dest + count, data_out);
+			}
+		} else {
+			/* dest in L2 or L3 - so just dereference */
+			dest_byte[count] = byte;
+		}
+	}
+
+	/* make sure we dump the last byte if necessary */
+	if (dest_in_l1 && !put_data)
+		isram_write(dest + count, data_out);
+
+	return dest;
+}
+EXPORT_SYMBOL(isram_memcpy);
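+
+/*
+ * A minimal usage sketch (illustrative only, assuming the allocator in
+ * mm/sram-alloc.c is available):
+ *
+ *   static u8 buf[16], check[16];
+ *   void *l1 = l1_inst_sram_alloc(sizeof(buf));
+ *   if (l1) {
+ *       isram_memcpy(l1, buf, sizeof(buf));      // L3 -> L1 instruction
+ *       isram_memcpy(check, l1, sizeof(check));  // L1 instruction -> L3
+ *       sram_free(l1);
+ *   }
+ */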
+
+#ifdef CONFIG_BFIN_ISRAM_SELF_TEST
+
+static int test_len = 0x20000;
+
+static __init void hex_dump(unsigned char *buf, int len)
+{
+	while (len--)
+		pr_cont("%02x", *buf++);
+}
+
+static __init int isram_read_test(char *sdram, void *l1inst)
+{
+	int i, ret = 0;
+	uint64_t data1, data2;
+
+	pr_info("INFO: running isram_read tests\n");
+
+	/* setup some different data to play with */
+	for (i = 0; i < test_len; ++i)
+		sdram[i] = i % 255;
+	dma_memcpy(l1inst, sdram, test_len);
+
+	/* make sure we can read the L1 inst */
+	for (i = 0; i < test_len; i += sizeof(uint64_t)) {
+		data1 = isram_read(l1inst + i);
+		memcpy(&data2, sdram + i, sizeof(data2));
+		if (data1 != data2) {
+			pr_err("FAIL: isram_read(%p) returned %#llx but wanted %#llx\n",
+				l1inst + i, data1, data2);
+			++ret;
+		}
+	}
+
+	return ret;
+}
+
+static __init int isram_write_test(char *sdram, void *l1inst)
+{
+	int i, ret = 0;
+	uint64_t data1, data2;
+
+	pr_info("INFO: running isram_write tests\n");
+
+	/* setup some different data to play with */
+	memset(sdram, 0, test_len * 2);
+	dma_memcpy(l1inst, sdram, test_len);
+	for (i = 0; i < test_len; ++i)
+		sdram[i] = i % 255;
+
+	/* make sure we can write the L1 inst */
+	for (i = 0; i < test_len; i += sizeof(uint64_t)) {
+		memcpy(&data1, sdram + i, sizeof(data1));
+		isram_write(l1inst + i, data1);
+		data2 = isram_read(l1inst + i);
+		if (data1 != data2) {
+			pr_err("FAIL: isram_write(%p, %#llx) != %#llx\n",
+				l1inst + i, data1, data2);
+			++ret;
+		}
+	}
+
+	dma_memcpy(sdram + test_len, l1inst, test_len);
+	if (memcmp(sdram, sdram + test_len, test_len)) {
+		pr_err("FAIL: isram_write() did not work properly\n");
+		++ret;
+	}
+
+	return ret;
+}
+
+static __init int
+_isram_memcpy_test(char pattern, void *sdram, void *l1inst, const char *smemcpy,
+                   void *(*fmemcpy)(void *, const void *, size_t))
+{
+	memset(sdram, pattern, test_len);
+	fmemcpy(l1inst, sdram, test_len);
+	fmemcpy(sdram + test_len, l1inst, test_len);
+	if (memcmp(sdram, sdram + test_len, test_len)) {
+		pr_err("FAIL: %s(%p <=> %p, %#x) failed (data is %#x)\n",
+			smemcpy, l1inst, sdram, test_len, pattern);
+		return 1;
+	}
+	return 0;
+}
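+/*
+ * Redefine so callers name the memcpy-like function once: the preprocessor
+ * stringizes it, e.g. _isram_memcpy_test(0xa, s, l1, dma_memcpy) expands to
+ * _isram_memcpy_test(0xa, s, l1, "dma_memcpy", dma_memcpy).
+ */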
+#define _isram_memcpy_test(a, b, c, d) _isram_memcpy_test(a, b, c, #d, d)
+
+static __init int isram_memcpy_test(char *sdram, void *l1inst)
+{
+	int i, j, thisret, ret = 0;
+
+	/* check broad isram_memcpy() */
+	pr_info("INFO: running broad isram_memcpy tests\n");
+	for (i = 0xf; i >= 0; --i)
+		ret += _isram_memcpy_test(i, sdram, l1inst, isram_memcpy);
+
+	/* check read of small, unaligned, and hardware 64bit limits */
+	pr_info("INFO: running isram_memcpy (read) tests\n");
+
+	/* setup some different data to play with */
+	for (i = 0; i < test_len; ++i)
+		sdram[i] = i % 255;
+	dma_memcpy(l1inst, sdram, test_len);
+
+	thisret = 0;
+	for (i = 0; i < test_len - 32; ++i) {
+		unsigned char cmp[32];
+		for (j = 1; j <= 32; ++j) {
+			memset(cmp, 0, sizeof(cmp));
+			isram_memcpy(cmp, l1inst + i, j);
+			if (memcmp(cmp, sdram + i, j)) {
+				pr_err("FAIL: %p:", l1inst + 1);
+				hex_dump(cmp, j);
+				pr_cont(" SDRAM:");
+				hex_dump(sdram + i, j);
+				pr_cont("\n");
+				if (++thisret > 20) {
+					pr_err("FAIL: skipping remaining series\n");
+					i = test_len;
+					break;
+				}
+			}
+		}
+	}
+	ret += thisret;
+
+	/* check write of small, unaligned, and hardware 64bit limits */
+	pr_info("INFO: running isram_memcpy (write) tests\n");
+
+	memset(sdram + test_len, 0, test_len);
+	dma_memcpy(l1inst, sdram + test_len, test_len);
+
+	thisret = 0;
+	for (i = 0; i < test_len - 32; ++i) {
+		unsigned char cmp[32];
+		for (j = 1; j <= 32; ++j) {
+			isram_memcpy(l1inst + i, sdram + i, j);
+			dma_memcpy(cmp, l1inst + i, j);
+			if (memcmp(cmp, sdram + i, j)) {
+				pr_err("FAIL: %p:", l1inst + i);
+				hex_dump(cmp, j);
+				pr_cont(" SDRAM:");
+				hex_dump(sdram + i, j);
+				pr_cont("\n");
+				if (++thisret > 20) {
+					pr_err("FAIL: skipping remaining series\n");
+					i = test_len;
+					break;
+				}
+			}
+		}
+	}
+	ret += thisret;
+
+	return ret;
+}
+
+static __init int isram_test_init(void)
+{
+	int ret;
+	char *sdram;
+	void *l1inst;
+
+	/* Try to test as much of L1SRAM as possible */
+	while (test_len) {
+		test_len >>= 1;
+		l1inst = l1_inst_sram_alloc(test_len);
+		if (l1inst)
+			break;
+	}
+	if (!l1inst) {
+		pr_warning("SKIP: could not allocate L1 inst\n");
+		return 0;
+	}
+	pr_info("INFO: testing %#x bytes (%p - %p)\n",
+	        test_len, l1inst, l1inst + test_len);
+
+	sdram = kmalloc(test_len * 2, GFP_KERNEL);
+	if (!sdram) {
+		sram_free(l1inst);
+		pr_warning("SKIP: could not allocate sdram\n");
+		return 0;
+	}
+
+	/* sanity check initial L1 inst state */
+	ret = 1;
+	pr_info("INFO: running initial dma_memcpy checks %p\n", sdram);
+	if (_isram_memcpy_test(0xa, sdram, l1inst, dma_memcpy))
+		goto abort;
+	if (_isram_memcpy_test(0x5, sdram, l1inst, dma_memcpy))
+		goto abort;
+
+	ret = 0;
+	ret += isram_read_test(sdram, l1inst);
+	ret += isram_write_test(sdram, l1inst);
+	ret += isram_memcpy_test(sdram, l1inst);
+
+ abort:
+	sram_free(l1inst);
+	kfree(sdram);
+
+	if (ret)
+		return -EIO;
+
+	pr_info("PASS: all tests worked !\n");
+	return 0;
+}
+late_initcall(isram_test_init);
+
+static __exit void isram_test_exit(void)
+{
+	/* stub to allow unloading */
+}
+module_exit(isram_test_exit);
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/blackfin/mm/maccess.c b/src/kernel/linux/v4.14/arch/blackfin/mm/maccess.c
new file mode 100644
index 0000000..e253211
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/blackfin/mm/maccess.c
@@ -0,0 +1,97 @@
+/*
+ * safe read and write memory routines callable while atomic
+ *
+ * Copyright 2005-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/uaccess.h>
+#include <asm/dma.h>
+
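+/*
+ * Returns a negative error code for an invalid request, otherwise one of
+ * the BFIN_MEM_ACCESS_* types describing how the range may be accessed.
+ */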
+static int validate_memory_access_address(unsigned long addr, int size)
+{
+	if (size < 0 || addr == 0)
+		return -EFAULT;
+	return bfin_mem_access_type(addr, size);
+}
+
+long probe_kernel_read(void *dst, const void *src, size_t size)
+{
+	unsigned long lsrc = (unsigned long)src;
+	int mem_type;
+
+	mem_type = validate_memory_access_address(lsrc, size);
+	if (mem_type < 0)
+		return mem_type;
+
+	if (lsrc >= SYSMMR_BASE) {
+		if (size == 2 && lsrc % 2 == 0) {
+			u16 mmr = bfin_read16(src);
+			memcpy(dst, &mmr, sizeof(mmr));
+			return 0;
+		} else if (size == 4 && lsrc % 4 == 0) {
+			u32 mmr = bfin_read32(src);
+			memcpy(dst, &mmr, sizeof(mmr));
+			return 0;
+		}
+	} else {
+		switch (mem_type) {
+		case BFIN_MEM_ACCESS_CORE:
+		case BFIN_MEM_ACCESS_CORE_ONLY:
+			return __probe_kernel_read(dst, src, size);
+			/* XXX: should support IDMA here with SMP */
+		case BFIN_MEM_ACCESS_DMA:
+			if (dma_memcpy(dst, src, size))
+				return 0;
+			break;
+		case BFIN_MEM_ACCESS_ITEST:
+			if (isram_memcpy(dst, src, size))
+				return 0;
+			break;
+		}
+	}
+
+	return -EFAULT;
+}
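+
+/*
+ * Minimal usage sketch (illustrative only; "ptr" is a stand-in for any
+ * kernel address of interest):
+ *
+ *   u32 val;
+ *   if (probe_kernel_read(&val, ptr, sizeof(val)) == 0)
+ *       pr_info("read %#x\n", val);
+ */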
+
+long probe_kernel_write(void *dst, const void *src, size_t size)
+{
+	unsigned long ldst = (unsigned long)dst;
+	int mem_type;
+
+	mem_type = validate_memory_access_address(ldst, size);
+	if (mem_type < 0)
+		return mem_type;
+
+	if (ldst >= SYSMMR_BASE) {
+		if (size == 2 && ldst % 2 == 0) {
+			u16 mmr;
+			memcpy(&mmr, src, sizeof(mmr));
+			bfin_write16(dst, mmr);
+			return 0;
+		} else if (size == 4 && ldst % 4 == 0) {
+			u32 mmr;
+			memcpy(&mmr, src, sizeof(mmr));
+			bfin_write32(dst, mmr);
+			return 0;
+		}
+	} else {
+		switch (mem_type) {
+		case BFIN_MEM_ACCESS_CORE:
+		case BFIN_MEM_ACCESS_CORE_ONLY:
+			return __probe_kernel_write(dst, src, size);
+			/* XXX: should support IDMA here with SMP */
+		case BFIN_MEM_ACCESS_DMA:
+			if (dma_memcpy(dst, src, size))
+				return 0;
+			break;
+		case BFIN_MEM_ACCESS_ITEST:
+			if (isram_memcpy(dst, src, size))
+				return 0;
+			break;
+		}
+	}
+
+	return -EFAULT;
+}
diff --git a/src/kernel/linux/v4.14/arch/blackfin/mm/sram-alloc.c b/src/kernel/linux/v4.14/arch/blackfin/mm/sram-alloc.c
new file mode 100644
index 0000000..d2a96c2
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/blackfin/mm/sram-alloc.c
@@ -0,0 +1,899 @@
+/*
+ * SRAM allocator for Blackfin on-chip memory
+ *
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+#include <linux/mm_types.h>
+
+#include <asm/blackfin.h>
+#include <asm/mem_map.h>
+#include "blackfin_sram.h"
+
+/* the data structure for L1 scratchpad and DATA SRAM */
+struct sram_piece {
+	void *paddr;
+	int size;
+	pid_t pid;
+	struct sram_piece *next;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
+static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
+static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);
+
+#if L1_DATA_A_LENGTH != 0
+static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
+static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
+#endif
+
+#if L1_DATA_B_LENGTH != 0
+static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
+static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
+#endif
+
+#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
+static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
+#endif
+
+#if L1_CODE_LENGTH != 0
+static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
+static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
+static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
+#endif
+
+#if L2_LENGTH != 0
+static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
+static struct sram_piece free_l2_sram_head, used_l2_sram_head;
+#endif
+
+static struct kmem_cache *sram_piece_cache;
+
+/* L1 Scratchpad SRAM initialization function */
+static void __init l1sram_init(void)
+{
+	unsigned int cpu;
+	unsigned long reserve;
+
+#ifdef CONFIG_SMP
+	reserve = 0;
+#else
+	reserve = sizeof(struct l1_scratch_task_info);
+#endif
+
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		per_cpu(free_l1_ssram_head, cpu).next =
+			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+		if (!per_cpu(free_l1_ssram_head, cpu).next) {
+			printk(KERN_INFO "Fail to initialize Scratchpad data SRAM.\n");
+			return;
+		}
+
+		per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu) + reserve;
+		per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH - reserve;
+		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
+		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;
+
+		per_cpu(used_l1_ssram_head, cpu).next = NULL;
+
+		/* initialize the per-CPU lock */
+		spin_lock_init(&per_cpu(l1sram_lock, cpu));
+		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
+			L1_SCRATCH_LENGTH >> 10);
+	}
+}
+
+static void __init l1_data_sram_init(void)
+{
+#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
+	unsigned int cpu;
+#endif
+#if L1_DATA_A_LENGTH != 0
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		per_cpu(free_l1_data_A_sram_head, cpu).next =
+			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
+			printk(KERN_INFO "Fail to initialize L1 Data A SRAM.\n");
+			return;
+		}
+
+		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
+			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
+		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
+			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
+		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
+		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;
+
+		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;
+
+		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
+			L1_DATA_A_LENGTH >> 10,
+			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
+	}
+#endif
+#if L1_DATA_B_LENGTH != 0
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		per_cpu(free_l1_data_B_sram_head, cpu).next =
+			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
+			printk(KERN_INFO "Fail to initialize L1 Data B SRAM.\n");
+			return;
+		}
+
+		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
+			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
+		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
+			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
+		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
+		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;
+
+		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;
+
+		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
+			L1_DATA_B_LENGTH >> 10,
+			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
+	}
+#endif
+
+#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
+		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
+#endif
+}
+
+static void __init l1_inst_sram_init(void)
+{
+#if L1_CODE_LENGTH != 0
+	unsigned int cpu;
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		per_cpu(free_l1_inst_sram_head, cpu).next =
+			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
+			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
+			return;
+		}
+
+		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
+			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
+		per_cpu(free_l1_inst_sram_head, cpu).next->size =
+			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
+		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
+		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;
+
+		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;
+
+		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
+			L1_CODE_LENGTH >> 10,
+			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);
+
+		/* initialize the per-CPU lock */
+		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
+	}
+#endif
+}
+
+#ifdef __ADSPBF60x__
+static irqreturn_t l2_ecc_err(int irq, void *dev_id)
+{
+	int status;
+
+	printk(KERN_ERR "L2 ecc error happened\n");
+	status = bfin_read32(L2CTL0_STAT);
+	if (status & 0x1)
+		printk(KERN_ERR "Core channel error type:0x%x, addr:0x%x\n",
+			bfin_read32(L2CTL0_ET0), bfin_read32(L2CTL0_EADDR0));
+	if (status & 0x2)
+		printk(KERN_ERR "System channel error type:0x%x, addr:0x%x\n",
+			bfin_read32(L2CTL0_ET1), bfin_read32(L2CTL0_EADDR1));
+
+	status = status >> 8;
+	if (status)
+		printk(KERN_ERR "L2 Bank%d error, addr:0x%x\n",
+			status, bfin_read32(L2CTL0_ERRADDR0 + status));
+
+	panic("L2 Ecc error");
+	return IRQ_HANDLED;
+}
+#endif
+
+static void __init l2_sram_init(void)
+{
+#if L2_LENGTH != 0
+
+#ifdef __ADSPBF60x__
+	int ret;
+
+	ret = request_irq(IRQ_L2CTL0_ECC_ERR, l2_ecc_err, 0, "l2-ecc-err",
+			NULL);
+	if (unlikely(ret < 0)) {
+		printk(KERN_INFO "Fail to request l2 ecc error interrupt");
+		return;
+	}
+#endif
+
+	free_l2_sram_head.next =
+		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+	if (!free_l2_sram_head.next) {
+		printk(KERN_INFO "Fail to initialize L2 SRAM.\n");
+		return;
+	}
+
+	free_l2_sram_head.next->paddr =
+		(void *)L2_START + (_ebss_l2 - _stext_l2);
+	free_l2_sram_head.next->size =
+		L2_LENGTH - (_ebss_l2 - _stext_l2);
+	free_l2_sram_head.next->pid = 0;
+	free_l2_sram_head.next->next = NULL;
+
+	used_l2_sram_head.next = NULL;
+
+	printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
+		L2_LENGTH >> 10,
+		free_l2_sram_head.next->size >> 10);
+
+	/* initialize the lock */
+	spin_lock_init(&l2_sram_lock);
+#endif
+}
+
+static int __init bfin_sram_init(void)
+{
+	sram_piece_cache = kmem_cache_create("sram_piece_cache",
+				sizeof(struct sram_piece),
+				0, SLAB_PANIC, NULL);
+
+	l1sram_init();
+	l1_data_sram_init();
+	l1_inst_sram_init();
+	l2_sram_init();
+
+	return 0;
+}
+pure_initcall(bfin_sram_init);
+
+/* SRAM allocate function */
+static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
+		struct sram_piece *pused_head)
+{
+	struct sram_piece *pslot, *plast, *pavail;
+
+	if (size <= 0 || !pfree_head || !pused_head)
+		return NULL;
+
+	/* round the requested size up to a 4-byte multiple */
+	size = (size + 3) & ~3;
+
+	pslot = pfree_head->next;
+	plast = pfree_head;
+
+	/* search an available piece slot */
+	while (pslot != NULL && size > pslot->size) {
+		plast = pslot;
+		pslot = pslot->next;
+	}
+
+	if (!pslot)
+		return NULL;
+
+	if (pslot->size == size) {
+		plast->next = pslot->next;
+		pavail = pslot;
+	} else {
+		/* use atomic so our L1 allocator can be used atomically */
+		pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC);
+
+		if (!pavail)
+			return NULL;
+
+		pavail->paddr = pslot->paddr;
+		pavail->size = size;
+		pslot->paddr += size;
+		pslot->size -= size;
+	}
+
+	pavail->pid = current->pid;
+
+	pslot = pused_head->next;
+	plast = pused_head;
+
+	/* insert the new piece into the used list, kept sorted by descending address */
+	while (pslot != NULL && pavail->paddr < pslot->paddr) {
+		plast = pslot;
+		pslot = pslot->next;
+	}
+
+	pavail->next = pslot;
+	plast->next = pavail;
+
+	return pavail->paddr;
+}
+
+/* Allocate the largest available block.  */
+static void *_sram_alloc_max(struct sram_piece *pfree_head,
+				struct sram_piece *pused_head,
+				unsigned long *psize)
+{
+	struct sram_piece *pslot, *pmax;
+
+	if (!pfree_head || !pused_head)
+		return NULL;
+
+	pmax = pslot = pfree_head->next;
+
+	/* search an available piece slot */
+	while (pslot != NULL) {
+		if (pslot->size > pmax->size)
+			pmax = pslot;
+		pslot = pslot->next;
+	}
+
+	if (!pmax)
+		return NULL;
+
+	*psize = pmax->size;
+
+	return _sram_alloc(*psize, pfree_head, pused_head);
+}
+
+/* SRAM free function */
+static int _sram_free(const void *addr,
+			struct sram_piece *pfree_head,
+			struct sram_piece *pused_head)
+{
+	struct sram_piece *pslot, *plast, *pavail;
+
+	if (!pfree_head || !pused_head)
+		return -1;
+
+	/* search the relevant memory slot */
+	pslot = pused_head->next;
+	plast = pused_head;
+
+	/* search an available piece slot */
+	while (pslot != NULL && pslot->paddr != addr) {
+		plast = pslot;
+		pslot = pslot->next;
+	}
+
+	if (!pslot)
+		return -1;
+
+	plast->next = pslot->next;
+	pavail = pslot;
+	pavail->pid = 0;
+
+	/* insert free pieces back to the free list */
+	pslot = pfree_head->next;
+	plast = pfree_head;
+
+	while (pslot != NULL && addr > pslot->paddr) {
+		plast = pslot;
+		pslot = pslot->next;
+	}
+
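+	/*
+	 * Coalesce with neighbours where possible: the free list is kept
+	 * sorted by ascending address, so only the piece before (plast)
+	 * and the piece after (pslot) need to be checked.
+	 */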
+	if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
+		plast->size += pavail->size;
+		kmem_cache_free(sram_piece_cache, pavail);
+	} else {
+		pavail->next = plast->next;
+		plast->next = pavail;
+		plast = pavail;
+	}
+
+	if (pslot && plast->paddr + plast->size == pslot->paddr) {
+		plast->size += pslot->size;
+		plast->next = pslot->next;
+		kmem_cache_free(sram_piece_cache, pslot);
+	}
+
+	return 0;
+}
+
+int sram_free(const void *addr)
+{
+
+#if L1_CODE_LENGTH != 0
+	if (addr >= (void *)get_l1_code_start()
+		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
+		return l1_inst_sram_free(addr);
+	else
+#endif
+#if L1_DATA_A_LENGTH != 0
+	if (addr >= (void *)get_l1_data_a_start()
+		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
+		return l1_data_A_sram_free(addr);
+	else
+#endif
+#if L1_DATA_B_LENGTH != 0
+	if (addr >= (void *)get_l1_data_b_start()
+		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
+		return l1_data_B_sram_free(addr);
+	else
+#endif
+#if L2_LENGTH != 0
+	if (addr >= (void *)L2_START
+		 && addr < (void *)(L2_START + L2_LENGTH))
+		return l2_sram_free(addr);
+	else
+#endif
+		return -1;
+}
+EXPORT_SYMBOL(sram_free);
+
+void *l1_data_A_sram_alloc(size_t size)
+{
+#if L1_DATA_A_LENGTH != 0
+	unsigned long flags;
+	void *addr;
+	unsigned int cpu;
+
+	cpu = smp_processor_id();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
+
+	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
+			&per_cpu(used_l1_data_A_sram_head, cpu));
+
+	/* add mutex operation */
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+
+	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
+		 (long unsigned int)addr, size);
+
+	return addr;
+#else
+	return NULL;
+#endif
+}
+EXPORT_SYMBOL(l1_data_A_sram_alloc);
+
+int l1_data_A_sram_free(const void *addr)
+{
+#if L1_DATA_A_LENGTH != 0
+	unsigned long flags;
+	int ret;
+	unsigned int cpu;
+
+	cpu = smp_processor_id();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
+
+	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
+			&per_cpu(used_l1_data_A_sram_head, cpu));
+
+	/* add mutex operation */
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+
+	return ret;
+#else
+	return -1;
+#endif
+}
+EXPORT_SYMBOL(l1_data_A_sram_free);
+
+void *l1_data_B_sram_alloc(size_t size)
+{
+#if L1_DATA_B_LENGTH != 0
+	unsigned long flags;
+	void *addr;
+	unsigned int cpu;
+
+	cpu = smp_processor_id();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
+
+	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
+			&per_cpu(used_l1_data_B_sram_head, cpu));
+
+	/* add mutex operation */
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+
+	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
+		 (long unsigned int)addr, size);
+
+	return addr;
+#else
+	return NULL;
+#endif
+}
+EXPORT_SYMBOL(l1_data_B_sram_alloc);
+
+int l1_data_B_sram_free(const void *addr)
+{
+#if L1_DATA_B_LENGTH != 0
+	unsigned long flags;
+	int ret;
+	unsigned int cpu;
+
+	cpu = smp_processor_id();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
+
+	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
+			&per_cpu(used_l1_data_B_sram_head, cpu));
+
+	/* add mutex operation */
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+
+	return ret;
+#else
+	return -1;
+#endif
+}
+EXPORT_SYMBOL(l1_data_B_sram_free);
+
+void *l1_data_sram_alloc(size_t size)
+{
+	void *addr = l1_data_A_sram_alloc(size);
+
+	if (!addr)
+		addr = l1_data_B_sram_alloc(size);
+
+	return addr;
+}
+EXPORT_SYMBOL(l1_data_sram_alloc);
+
+void *l1_data_sram_zalloc(size_t size)
+{
+	void *addr = l1_data_sram_alloc(size);
+
+	if (addr)
+		memset(addr, 0x00, size);
+
+	return addr;
+}
+EXPORT_SYMBOL(l1_data_sram_zalloc);
+
+int l1_data_sram_free(const void *addr)
+{
+	int ret;
+	ret = l1_data_A_sram_free(addr);
+	if (ret == -1)
+		ret = l1_data_B_sram_free(addr);
+	return ret;
+}
+EXPORT_SYMBOL(l1_data_sram_free);
+
+void *l1_inst_sram_alloc(size_t size)
+{
+#if L1_CODE_LENGTH != 0
+	unsigned long flags;
+	void *addr;
+	unsigned int cpu;
+
+	cpu = smp_processor_id();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);
+
+	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
+			&per_cpu(used_l1_inst_sram_head, cpu));
+
+	/* add mutex operation */
+	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
+
+	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
+		 (long unsigned int)addr, size);
+
+	return addr;
+#else
+	return NULL;
+#endif
+}
+EXPORT_SYMBOL(l1_inst_sram_alloc);
+
+int l1_inst_sram_free(const void *addr)
+{
+#if L1_CODE_LENGTH != 0
+	unsigned long flags;
+	int ret;
+	unsigned int cpu;
+
+	cpu = smp_processor_id();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);
+
+	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
+			&per_cpu(used_l1_inst_sram_head, cpu));
+
+	/* add mutex operation */
+	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
+
+	return ret;
+#else
+	return -1;
+#endif
+}
+EXPORT_SYMBOL(l1_inst_sram_free);
+
+/* L1 Scratchpad memory allocate function */
+void *l1sram_alloc(size_t size)
+{
+	unsigned long flags;
+	void *addr;
+	unsigned int cpu;
+
+	cpu = smp_processor_id();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
+
+	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
+			&per_cpu(used_l1_ssram_head, cpu));
+
+	/* add mutex operation */
+	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
+
+	return addr;
+}
+
+/* L1 Scratchpad memory allocate function */
+void *l1sram_alloc_max(size_t *psize)
+{
+	unsigned long flags;
+	void *addr;
+	unsigned int cpu;
+
+	cpu = smp_processor_id();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
+
+	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
+			&per_cpu(used_l1_ssram_head, cpu), psize);
+
+	/* add mutex operation */
+	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
+
+	return addr;
+}
+
+/* L1 Scratchpad memory free function */
+int l1sram_free(const void *addr)
+{
+	unsigned long flags;
+	int ret;
+	unsigned int cpu;
+
+	cpu = smp_processor_id();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
+
+	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
+			&per_cpu(used_l1_ssram_head, cpu));
+
+	/* add mutex operation */
+	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
+
+	return ret;
+}
+
+void *l2_sram_alloc(size_t size)
+{
+#if L2_LENGTH != 0
+	unsigned long flags;
+	void *addr;
+
+	/* add mutex operation */
+	spin_lock_irqsave(&l2_sram_lock, flags);
+
+	addr = _sram_alloc(size, &free_l2_sram_head,
+			&used_l2_sram_head);
+
+	/* add mutex operation */
+	spin_unlock_irqrestore(&l2_sram_lock, flags);
+
+	pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
+		 (long unsigned int)addr, size);
+
+	return addr;
+#else
+	return NULL;
+#endif
+}
+EXPORT_SYMBOL(l2_sram_alloc);
+
+void *l2_sram_zalloc(size_t size)
+{
+	void *addr = l2_sram_alloc(size);
+
+	if (addr)
+		memset(addr, 0x00, size);
+
+	return addr;
+}
+EXPORT_SYMBOL(l2_sram_zalloc);
+
+int l2_sram_free(const void *addr)
+{
+#if L2_LENGTH != 0
+	unsigned long flags;
+	int ret;
+
+	/* add mutex operation */
+	spin_lock_irqsave(&l2_sram_lock, flags);
+
+	ret = _sram_free(addr, &free_l2_sram_head,
+			&used_l2_sram_head);
+
+	/* add mutex operation */
+	spin_unlock_irqrestore(&l2_sram_lock, flags);
+
+	return ret;
+#else
+	return -1;
+#endif
+}
+EXPORT_SYMBOL(l2_sram_free);
+
+int sram_free_with_lsl(const void *addr)
+{
+	struct sram_list_struct *lsl, **tmp;
+	struct mm_struct *mm = current->mm;
+	int ret = -1;
+
+	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
+		if ((*tmp)->addr == addr) {
+			lsl = *tmp;
+			ret = sram_free(addr);
+			*tmp = lsl->next;
+			kfree(lsl);
+			break;
+		}
+
+	return ret;
+}
+EXPORT_SYMBOL(sram_free_with_lsl);
+
+/* Allocate memory and keep it in the L1 SRAM list (lsl) so that the
+ * resources are tracked.  These are designed for userspace so that when a
+ * process exits, we can safely reap its resources.
+ */
+void *sram_alloc_with_lsl(size_t size, unsigned long flags)
+{
+	void *addr = NULL;
+	struct sram_list_struct *lsl = NULL;
+	struct mm_struct *mm = current->mm;
+
+	lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
+	if (!lsl)
+		return NULL;
+
+	if (flags & L1_INST_SRAM)
+		addr = l1_inst_sram_alloc(size);
+
+	if (addr == NULL && (flags & L1_DATA_A_SRAM))
+		addr = l1_data_A_sram_alloc(size);
+
+	if (addr == NULL && (flags & L1_DATA_B_SRAM))
+		addr = l1_data_B_sram_alloc(size);
+
+	if (addr == NULL && (flags & L2_SRAM))
+		addr = l2_sram_alloc(size);
+
+	if (addr == NULL) {
+		kfree(lsl);
+		return NULL;
+	}
+	lsl->addr = addr;
+	lsl->length = size;
+	lsl->next = mm->context.sram_list;
+	mm->context.sram_list = lsl;
+	return addr;
+}
+EXPORT_SYMBOL(sram_alloc_with_lsl);
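+
+/*
+ * Minimal usage sketch (illustrative only), using the L1_*_SRAM flag masks
+ * accepted above:
+ *
+ *   void *p = sram_alloc_with_lsl(64, L1_DATA_A_SRAM | L1_DATA_B_SRAM);
+ *   if (p)
+ *       sram_free_with_lsl(p);
+ */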
+
+#ifdef CONFIG_PROC_FS
+/* Once we get a real allocator, we'll throw all of this away.
+ * Until then, we need some sort of visibility into the L1 alloc.
+ */
+/* Each line of output must be the same length.  Currently, that is 44
+ * bytes (including the newline).
+ */
+static int _sram_proc_show(struct seq_file *m, const char *desc,
+		struct sram_piece *pfree_head,
+		struct sram_piece *pused_head)
+{
+	struct sram_piece *pslot;
+
+	if (!pfree_head || !pused_head)
+		return -1;
+
+	seq_printf(m, "--- SRAM %-14s Size   PID State     \n", desc);
+
+	/* search the relevant memory slot */
+	pslot = pused_head->next;
+
+	while (pslot != NULL) {
+		seq_printf(m, "%p-%p %10i %5i %-10s\n",
+			pslot->paddr, pslot->paddr + pslot->size,
+			pslot->size, pslot->pid, "ALLOCATED");
+
+		pslot = pslot->next;
+	}
+
+	pslot = pfree_head->next;
+
+	while (pslot != NULL) {
+		seq_printf(m, "%p-%p %10i %5i %-10s\n",
+			pslot->paddr, pslot->paddr + pslot->size,
+			pslot->size, pslot->pid, "FREE");
+
+		pslot = pslot->next;
+	}
+
+	return 0;
+}
+static int sram_proc_show(struct seq_file *m, void *v)
+{
+	unsigned int cpu;
+
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		if (_sram_proc_show(m, "Scratchpad",
+			&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
+			goto not_done;
+#if L1_DATA_A_LENGTH != 0
+		if (_sram_proc_show(m, "L1 Data A",
+			&per_cpu(free_l1_data_A_sram_head, cpu),
+			&per_cpu(used_l1_data_A_sram_head, cpu)))
+			goto not_done;
+#endif
+#if L1_DATA_B_LENGTH != 0
+		if (_sram_proc_show(m, "L1 Data B",
+			&per_cpu(free_l1_data_B_sram_head, cpu),
+			&per_cpu(used_l1_data_B_sram_head, cpu)))
+			goto not_done;
+#endif
+#if L1_CODE_LENGTH != 0
+		if (_sram_proc_show(m, "L1 Instruction",
+			&per_cpu(free_l1_inst_sram_head, cpu),
+			&per_cpu(used_l1_inst_sram_head, cpu)))
+			goto not_done;
+#endif
+	}
+#if L2_LENGTH != 0
+	if (_sram_proc_show(m, "L2", &free_l2_sram_head, &used_l2_sram_head))
+		goto not_done;
+#endif
+ not_done:
+	return 0;
+}
+
+static int sram_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, sram_proc_show, NULL);
+}
+
+static const struct file_operations sram_proc_ops = {
+	.open		= sram_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init sram_proc_init(void)
+{
+	struct proc_dir_entry *ptr;
+
+	ptr = proc_create("sram", S_IRUGO, NULL, &sram_proc_ops);
+	if (!ptr) {
+		printk(KERN_WARNING "unable to create /proc/sram\n");
+		return -1;
+	}
+	return 0;
+}
+late_initcall(sram_proc_init);
+#endif